Dataset columns (field name, type, value-length range):

  in_source_id    string   13 to 58
  issue           string   3 to 241k
  before_files    list     0 to 3
  after_files     list     0 to 3
  pr_diff         string   109 to 107M
OpenMined__PySyft-4708
Add Windows to CI ## Description Add windows to the CI tests as a separate step for say python 3.8 and torch==1.6.0 initially just to get things working. Then if it works expand to all versions to see any potential issues. ## Definition of Done This ticket is done when we know what does and doesn't run on Windows in CI from the current "fast" tests and the new "slow" tests. Post a screenshot and link to CI here when it's running.
[ { "content": "# stdlib\nfrom typing import Dict\nfrom typing import Union\n\n# third party\nfrom packaging import version\nimport torch\n\n# syft relative\nfrom . import parameter # noqa: 401\nfrom . import uppercase_tensor # noqa: 401\nfrom ...ast.globals import Globals\nfrom .allowlist import allowlist\n\nTORCH_VERSION = version.parse(torch.__version__)\n\n\ndef get_return_type(support_dict: Union[str, Dict[str, str]]) -> str:\n if isinstance(support_dict, str):\n return support_dict\n else:\n return support_dict[\"return_type\"]\n\n\ndef version_supported(support_dict: Union[str, Dict[str, str]]) -> bool:\n if isinstance(support_dict, str):\n return True\n else:\n return TORCH_VERSION >= version.parse(support_dict[\"min_version\"])\n\n\ndef create_torch_ast() -> Globals:\n ast = Globals()\n\n # most methods work in all versions and have a single return type\n # for the more complicated ones we pass a dict with keys like return_type and\n # min_version\n for method, return_type_name_or_dict in allowlist.items():\n if version_supported(support_dict=return_type_name_or_dict):\n return_type = get_return_type(support_dict=return_type_name_or_dict)\n if return_type == \"unknown\":\n # this allows us to import them for testing\n continue\n ast.add_path(\n path=method, framework_reference=torch, return_type_name=return_type\n )\n # add all the torch.nn.Parameter hooks\n if method.startswith(\"torch.Tensor.\"):\n method = method.replace(\"torch.Tensor.\", \"torch.nn.Parameter.\")\n return_type = return_type.replace(\"torch.Tensor\", \"torch.nn.Parameter\")\n ast.add_path(\n path=method, framework_reference=torch, return_type_name=return_type\n )\n else:\n print(f\"Skipping torch.{method} not supported in {TORCH_VERSION}\")\n\n for klass in ast.classes:\n klass.create_pointer_class()\n klass.create_send_method()\n klass.create_serialization_methods()\n klass.create_storable_object_attr_convenience_methods()\n return ast\n", "path": "src/syft/lib/torch/__init__.py" } ]
[ { "content": "# stdlib\nfrom typing import Dict\nfrom typing import Union\n\n# third party\nfrom packaging import version\nimport torch\n\n# syft relative\nfrom . import parameter # noqa: 401\nfrom . import uppercase_tensor # noqa: 401\nfrom ...ast.globals import Globals\nfrom .allowlist import allowlist\n\nTORCH_VERSION = version.parse(torch.__version__.split(\"+\")[0])\n\n\ndef get_return_type(support_dict: Union[str, Dict[str, str]]) -> str:\n if isinstance(support_dict, str):\n return support_dict\n else:\n return support_dict[\"return_type\"]\n\n\ndef version_supported(support_dict: Union[str, Dict[str, str]]) -> bool:\n if isinstance(support_dict, str):\n return True\n else:\n return TORCH_VERSION >= version.parse(support_dict[\"min_version\"])\n\n\ndef create_torch_ast() -> Globals:\n ast = Globals()\n\n # most methods work in all versions and have a single return type\n # for the more complicated ones we pass a dict with keys like return_type and\n # min_version\n for method, return_type_name_or_dict in allowlist.items():\n if version_supported(support_dict=return_type_name_or_dict):\n return_type = get_return_type(support_dict=return_type_name_or_dict)\n if return_type == \"unknown\":\n # this allows us to import them for testing\n continue\n ast.add_path(\n path=method, framework_reference=torch, return_type_name=return_type\n )\n # add all the torch.nn.Parameter hooks\n if method.startswith(\"torch.Tensor.\"):\n method = method.replace(\"torch.Tensor.\", \"torch.nn.Parameter.\")\n return_type = return_type.replace(\"torch.Tensor\", \"torch.nn.Parameter\")\n ast.add_path(\n path=method, framework_reference=torch, return_type_name=return_type\n )\n else:\n print(f\"Skipping torch.{method} not supported in {TORCH_VERSION}\")\n\n for klass in ast.classes:\n klass.create_pointer_class()\n klass.create_send_method()\n klass.create_serialization_methods()\n klass.create_storable_object_attr_convenience_methods()\n return ast\n", "path": "src/syft/lib/torch/__init__.py" } ]
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index d08e095ab60..d770082e3bc 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -8,19 +8,21 @@ on: paths: - "**.py" - "setup.cfg" + - ".github/workflows/tests.yml" pull_request: types: [opened, synchronize, reopened] paths: - "**.py" - "setup.cfg" + - ".github/workflows/tests.yml" jobs: python-tests: strategy: max-parallel: 24 matrix: - os: [ubuntu-latest, macos-latest] + os: [ubuntu-latest, macos-latest, windows-latest] python-version: [3.8, 3.7, 3.6] torch-version: [1.5.0, 1.5.1, 1.6.0] @@ -49,6 +51,14 @@ jobs: restore-keys: | ${{ runner.os }}-pip-${{ matrix.python-version }}- + - uses: actions/cache@v2 + if: startsWith(runner.os, 'Windows') + with: + path: '%LOCALAPPDATA%\pip\Cache' + key: ${{ runner.os }}-pip-${{ matrix.python-version }}-${{ hashFiles('**/requirements.txt') }} + restore-keys: | + ${{ runner.os }}-pip-${{ matrix.python-version }}- + - name: Cache packages uses: actions/cache@v2 id: cache-reqs @@ -61,7 +71,8 @@ jobs: pip install bandit bandit -r src -ll - - name: Run normal tests without coverage + - name: Install pytorch Linux/MacOS + if: startsWith(runner.os, 'Windows') != true env: TORCH_VERSION: ${{ matrix.torch-version }} run: | @@ -75,8 +86,25 @@ jobs: then TORCHVISION_VERSION="0.7" fi - pip install torch==$TORCH_VERSION + pip install torch==${TORCH_VERSION} pip install torchvision==${TORCHVISION_VERSION} + + - name: Install pytorch Windows + if: startsWith(runner.os, 'Windows') + env: + TORCH_VERSION: ${{ matrix.torch-version }} + run: | + If ($env:TORCH_VERSION -eq "1.5.0") { + $env:TORCHVISION_VERSION="0.6.0" + } Elseif ( $env:TORCH_VERSION -eq "1.5.1" ) { + $env:TORCHVISION_VERSION="0.6.1" + } Elseif ($env:TORCH_VERSION -eq "1.6.0") { + $env:TORCHVISION_VERSION="0.7" + } + pip install torch==$env:TORCH_VERSION+cpu torchvision==$env:TORCHVISION_VERSION+cpu -f https://download.pytorch.org/whl/torch_stable.html + + - name: Run normal tests without coverage + run: | pip install -r requirements.txt pip install -e . 
pip freeze | grep torch @@ -105,14 +133,6 @@ jobs: restore-keys: | ${{ runner.os }}-pip-${{ matrix.python-version }}- - - uses: actions/cache@v2 - if: startsWith(runner.os, 'macOS') - with: - path: ~/Library/Caches/pip - key: ${{ runner.os }}-pip-${{ matrix.python-version }}-${{ hashFiles('**/requirements.txt') }} - restore-keys: | - ${{ runner.os }}-pip-${{ matrix.python-version }}- - - name: Cache packages uses: actions/cache@v2 id: cache-reqs @@ -150,21 +170,12 @@ jobs: python-version: ${{ matrix.python-version }} - uses: actions/cache@v2 - if: startsWith(runner.os, 'Linux') with: path: ~/.cache/pip key: ${{ runner.os }}-pip-${{ matrix.python-version }}-${{ hashFiles('**/requirements.txt') }} restore-keys: | ${{ runner.os }}-pip-${{ matrix.python-version }}- - - uses: actions/cache@v2 - if: startsWith(runner.os, 'macOS') - with: - path: ~/Library/Caches/pip - key: ${{ runner.os }}-pip-${{ matrix.python-version }}-${{ hashFiles('**/requirements.txt') }} - restore-keys: | - ${{ runner.os }}-pip-${{ matrix.python-version }}- - - name: Cache packages uses: actions/cache@v2 id: cache-reqs @@ -183,7 +194,7 @@ jobs: strategy: max-parallel: 24 matrix: - os: [ubuntu-latest, macos-latest] + os: [ubuntu-latest, macos-latest, windows-latest] python-version: [3.8, 3.7, 3.6] torch-version: [1.5.0, 1.5.1, 1.6.0] diff --git a/docs/installing.rst b/docs/installing.rst index f9f177c22da..f659287b32c 100644 --- a/docs/installing.rst +++ b/docs/installing.rst @@ -402,7 +402,7 @@ Step 1 - Install Git -------------------- Here is the download link for Git on windows: `Git for Windows <https://gitforwindows.org/>`_ -or in case you are lazy! you can use `Github for Desktop. <https://desktop.github.com/>`_ +Note: You need git in your path or the `pip install -e .` will fail. 
Step 2 - Install Microsoft Build tools -------------------------------------- diff --git a/src/syft/lib/torch/__init__.py b/src/syft/lib/torch/__init__.py index 5413fe9ac0d..3700f5aa4c6 100644 --- a/src/syft/lib/torch/__init__.py +++ b/src/syft/lib/torch/__init__.py @@ -12,7 +12,7 @@ from ...ast.globals import Globals from .allowlist import allowlist -TORCH_VERSION = version.parse(torch.__version__) +TORCH_VERSION = version.parse(torch.__version__.split("+")[0]) def get_return_type(support_dict: Union[str, Dict[str, str]]) -> str: diff --git a/tests/syft/grid/connections/webrtc_test.py b/tests/syft/grid/connections/webrtc_test.py index 31feeb3eada..b209cf614d3 100644 --- a/tests/syft/grid/connections/webrtc_test.py +++ b/tests/syft/grid/connections/webrtc_test.py @@ -20,7 +20,8 @@ def get_signing_key() -> SigningKey: return SigningKey(bytes.fromhex(key)) -def test_init_without_event_loop() -> None: [email protected] +async def test_init_without_event_loop() -> None: nest_asyncio.apply() domain = Domain(name="test") diff --git a/tests/syft/lib/allowlist_report.py b/tests/syft/lib/allowlist_report.py index d4128b75e85..2ccbad3d052 100644 --- a/tests/syft/lib/allowlist_report.py +++ b/tests/syft/lib/allowlist_report.py @@ -25,7 +25,7 @@ # syft absolute from syft.lib.torch import allowlist # noqa: E402 -TORCH_VERSION = version.parse(th.__version__) +TORCH_VERSION = version.parse(th.__version__.split("+")[0]) py_ver = sys.version_info PYTHON_VERSION = version.parse(f"{py_ver.major}.{py_ver.minor}") OS_NAME = platform.system().lower() diff --git a/tests/syft/lib/allowlist_test.py b/tests/syft/lib/allowlist_test.py index 80e02226571..7b4cd58b828 100644 --- a/tests/syft/lib/allowlist_test.py +++ b/tests/syft/lib/allowlist_test.py @@ -33,7 +33,7 @@ from syft.lib.torch.tensor_util import TORCH_STR_DTYPE from syft.lib.util import full_name_with_qualname -TORCH_VERSION = version.parse(th.__version__) +TORCH_VERSION = version.parse(th.__version__.split("+")[0]) py_ver = sys.version_info PYTHON_VERSION = version.parse(f"{py_ver.major}.{py_ver.minor}") OS_NAME = platform.system().lower()
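The version-parsing change in this row is small but easy to miss: on Windows CI the CPU-only wheels report `torch.__version__` with a local suffix such as `1.6.0+cpu`, and the patch strips that suffix before parsing. Below is a minimal sketch of the same normalization; the version strings are assumed examples, not taken from the CI logs.

```python
# Minimal sketch of the normalization applied in the diff above: drop the
# local "+cpu" build tag so the allowlist's plain min_version comparisons
# behave the same on every platform.
from packaging import version


def normalize_torch_version(raw: str) -> version.Version:
    return version.parse(raw.split("+")[0])  # "1.6.0+cpu" -> "1.6.0"


assert normalize_torch_version("1.6.0+cpu") == version.parse("1.6.0")
assert normalize_torch_version("1.5.1") >= version.parse("1.5.0")
```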
xonsh__xonsh-2295
Python 3.4 no longer supported? Hi all, First of all, thank you all for your great work. I have noticed that the version bump to 0.5.7 introduced a call to `os.scandir` which is not supported by Python <3.5 afaik. As I am still using Ubuntu 14.04 with Python 3.4 on a few machines, this is a little bit of a headache... I don't know the codebase, but it looks like `xonsh.platform.scandir` could be used instead?
[ { "content": "# -*- coding: utf-8 -*-\n\"\"\"Prompt formatter for simple version control branchs\"\"\"\n# pylint:disable=no-member, invalid-name\n\nimport os\nimport sys\nimport queue\nimport builtins\nimport threading\nimport subprocess\n\nimport xonsh.tools as xt\n\n\ndef _get_git_branch(q):\n try:\n branches = xt.decode_bytes(subprocess.check_output(\n ['git', 'branch'],\n stderr=subprocess.DEVNULL\n )).splitlines()\n except (subprocess.CalledProcessError, OSError, FileNotFoundError):\n q.put(None)\n else:\n for branch in branches:\n if not branch.startswith('* '):\n continue\n elif branch.endswith(')'):\n branch = branch.split()[-1][:-1]\n else:\n branch = branch.split()[-1]\n\n q.put(branch)\n break\n else:\n q.put(None)\n\n\ndef get_git_branch():\n \"\"\"Attempts to find the current git branch. If this could not\n be determined (timeout, not in a git repo, etc.) then this returns None.\n \"\"\"\n branch = None\n timeout = builtins.__xonsh_env__.get('VC_BRANCH_TIMEOUT')\n q = queue.Queue()\n\n t = threading.Thread(target=_get_git_branch, args=(q,))\n t.start()\n t.join(timeout=timeout)\n try:\n branch = q.get_nowait()\n except queue.Empty:\n branch = None\n return branch\n\n\ndef _get_hg_root(q):\n _curpwd = builtins.__xonsh_env__['PWD']\n while True:\n if not os.path.isdir(_curpwd):\n return False\n if any([b.name == '.hg' for b in os.scandir(_curpwd)]):\n q.put(_curpwd)\n break\n else:\n _oldpwd = _curpwd\n _curpwd = os.path.split(_curpwd)[0]\n if _oldpwd == _curpwd:\n return False\n\n\ndef get_hg_branch(root=None):\n \"\"\"Try to get the mercurial branch of the current directory,\n return None if not in a repo or subprocess.TimeoutExpired if timed out.\n \"\"\"\n env = builtins.__xonsh_env__\n timeout = env['VC_BRANCH_TIMEOUT']\n q = queue.Queue()\n t = threading.Thread(target=_get_hg_root, args=(q,))\n t.start()\n t.join(timeout=timeout)\n try:\n root = q.get_nowait()\n except queue.Empty:\n return None\n if env.get('VC_HG_SHOW_BRANCH'):\n # get branch name\n branch_path = os.path.sep.join([root, '.hg', 'branch'])\n if os.path.exists(branch_path):\n with open(branch_path, 'r') as branch_file:\n branch = branch_file.read()\n else:\n branch = 'default'\n else:\n branch = ''\n # add bookmark, if we can\n bookmark_path = os.path.sep.join([root, '.hg', 'bookmarks.current'])\n if os.path.exists(bookmark_path):\n with open(bookmark_path, 'r') as bookmark_file:\n active_bookmark = bookmark_file.read()\n if env.get('VC_HG_SHOW_BRANCH') is True:\n branch = \"{0}, {1}\".format(*(b.strip(os.linesep) for b in\n (branch, active_bookmark)))\n else:\n branch = active_bookmark.strip(os.linesep)\n else:\n branch = branch.strip(os.linesep)\n return branch\n\n\n_FIRST_BRANCH_TIMEOUT = True\n\n\ndef _first_branch_timeout_message():\n global _FIRST_BRANCH_TIMEOUT\n sbtm = builtins.__xonsh_env__['SUPPRESS_BRANCH_TIMEOUT_MESSAGE']\n if not _FIRST_BRANCH_TIMEOUT or sbtm:\n return\n _FIRST_BRANCH_TIMEOUT = False\n print('xonsh: branch timeout: computing the branch name, color, or both '\n 'timed out while formatting the prompt. You may avoid this by '\n 'increasing the value of $VC_BRANCH_TIMEOUT or by removing branch '\n 'fields, like {curr_branch}, from your $PROMPT. See the FAQ '\n 'for more details. This message will be suppressed for the remainder '\n 'of this session. To suppress this message permanently, set '\n '$SUPPRESS_BRANCH_TIMEOUT_MESSAGE = True in your xonshrc file.',\n file=sys.stderr)\n\n\ndef current_branch():\n \"\"\"Gets the branch for a current working directory. 
Returns an empty string\n if the cwd is not a repository. This currently only works for git and hg\n and should be extended in the future. If a timeout occurred, the string\n '<branch-timeout>' is returned.\n \"\"\"\n branch = None\n cmds = builtins.__xonsh_commands_cache__\n # check for binary only once\n if cmds.is_empty():\n has_git = bool(cmds.locate_binary('git', ignore_alias=True))\n has_hg = bool(cmds.locate_binary('hg', ignore_alias=True))\n else:\n has_git = bool(cmds.lazy_locate_binary('git', ignore_alias=True))\n has_hg = bool(cmds.lazy_locate_binary('hg', ignore_alias=True))\n if has_git:\n branch = get_git_branch()\n if not branch and has_hg:\n branch = get_hg_branch()\n if isinstance(branch, subprocess.TimeoutExpired):\n branch = '<branch-timeout>'\n _first_branch_timeout_message()\n return branch or None\n\n\ndef _git_dirty_working_directory(q, include_untracked):\n status = None\n try:\n cmd = ['git', 'status', '--porcelain']\n if include_untracked:\n cmd.append('--untracked-files=normal')\n else:\n cmd.append('--untracked-files=no')\n status = subprocess.check_output(cmd, stderr=subprocess.DEVNULL)\n except (subprocess.CalledProcessError, OSError, FileNotFoundError):\n q.put(None)\n if status is not None:\n return q.put(bool(status))\n\n\ndef git_dirty_working_directory(include_untracked=False):\n \"\"\"Returns whether or not the git directory is dirty. If this could not\n be determined (timeout, file not found, etc.) then this returns None.\n \"\"\"\n timeout = builtins.__xonsh_env__.get(\"VC_BRANCH_TIMEOUT\")\n q = queue.Queue()\n t = threading.Thread(target=_git_dirty_working_directory,\n args=(q, include_untracked))\n t.start()\n t.join(timeout=timeout)\n try:\n return q.get_nowait()\n except queue.Empty:\n return None\n\n\ndef hg_dirty_working_directory():\n \"\"\"Computes whether or not the mercurial working directory is dirty or not.\n If this cannot be determined, None is returned.\n \"\"\"\n env = builtins.__xonsh_env__\n cwd = env['PWD']\n denv = env.detype()\n vcbt = env['VC_BRANCH_TIMEOUT']\n # Override user configurations settings and aliases\n denv['HGRCPATH'] = ''\n try:\n s = subprocess.check_output(['hg', 'identify', '--id'],\n stderr=subprocess.PIPE, cwd=cwd,\n timeout=vcbt, universal_newlines=True,\n env=denv)\n return s.strip(os.linesep).endswith('+')\n except (subprocess.CalledProcessError, subprocess.TimeoutExpired,\n FileNotFoundError):\n return None\n\n\ndef dirty_working_directory():\n \"\"\"Returns a boolean as to whether there are uncommitted files in version\n control repository we are inside. If this cannot be determined, returns\n None. Currently supports git and hg.\n \"\"\"\n dwd = None\n cmds = builtins.__xonsh_commands_cache__\n if cmds.lazy_locate_binary('git'):\n dwd = git_dirty_working_directory()\n if cmds.lazy_locate_binary('hg') and dwd is None:\n dwd = hg_dirty_working_directory()\n return dwd\n\n\ndef branch_color():\n \"\"\"Return red if the current branch is dirty, yellow if the dirtiness can\n not be determined, and green if it clean. These are bold, intense colors\n for the foreground.\n \"\"\"\n dwd = dirty_working_directory()\n if dwd is None:\n color = '{BOLD_INTENSE_YELLOW}'\n elif dwd:\n color = '{BOLD_INTENSE_RED}'\n else:\n color = '{BOLD_INTENSE_GREEN}'\n return color\n\n\ndef branch_bg_color():\n \"\"\"Return red if the current branch is dirty, yellow if the dirtiness can\n not be determined, and green if it clean. 
These are bacground colors.\n \"\"\"\n dwd = dirty_working_directory()\n if dwd is None:\n color = '{BACKGROUND_YELLOW}'\n elif dwd:\n color = '{BACKGROUND_RED}'\n else:\n color = '{BACKGROUND_GREEN}'\n return color\n", "path": "xonsh/prompt/vc.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\n\"\"\"Prompt formatter for simple version control branchs\"\"\"\n# pylint:disable=no-member, invalid-name\n\nimport os\nimport sys\nimport queue\nimport builtins\nimport threading\nimport subprocess\n\nimport xonsh.tools as xt\n\n\ndef _get_git_branch(q):\n try:\n branches = xt.decode_bytes(subprocess.check_output(\n ['git', 'branch'],\n stderr=subprocess.DEVNULL\n )).splitlines()\n except (subprocess.CalledProcessError, OSError, FileNotFoundError):\n q.put(None)\n else:\n for branch in branches:\n if not branch.startswith('* '):\n continue\n elif branch.endswith(')'):\n branch = branch.split()[-1][:-1]\n else:\n branch = branch.split()[-1]\n\n q.put(branch)\n break\n else:\n q.put(None)\n\n\ndef get_git_branch():\n \"\"\"Attempts to find the current git branch. If this could not\n be determined (timeout, not in a git repo, etc.) then this returns None.\n \"\"\"\n branch = None\n timeout = builtins.__xonsh_env__.get('VC_BRANCH_TIMEOUT')\n q = queue.Queue()\n\n t = threading.Thread(target=_get_git_branch, args=(q,))\n t.start()\n t.join(timeout=timeout)\n try:\n branch = q.get_nowait()\n except queue.Empty:\n branch = None\n return branch\n\n\ndef _get_hg_root(q):\n _curpwd = builtins.__xonsh_env__['PWD']\n while True:\n if not os.path.isdir(_curpwd):\n return False\n if any([b.name == '.hg' for b in xt.scandir(_curpwd)]):\n q.put(_curpwd)\n break\n else:\n _oldpwd = _curpwd\n _curpwd = os.path.split(_curpwd)[0]\n if _oldpwd == _curpwd:\n return False\n\n\ndef get_hg_branch(root=None):\n \"\"\"Try to get the mercurial branch of the current directory,\n return None if not in a repo or subprocess.TimeoutExpired if timed out.\n \"\"\"\n env = builtins.__xonsh_env__\n timeout = env['VC_BRANCH_TIMEOUT']\n q = queue.Queue()\n t = threading.Thread(target=_get_hg_root, args=(q,))\n t.start()\n t.join(timeout=timeout)\n try:\n root = q.get_nowait()\n except queue.Empty:\n return None\n if env.get('VC_HG_SHOW_BRANCH'):\n # get branch name\n branch_path = os.path.sep.join([root, '.hg', 'branch'])\n if os.path.exists(branch_path):\n with open(branch_path, 'r') as branch_file:\n branch = branch_file.read()\n else:\n branch = 'default'\n else:\n branch = ''\n # add bookmark, if we can\n bookmark_path = os.path.sep.join([root, '.hg', 'bookmarks.current'])\n if os.path.exists(bookmark_path):\n with open(bookmark_path, 'r') as bookmark_file:\n active_bookmark = bookmark_file.read()\n if env.get('VC_HG_SHOW_BRANCH') is True:\n branch = \"{0}, {1}\".format(*(b.strip(os.linesep) for b in\n (branch, active_bookmark)))\n else:\n branch = active_bookmark.strip(os.linesep)\n else:\n branch = branch.strip(os.linesep)\n return branch\n\n\n_FIRST_BRANCH_TIMEOUT = True\n\n\ndef _first_branch_timeout_message():\n global _FIRST_BRANCH_TIMEOUT\n sbtm = builtins.__xonsh_env__['SUPPRESS_BRANCH_TIMEOUT_MESSAGE']\n if not _FIRST_BRANCH_TIMEOUT or sbtm:\n return\n _FIRST_BRANCH_TIMEOUT = False\n print('xonsh: branch timeout: computing the branch name, color, or both '\n 'timed out while formatting the prompt. You may avoid this by '\n 'increasing the value of $VC_BRANCH_TIMEOUT or by removing branch '\n 'fields, like {curr_branch}, from your $PROMPT. See the FAQ '\n 'for more details. This message will be suppressed for the remainder '\n 'of this session. To suppress this message permanently, set '\n '$SUPPRESS_BRANCH_TIMEOUT_MESSAGE = True in your xonshrc file.',\n file=sys.stderr)\n\n\ndef current_branch():\n \"\"\"Gets the branch for a current working directory. 
Returns an empty string\n if the cwd is not a repository. This currently only works for git and hg\n and should be extended in the future. If a timeout occurred, the string\n '<branch-timeout>' is returned.\n \"\"\"\n branch = None\n cmds = builtins.__xonsh_commands_cache__\n # check for binary only once\n if cmds.is_empty():\n has_git = bool(cmds.locate_binary('git', ignore_alias=True))\n has_hg = bool(cmds.locate_binary('hg', ignore_alias=True))\n else:\n has_git = bool(cmds.lazy_locate_binary('git', ignore_alias=True))\n has_hg = bool(cmds.lazy_locate_binary('hg', ignore_alias=True))\n if has_git:\n branch = get_git_branch()\n if not branch and has_hg:\n branch = get_hg_branch()\n if isinstance(branch, subprocess.TimeoutExpired):\n branch = '<branch-timeout>'\n _first_branch_timeout_message()\n return branch or None\n\n\ndef _git_dirty_working_directory(q, include_untracked):\n status = None\n try:\n cmd = ['git', 'status', '--porcelain']\n if include_untracked:\n cmd.append('--untracked-files=normal')\n else:\n cmd.append('--untracked-files=no')\n status = subprocess.check_output(cmd, stderr=subprocess.DEVNULL)\n except (subprocess.CalledProcessError, OSError, FileNotFoundError):\n q.put(None)\n if status is not None:\n return q.put(bool(status))\n\n\ndef git_dirty_working_directory(include_untracked=False):\n \"\"\"Returns whether or not the git directory is dirty. If this could not\n be determined (timeout, file not found, etc.) then this returns None.\n \"\"\"\n timeout = builtins.__xonsh_env__.get(\"VC_BRANCH_TIMEOUT\")\n q = queue.Queue()\n t = threading.Thread(target=_git_dirty_working_directory,\n args=(q, include_untracked))\n t.start()\n t.join(timeout=timeout)\n try:\n return q.get_nowait()\n except queue.Empty:\n return None\n\n\ndef hg_dirty_working_directory():\n \"\"\"Computes whether or not the mercurial working directory is dirty or not.\n If this cannot be determined, None is returned.\n \"\"\"\n env = builtins.__xonsh_env__\n cwd = env['PWD']\n denv = env.detype()\n vcbt = env['VC_BRANCH_TIMEOUT']\n # Override user configurations settings and aliases\n denv['HGRCPATH'] = ''\n try:\n s = subprocess.check_output(['hg', 'identify', '--id'],\n stderr=subprocess.PIPE, cwd=cwd,\n timeout=vcbt, universal_newlines=True,\n env=denv)\n return s.strip(os.linesep).endswith('+')\n except (subprocess.CalledProcessError, subprocess.TimeoutExpired,\n FileNotFoundError):\n return None\n\n\ndef dirty_working_directory():\n \"\"\"Returns a boolean as to whether there are uncommitted files in version\n control repository we are inside. If this cannot be determined, returns\n None. Currently supports git and hg.\n \"\"\"\n dwd = None\n cmds = builtins.__xonsh_commands_cache__\n if cmds.lazy_locate_binary('git'):\n dwd = git_dirty_working_directory()\n if cmds.lazy_locate_binary('hg') and dwd is None:\n dwd = hg_dirty_working_directory()\n return dwd\n\n\ndef branch_color():\n \"\"\"Return red if the current branch is dirty, yellow if the dirtiness can\n not be determined, and green if it clean. These are bold, intense colors\n for the foreground.\n \"\"\"\n dwd = dirty_working_directory()\n if dwd is None:\n color = '{BOLD_INTENSE_YELLOW}'\n elif dwd:\n color = '{BOLD_INTENSE_RED}'\n else:\n color = '{BOLD_INTENSE_GREEN}'\n return color\n\n\ndef branch_bg_color():\n \"\"\"Return red if the current branch is dirty, yellow if the dirtiness can\n not be determined, and green if it clean. 
These are bacground colors.\n \"\"\"\n dwd = dirty_working_directory()\n if dwd is None:\n color = '{BACKGROUND_YELLOW}'\n elif dwd:\n color = '{BACKGROUND_RED}'\n else:\n color = '{BACKGROUND_GREEN}'\n return color\n", "path": "xonsh/prompt/vc.py" } ]
diff --git a/news/scandir_bug.rst b/news/scandir_bug.rst new file mode 100644 index 0000000000..3bfbfda90d --- /dev/null +++ b/news/scandir_bug.rst @@ -0,0 +1,13 @@ +**Added:** None + +**Changed:** None + +**Deprecated:** None + +**Removed:** None + +**Fixed:** + +* Fixed a bug on py34 where os.scandir was used by accident. + +**Security:** None diff --git a/tests/test_prompt.py b/tests/test_prompt.py index 6d1f274b8b..bdbe84a9fa 100644 --- a/tests/test_prompt.py +++ b/tests/test_prompt.py @@ -150,6 +150,17 @@ def test_test_repo(test_repo): assert os.path.isfile(os.path.join(test_repo['dir'], 'test-file')) +def test_no_repo(xonsh_builtins): + import queue + temp_dir = tempfile.mkdtemp() + xonsh_builtins.__xonsh_env__ = Env(VC_BRANCH_TIMEOUT=2, PWD=temp_dir) + q = queue.Queue() + try: + vc._get_hg_root(q) + except AttributeError: + assert False + + def test_vc_get_branch(test_repo, xonsh_builtins): xonsh_builtins.__xonsh_env__ = Env(VC_BRANCH_TIMEOUT=2) # get corresponding function from vc module diff --git a/xonsh/prompt/vc.py b/xonsh/prompt/vc.py index 3f7ad61611..6030b264fa 100644 --- a/xonsh/prompt/vc.py +++ b/xonsh/prompt/vc.py @@ -58,7 +58,7 @@ def _get_hg_root(q): while True: if not os.path.isdir(_curpwd): return False - if any([b.name == '.hg' for b in os.scandir(_curpwd)]): + if any([b.name == '.hg' for b in xt.scandir(_curpwd)]): q.put(_curpwd) break else:
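The fix swaps `os.scandir` for the wrapper exposed through `xonsh.tools` so the prompt code keeps working on Python 3.4. The snippet below is a hedged sketch of what such a compatibility shim generally looks like; it is an illustration, not xonsh's actual implementation.

```python
# Illustrative compatibility shim: prefer the stdlib os.scandir (Python 3.5+)
# and fall back to the "scandir" backport package on older interpreters.
import os

try:
    scandir = os.scandir
except AttributeError:  # Python 3.4 has no os.scandir
    from scandir import scandir  # PyPI backport


def contains_hg_dir(path):
    """Return True if `path` has a .hg entry, mirroring _get_hg_root's check."""
    return any(entry.name == '.hg' for entry in scandir(path))
```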
ddionrails__ddionrails-801
Add dynamic range slider for publication year facet see <https://opensource.appbase.io/reactive-manual/vue/range-components/dynamicrangeslider.html>
[ { "content": "# -*- coding: utf-8 -*-\n\n\"\"\" Search documents for indexing models from ddionrails.publications app into Elasticsearch\n\n\nAuthors:\n * 2019 Heinz-Alexander Fütterer (DIW Berlin)\n\nLicense:\n | **AGPL-3.0 GNU AFFERO GENERAL PUBLIC LICENSE (AGPL) 3.0**.\n | See LICENSE at the GitHub\n `repository <https://github.com/ddionrails/ddionrails/blob/master/LICENSE.md>`_\n | or at\n `<https://www.gnu.org/licenses/agpl-3.0.txt>`_.\n\"\"\"\n\nfrom django.conf import settings\nfrom django.db.models import QuerySet\nfrom django_elasticsearch_dsl import Document, fields\nfrom django_elasticsearch_dsl.registries import registry\n\nfrom .models import Publication\n\n\[email protected]_document\nclass PublicationDocument(Document):\n \"\"\" Search document for publications.Publication \"\"\"\n\n # doc_type was removed in Elasticsearch 7\n type = fields.KeywordField()\n\n @staticmethod\n def prepare_type(publication: Publication) -> str:\n return \"publication\"\n\n # facets\n sub_type = fields.KeywordField()\n study = fields.KeywordField()\n year = fields.KeywordField()\n\n # prepare_FIELD will be executed while indexing FIELD\n @staticmethod\n def prepare_study(publication: Publication) -> str:\n \"\"\" Return the related study \"\"\"\n return publication.study.title()\n\n class Index: # pylint: disable=missing-docstring,too-few-public-methods\n # Name of the Elasticsearch index\n name = f\"{settings.ELASTICSEARCH_DSL_INDEX_PREFIX}publications\"\n\n class Django: # pylint: disable=missing-docstring,too-few-public-methods\n model = Publication # The model associated with this Document\n\n # The fields of the model you want to be indexed in Elasticsearch\n fields = (\"abstract\", \"author\", \"cite\", \"doi\", \"name\", \"title\", \"url\")\n\n def get_queryset(self) -> QuerySet:\n \"\"\"\n Return the queryset that should be indexed by this doc type,\n with select related study.\n \"\"\"\n return super().get_queryset().select_related(\"study\")\n", "path": "ddionrails/publications/documents.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\n\n\"\"\" Search documents for indexing models from ddionrails.publications app into Elasticsearch\n\n\nAuthors:\n * 2019 Heinz-Alexander Fütterer (DIW Berlin)\n\nLicense:\n | **AGPL-3.0 GNU AFFERO GENERAL PUBLIC LICENSE (AGPL) 3.0**.\n | See LICENSE at the GitHub\n `repository <https://github.com/ddionrails/ddionrails/blob/master/LICENSE.md>`_\n | or at\n `<https://www.gnu.org/licenses/agpl-3.0.txt>`_.\n\"\"\"\n\nfrom django.conf import settings\nfrom django.db.models import QuerySet\nfrom django_elasticsearch_dsl import Document, fields\nfrom django_elasticsearch_dsl.registries import registry\n\nfrom .models import Publication\n\n\[email protected]_document\nclass PublicationDocument(Document):\n \"\"\" Search document for publications.Publication \"\"\"\n\n # doc_type was removed in Elasticsearch 7\n type = fields.KeywordField()\n\n @staticmethod\n def prepare_type(publication: Publication) -> str:\n return \"publication\"\n\n # facets\n sub_type = fields.KeywordField()\n study = fields.KeywordField()\n year = fields.IntegerField()\n\n # prepare_FIELD will be executed while indexing FIELD\n @staticmethod\n def prepare_study(publication: Publication) -> str:\n \"\"\" Return the related study \"\"\"\n return publication.study.title()\n\n class Index: # pylint: disable=missing-docstring,too-few-public-methods\n # Name of the Elasticsearch index\n name = f\"{settings.ELASTICSEARCH_DSL_INDEX_PREFIX}publications\"\n\n class Django: # pylint: disable=missing-docstring,too-few-public-methods\n model = Publication # The model associated with this Document\n\n # The fields of the model you want to be indexed in Elasticsearch\n fields = (\"abstract\", \"author\", \"cite\", \"doi\", \"name\", \"title\", \"url\")\n\n def get_queryset(self) -> QuerySet:\n \"\"\"\n Return the queryset that should be indexed by this doc type,\n with select related study.\n \"\"\"\n return super().get_queryset().select_related(\"study\")\n", "path": "ddionrails/publications/documents.py" } ]
diff --git a/assets/js/search/components/facets/PublicationYearFacet.vue b/assets/js/search/components/facets/PublicationYearFacet.vue index 507c41b3a..4ad01f6ff 100644 --- a/assets/js/search/components/facets/PublicationYearFacet.vue +++ b/assets/js/search/components/facets/PublicationYearFacet.vue @@ -1,22 +1,25 @@ <template> - <multi-list + <dynamic-range-slider + dataField="year" componentId="Year" - data-field="year" title="Year" - :showSearch="false" - selectAllLabel="Select all" + :showFilter="true" :URLParams="true" - :react="react" + :rangeLabels="function(min, max){ + return { + start: min, + end: max, + } + }" class="card facet" :innerClass="{ - title: 'card-header', + title: 'card-header' }" - /> +/> </template> <script> export default { - name: "PublicationYearFacet", - props: ["react"] + name: "PublicationYearFacet" }; </script> \ No newline at end of file diff --git a/assets/js/search/components/searches/PublicationSearch.vue b/assets/js/search/components/searches/PublicationSearch.vue index c5604b961..a5f5da405 100644 --- a/assets/js/search/components/searches/PublicationSearch.vue +++ b/assets/js/search/components/searches/PublicationSearch.vue @@ -19,8 +19,8 @@ <div class="facet-container col-lg-3 my-2 float-left"> <!-- begin facets --> <study-facet :react="{ and: ['Search', 'Type', 'Year'] }" /> + <publication-year-facet /> <publication-type-facet :react="{ and: ['Search', 'Study', 'Year'] }" /> - <publication-year-facet :react="{ and: ['Search', 'Study', 'Type'] }" /> <!-- end facets --> </div> <div class="col-lg-8 m-0 p-0 float-right"> diff --git a/ddionrails/publications/documents.py b/ddionrails/publications/documents.py index 8a2914a4d..221151e7f 100644 --- a/ddionrails/publications/documents.py +++ b/ddionrails/publications/documents.py @@ -36,7 +36,7 @@ def prepare_type(publication: Publication) -> str: # facets sub_type = fields.KeywordField() study = fields.KeywordField() - year = fields.KeywordField() + year = fields.IntegerField() # prepare_FIELD will be executed while indexing FIELD @staticmethod
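The diff also changes `year` from a `KeywordField` to an `IntegerField` because a dynamic range slider is driven by numeric min/max aggregations, which Elasticsearch does not support on keyword-typed fields. A hedged sketch of the kind of aggregation such a component issues is shown below; the query body is illustrative, not copied from ReactiveSearch.

```python
# Illustrative Elasticsearch aggregation a range slider relies on: min/max
# over the numeric "year" field to establish the slider bounds.
year_bounds_query = {
    "size": 0,
    "aggs": {
        "min_year": {"min": {"field": "year"}},
        "max_year": {"max": {"field": "year"}},
    },
}
```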
facebookresearch__ParlAI-1671
embeddingsize or embedding_size When I search 'embeddingsize' in this repository, I see many files referencing `opt['embeddingsize']` and similarly for 'embedding_size'. Unless there is a real reason for having both, could you please merge the two options 'embeddingsize' and 'embedding_size'? This threw me off. Here is one example set of files: 'embeddingsize' https://github.com/facebookresearch/ParlAI/blob/a43f2880719c5a048fdf3d0aa5d5b25eeb9a1a41/projects/wizard_of_wikipedia/generator/train_end2end.py#L21 'embedding_size' https://github.com/facebookresearch/ParlAI/blob/8ab911a29dbbe5cfb7d3e615cccf8f4c76066ff1/projects/wizard_of_wikipedia/generator/agents.py#L33
[ { "content": "#!/usr/bin/env python\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom parlai.scripts.train_model import setup_args, TrainLoop\n\nif __name__ == '__main__':\n parser = setup_args()\n parser.set_defaults(\n task='wizard_of_wikipedia:generator:random_split',\n model='projects.wizard_of_wikipedia.generator.agents:EndToEndAgent',\n model_file='/tmp/end2end_generator/model',\n dict_lower=True,\n dict_tokenizer='bpe',\n n_layers=5,\n n_heads=2,\n dropout=0.20,\n ffn_size=512,\n embeddingsize=256,\n log_every_n_secs=10,\n validation_patience=12,\n validation_metric='ppl',\n validation_metric_mode='min',\n validation_every_n_epochs=0.5,\n n_positions=128,\n truncate=128,\n max_knowledge=32,\n knowledge_alpha=0.95,\n knowledge_truncate=32,\n learningrate=5e-4,\n warmup_updates=5000,\n clip=0.1,\n lr_scheduler='invsqrt',\n embedding_type='fasttext',\n beam_size=1,\n skip_generation=False,\n batchsize=64,\n )\n TrainLoop(parser.parse_args()).train()\n", "path": "projects/wizard_of_wikipedia/generator/train_end2end.py" } ]
[ { "content": "#!/usr/bin/env python\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom parlai.scripts.train_model import setup_args, TrainLoop\n\nif __name__ == '__main__':\n parser = setup_args()\n parser.set_defaults(\n task='wizard_of_wikipedia:generator:random_split',\n model='projects.wizard_of_wikipedia.generator.agents:EndToEndAgent',\n model_file='/tmp/end2end_generator/model',\n dict_lower=True,\n dict_tokenizer='bpe',\n n_layers=5,\n n_heads=2,\n dropout=0.20,\n ffn_size=512,\n embedding_size=256,\n log_every_n_secs=10,\n validation_patience=12,\n validation_metric='ppl',\n validation_metric_mode='min',\n validation_every_n_epochs=0.5,\n n_positions=128,\n truncate=128,\n max_knowledge=32,\n knowledge_alpha=0.95,\n knowledge_truncate=32,\n learningrate=5e-4,\n warmup_updates=5000,\n clip=0.1,\n lr_scheduler='invsqrt',\n embedding_type='fasttext',\n beam_size=1,\n skip_generation=False,\n batchsize=64,\n )\n TrainLoop(parser.parse_args()).train()\n", "path": "projects/wizard_of_wikipedia/generator/train_end2end.py" } ]
diff --git a/projects/wizard_of_wikipedia/generator/train_end2end.py b/projects/wizard_of_wikipedia/generator/train_end2end.py index ff9f57ad747..74edc3f887e 100644 --- a/projects/wizard_of_wikipedia/generator/train_end2end.py +++ b/projects/wizard_of_wikipedia/generator/train_end2end.py @@ -18,7 +18,7 @@ n_heads=2, dropout=0.20, ffn_size=512, - embeddingsize=256, + embedding_size=256, log_every_n_secs=10, validation_patience=12, validation_metric='ppl',
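The one-line diff resolves the mismatch for this training script, but the underlying annoyance is that two spellings of the same option can coexist. Below is a hedged sketch of how such aliases could be collapsed onto one canonical key; `canonicalize_opt` is a hypothetical helper, not part of ParlAI.

```python
# Hypothetical helper: map legacy option spellings onto their canonical names
# so a setting is never silently ignored because of the wrong key.
def canonicalize_opt(opt: dict) -> dict:
    aliases = {"embeddingsize": "embedding_size"}
    for legacy, canonical in aliases.items():
        if legacy in opt and canonical not in opt:
            opt[canonical] = opt.pop(legacy)
    return opt


print(canonicalize_opt({"embeddingsize": 256}))  # {'embedding_size': 256}
```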
dbt-labs__dbt-core-8568
[CT-2982] Fix untyped functions in task/base.py (mypy warning) ### Problem When we hit a mypy error the console is flooded with mypy warnings as well. Clean up the warning to improve developer experience. ### Reproduction Simply force a mypy error (change types) and the warnings will be generated. ### Relevant Output ``` core/dbt/task/base.py:204: note: By default the bodies of untyped functions are not checked, consider using --check-untyped-defs [annotation-unchecked] ```
[ { "content": "import os\nimport threading\nimport time\nimport traceback\nfrom abc import ABCMeta, abstractmethod\nfrom contextlib import nullcontext\nfrom datetime import datetime\nfrom typing import Type, Union, Dict, Any, Optional\n\nimport dbt.exceptions\nfrom dbt import tracking\nfrom dbt.adapters.factory import get_adapter\nfrom dbt.config import RuntimeConfig, Project\nfrom dbt.config.profile import read_profile\nfrom dbt.contracts.graph.manifest import Manifest\nfrom dbt.contracts.results import (\n NodeStatus,\n RunResult,\n collect_timing_info,\n RunStatus,\n RunningStatus,\n)\nfrom dbt.events.contextvars import get_node_info\nfrom dbt.events.functions import fire_event\nfrom dbt.events.types import (\n LogDbtProjectError,\n LogDbtProfileError,\n CatchableExceptionOnRun,\n InternalErrorOnRun,\n GenericExceptionOnRun,\n NodeConnectionReleaseError,\n LogDebugStackTrace,\n SkippingDetails,\n LogSkipBecauseError,\n NodeCompiling,\n NodeExecuting,\n)\nfrom dbt.exceptions import (\n NotImplementedError,\n CompilationError,\n DbtRuntimeError,\n DbtInternalError,\n)\nfrom dbt.flags import get_flags\nfrom dbt.graph import Graph\nfrom dbt.logger import log_manager\nfrom .printer import print_run_result_error\n\n\nclass NoneConfig:\n @classmethod\n def from_args(cls, args):\n return None\n\n\ndef read_profiles(profiles_dir=None):\n \"\"\"This is only used for some error handling\"\"\"\n if profiles_dir is None:\n profiles_dir = get_flags().PROFILES_DIR\n\n raw_profiles = read_profile(profiles_dir)\n\n if raw_profiles is None:\n profiles = {}\n else:\n profiles = {k: v for (k, v) in raw_profiles.items() if k != \"config\"}\n\n return profiles\n\n\nclass BaseTask(metaclass=ABCMeta):\n ConfigType: Union[Type[NoneConfig], Type[Project]] = NoneConfig\n\n def __init__(self, args, config, project=None):\n self.args = args\n self.config = config\n self.project = config if isinstance(config, Project) else project\n\n @classmethod\n def pre_init_hook(cls, args):\n \"\"\"A hook called before the task is initialized.\"\"\"\n if args.log_format == \"json\":\n log_manager.format_json()\n else:\n log_manager.format_text()\n\n @classmethod\n def set_log_format(cls):\n if get_flags().LOG_FORMAT == \"json\":\n log_manager.format_json()\n else:\n log_manager.format_text()\n\n @classmethod\n def from_args(cls, args, *pargs, **kwargs):\n try:\n # This is usually RuntimeConfig\n config = cls.ConfigType.from_args(args)\n except dbt.exceptions.DbtProjectError as exc:\n fire_event(LogDbtProjectError(exc=str(exc)))\n\n tracking.track_invalid_invocation(args=args, result_type=exc.result_type)\n raise dbt.exceptions.DbtRuntimeError(\"Could not run dbt\") from exc\n except dbt.exceptions.DbtProfileError as exc:\n all_profile_names = list(read_profiles(get_flags().PROFILES_DIR).keys())\n fire_event(LogDbtProfileError(exc=str(exc), profiles=all_profile_names))\n tracking.track_invalid_invocation(args=args, result_type=exc.result_type)\n raise dbt.exceptions.DbtRuntimeError(\"Could not run dbt\") from exc\n return cls(args, config, *pargs, **kwargs)\n\n @abstractmethod\n def run(self):\n raise dbt.exceptions.NotImplementedError(\"Not Implemented\")\n\n def interpret_results(self, results):\n return True\n\n\ndef get_nearest_project_dir(project_dir: Optional[str]) -> str:\n # If the user provides an explicit project directory, use that\n # but don't look at parent directories.\n if project_dir:\n project_file = os.path.join(project_dir, \"dbt_project.yml\")\n if os.path.exists(project_file):\n return project_dir\n 
else:\n raise dbt.exceptions.DbtRuntimeError(\n \"fatal: Invalid --project-dir flag. Not a dbt project. \"\n \"Missing dbt_project.yml file\"\n )\n\n root_path = os.path.abspath(os.sep)\n cwd = os.getcwd()\n\n while cwd != root_path:\n project_file = os.path.join(cwd, \"dbt_project.yml\")\n if os.path.exists(project_file):\n return cwd\n cwd = os.path.dirname(cwd)\n\n raise dbt.exceptions.DbtRuntimeError(\n \"fatal: Not a dbt project (or any of the parent directories). \"\n \"Missing dbt_project.yml file\"\n )\n\n\ndef move_to_nearest_project_dir(project_dir: Optional[str]) -> str:\n nearest_project_dir = get_nearest_project_dir(project_dir)\n os.chdir(nearest_project_dir)\n return nearest_project_dir\n\n\n# TODO: look into deprecating this class in favor of several small functions that\n# produce the same behavior. currently this class only contains manifest compilation,\n# holding a manifest, and moving direcories.\nclass ConfiguredTask(BaseTask):\n ConfigType = RuntimeConfig\n\n def __init__(self, args, config, manifest: Optional[Manifest] = None):\n super().__init__(args, config)\n self.graph: Optional[Graph] = None\n self.manifest = manifest\n\n def compile_manifest(self):\n if self.manifest is None:\n raise DbtInternalError(\"compile_manifest called before manifest was loaded\")\n\n start_compile_manifest = time.perf_counter()\n\n # we cannot get adapter in init since it will break rpc #5579\n adapter = get_adapter(self.config)\n compiler = adapter.get_compiler()\n self.graph = compiler.compile(self.manifest)\n\n compile_time = time.perf_counter() - start_compile_manifest\n if dbt.tracking.active_user is not None:\n dbt.tracking.track_runnable_timing({\"graph_compilation_elapsed\": compile_time})\n\n @classmethod\n def from_args(cls, args, *pargs, **kwargs):\n move_to_nearest_project_dir(args.project_dir)\n return super().from_args(args, *pargs, **kwargs)\n\n\nclass ExecutionContext:\n \"\"\"During execution and error handling, dbt makes use of mutable state:\n timing information and the newest (compiled vs executed) form of the node.\n \"\"\"\n\n def __init__(self, node):\n self.timing = []\n self.node = node\n\n\nclass BaseRunner(metaclass=ABCMeta):\n def __init__(self, config, adapter, node, node_index, num_nodes):\n self.config = config\n self.adapter = adapter\n self.node = node\n self.node_index = node_index\n self.num_nodes = num_nodes\n\n self.skip = False\n self.skip_cause: Optional[RunResult] = None\n\n self.run_ephemeral_models = False\n\n @abstractmethod\n def compile(self, manifest: Manifest) -> Any:\n pass\n\n def get_result_status(self, result) -> Dict[str, str]:\n if result.status == NodeStatus.Error:\n return {\"node_status\": \"error\", \"node_error\": str(result.message)}\n elif result.status == NodeStatus.Skipped:\n return {\"node_status\": \"skipped\"}\n elif result.status == NodeStatus.Fail:\n return {\"node_status\": \"failed\"}\n elif result.status == NodeStatus.Warn:\n return {\"node_status\": \"warn\"}\n else:\n return {\"node_status\": \"passed\"}\n\n def run_with_hooks(self, manifest):\n if self.skip:\n return self.on_skip()\n\n # no before/after printing for ephemeral mdoels\n if not self.node.is_ephemeral_model:\n self.before_execute()\n\n result = self.safe_run(manifest)\n self.node.update_event_status(\n node_status=result.status, finished_at=datetime.utcnow().isoformat()\n )\n\n if not self.node.is_ephemeral_model:\n self.after_execute(result)\n\n return result\n\n def _build_run_result(\n self,\n node,\n start_time,\n status,\n timing_info,\n 
message,\n agate_table=None,\n adapter_response=None,\n failures=None,\n ):\n execution_time = time.time() - start_time\n thread_id = threading.current_thread().name\n if adapter_response is None:\n adapter_response = {}\n return RunResult(\n status=status,\n thread_id=thread_id,\n execution_time=execution_time,\n timing=timing_info,\n message=message,\n node=node,\n agate_table=agate_table,\n adapter_response=adapter_response,\n failures=failures,\n )\n\n def error_result(self, node, message, start_time, timing_info):\n return self._build_run_result(\n node=node,\n start_time=start_time,\n status=RunStatus.Error,\n timing_info=timing_info,\n message=message,\n )\n\n def ephemeral_result(self, node, start_time, timing_info):\n return self._build_run_result(\n node=node,\n start_time=start_time,\n status=RunStatus.Success,\n timing_info=timing_info,\n message=None,\n )\n\n def from_run_result(self, result, start_time, timing_info):\n return self._build_run_result(\n node=result.node,\n start_time=start_time,\n status=result.status,\n timing_info=timing_info,\n message=result.message,\n agate_table=result.agate_table,\n adapter_response=result.adapter_response,\n failures=result.failures,\n )\n\n def compile_and_execute(self, manifest, ctx):\n result = None\n with self.adapter.connection_for(self.node) if get_flags().INTROSPECT else nullcontext():\n ctx.node.update_event_status(node_status=RunningStatus.Compiling)\n fire_event(\n NodeCompiling(\n node_info=ctx.node.node_info,\n )\n )\n with collect_timing_info(\"compile\", ctx.timing.append):\n # if we fail here, we still have a compiled node to return\n # this has the benefit of showing a build path for the errant\n # model\n ctx.node = self.compile(manifest)\n\n # for ephemeral nodes, we only want to compile, not run\n if not ctx.node.is_ephemeral_model or self.run_ephemeral_models:\n ctx.node.update_event_status(node_status=RunningStatus.Executing)\n fire_event(\n NodeExecuting(\n node_info=ctx.node.node_info,\n )\n )\n with collect_timing_info(\"execute\", ctx.timing.append):\n result = self.run(ctx.node, manifest)\n ctx.node = result.node\n\n return result\n\n def _handle_catchable_exception(self, e, ctx):\n if e.node is None:\n e.add_node(ctx.node)\n\n fire_event(\n CatchableExceptionOnRun(\n exc=str(e), exc_info=traceback.format_exc(), node_info=get_node_info()\n )\n )\n return str(e)\n\n def _handle_internal_exception(self, e, ctx):\n fire_event(InternalErrorOnRun(build_path=self.node.build_path, exc=str(e)))\n return str(e)\n\n def _handle_generic_exception(self, e, ctx):\n fire_event(\n GenericExceptionOnRun(\n build_path=self.node.build_path,\n unique_id=self.node.unique_id,\n exc=str(e),\n )\n )\n fire_event(LogDebugStackTrace(exc_info=traceback.format_exc()))\n\n return str(e)\n\n def handle_exception(self, e, ctx):\n catchable_errors = (CompilationError, DbtRuntimeError)\n if isinstance(e, catchable_errors):\n error = self._handle_catchable_exception(e, ctx)\n elif isinstance(e, DbtInternalError):\n error = self._handle_internal_exception(e, ctx)\n else:\n error = self._handle_generic_exception(e, ctx)\n return error\n\n def safe_run(self, manifest):\n started = time.time()\n ctx = ExecutionContext(self.node)\n error = None\n result = None\n\n try:\n result = self.compile_and_execute(manifest, ctx)\n except Exception as e:\n error = self.handle_exception(e, ctx)\n finally:\n exc_str = self._safe_release_connection()\n\n # if releasing failed and the result doesn't have an error yet, set\n # an error\n if (\n exc_str is not 
None\n and result is not None\n and result.status != NodeStatus.Error\n and error is None\n ):\n error = exc_str\n\n if error is not None:\n result = self.error_result(ctx.node, error, started, ctx.timing)\n elif result is not None:\n result = self.from_run_result(result, started, ctx.timing)\n else:\n result = self.ephemeral_result(ctx.node, started, ctx.timing)\n return result\n\n def _safe_release_connection(self):\n \"\"\"Try to release a connection. If an exception is hit, log and return\n the error string.\n \"\"\"\n try:\n self.adapter.release_connection()\n except Exception as exc:\n fire_event(\n NodeConnectionReleaseError(\n node_name=self.node.name, exc=str(exc), exc_info=traceback.format_exc()\n )\n )\n return str(exc)\n\n return None\n\n def before_execute(self):\n raise NotImplementedError()\n\n def execute(self, compiled_node, manifest):\n raise NotImplementedError()\n\n def run(self, compiled_node, manifest):\n return self.execute(compiled_node, manifest)\n\n def after_execute(self, result):\n raise NotImplementedError()\n\n def _skip_caused_by_ephemeral_failure(self):\n if self.skip_cause is None or self.skip_cause.node is None:\n return False\n return self.skip_cause.node.is_ephemeral_model\n\n def on_skip(self):\n schema_name = self.node.schema\n node_name = self.node.name\n\n error_message = None\n if not self.node.is_ephemeral_model:\n # if this model was skipped due to an upstream ephemeral model\n # failure, print a special 'error skip' message.\n if self._skip_caused_by_ephemeral_failure():\n fire_event(\n LogSkipBecauseError(\n schema=schema_name,\n relation=node_name,\n index=self.node_index,\n total=self.num_nodes,\n )\n )\n print_run_result_error(result=self.skip_cause, newline=False)\n if self.skip_cause is None: # mypy appeasement\n raise DbtInternalError(\n \"Skip cause not set but skip was somehow caused by an ephemeral failure\"\n )\n # set an error so dbt will exit with an error code\n error_message = (\n \"Compilation Error in {}, caused by compilation error \"\n \"in referenced ephemeral model {}\".format(\n self.node.unique_id, self.skip_cause.node.unique_id\n )\n )\n else:\n # 'skipped' nodes should not have a value for 'node_finished_at'\n # they do have 'node_started_at', which is set in GraphRunnableTask.call_runner\n self.node.update_event_status(node_status=RunStatus.Skipped)\n fire_event(\n SkippingDetails(\n resource_type=self.node.resource_type,\n schema=schema_name,\n node_name=node_name,\n index=self.node_index,\n total=self.num_nodes,\n node_info=self.node.node_info,\n )\n )\n\n node_result = RunResult.from_node(self.node, RunStatus.Skipped, error_message)\n return node_result\n\n def do_skip(self, cause=None):\n self.skip = True\n self.skip_cause = cause\n", "path": "core/dbt/task/base.py" } ]
[ { "content": "import os\nimport threading\nimport time\nimport traceback\nfrom abc import ABCMeta, abstractmethod\nfrom contextlib import nullcontext\nfrom datetime import datetime\nfrom typing import Type, Union, Dict, Any, Optional\n\nimport dbt.exceptions\nfrom dbt import tracking\nfrom dbt.adapters.factory import get_adapter\nfrom dbt.config import RuntimeConfig, Project\nfrom dbt.config.profile import read_profile\nfrom dbt.contracts.graph.manifest import Manifest\nfrom dbt.contracts.results import (\n NodeStatus,\n RunResult,\n collect_timing_info,\n RunStatus,\n RunningStatus,\n)\nfrom dbt.events.contextvars import get_node_info\nfrom dbt.events.functions import fire_event\nfrom dbt.events.types import (\n LogDbtProjectError,\n LogDbtProfileError,\n CatchableExceptionOnRun,\n InternalErrorOnRun,\n GenericExceptionOnRun,\n NodeConnectionReleaseError,\n LogDebugStackTrace,\n SkippingDetails,\n LogSkipBecauseError,\n NodeCompiling,\n NodeExecuting,\n)\nfrom dbt.exceptions import (\n NotImplementedError,\n CompilationError,\n DbtRuntimeError,\n DbtInternalError,\n)\nfrom dbt.flags import get_flags\nfrom dbt.graph import Graph\nfrom dbt.logger import log_manager\nfrom .printer import print_run_result_error\n\n\nclass NoneConfig:\n @classmethod\n def from_args(cls, args):\n return None\n\n\ndef read_profiles(profiles_dir=None):\n \"\"\"This is only used for some error handling\"\"\"\n if profiles_dir is None:\n profiles_dir = get_flags().PROFILES_DIR\n\n raw_profiles = read_profile(profiles_dir)\n\n if raw_profiles is None:\n profiles = {}\n else:\n profiles = {k: v for (k, v) in raw_profiles.items() if k != \"config\"}\n\n return profiles\n\n\nclass BaseTask(metaclass=ABCMeta):\n ConfigType: Union[Type[NoneConfig], Type[Project]] = NoneConfig\n\n def __init__(self, args, config, project=None):\n self.args = args\n self.config = config\n self.project = config if isinstance(config, Project) else project\n\n @classmethod\n def pre_init_hook(cls, args):\n \"\"\"A hook called before the task is initialized.\"\"\"\n if args.log_format == \"json\":\n log_manager.format_json()\n else:\n log_manager.format_text()\n\n @classmethod\n def set_log_format(cls):\n if get_flags().LOG_FORMAT == \"json\":\n log_manager.format_json()\n else:\n log_manager.format_text()\n\n @classmethod\n def from_args(cls, args, *pargs, **kwargs):\n try:\n # This is usually RuntimeConfig\n config = cls.ConfigType.from_args(args)\n except dbt.exceptions.DbtProjectError as exc:\n fire_event(LogDbtProjectError(exc=str(exc)))\n\n tracking.track_invalid_invocation(args=args, result_type=exc.result_type)\n raise dbt.exceptions.DbtRuntimeError(\"Could not run dbt\") from exc\n except dbt.exceptions.DbtProfileError as exc:\n all_profile_names = list(read_profiles(get_flags().PROFILES_DIR).keys())\n fire_event(LogDbtProfileError(exc=str(exc), profiles=all_profile_names))\n tracking.track_invalid_invocation(args=args, result_type=exc.result_type)\n raise dbt.exceptions.DbtRuntimeError(\"Could not run dbt\") from exc\n return cls(args, config, *pargs, **kwargs)\n\n @abstractmethod\n def run(self):\n raise dbt.exceptions.NotImplementedError(\"Not Implemented\")\n\n def interpret_results(self, results):\n return True\n\n\ndef get_nearest_project_dir(project_dir: Optional[str]) -> str:\n # If the user provides an explicit project directory, use that\n # but don't look at parent directories.\n if project_dir:\n project_file = os.path.join(project_dir, \"dbt_project.yml\")\n if os.path.exists(project_file):\n return project_dir\n 
else:\n raise dbt.exceptions.DbtRuntimeError(\n \"fatal: Invalid --project-dir flag. Not a dbt project. \"\n \"Missing dbt_project.yml file\"\n )\n\n root_path = os.path.abspath(os.sep)\n cwd = os.getcwd()\n\n while cwd != root_path:\n project_file = os.path.join(cwd, \"dbt_project.yml\")\n if os.path.exists(project_file):\n return cwd\n cwd = os.path.dirname(cwd)\n\n raise dbt.exceptions.DbtRuntimeError(\n \"fatal: Not a dbt project (or any of the parent directories). \"\n \"Missing dbt_project.yml file\"\n )\n\n\ndef move_to_nearest_project_dir(project_dir: Optional[str]) -> str:\n nearest_project_dir = get_nearest_project_dir(project_dir)\n os.chdir(nearest_project_dir)\n return nearest_project_dir\n\n\n# TODO: look into deprecating this class in favor of several small functions that\n# produce the same behavior. currently this class only contains manifest compilation,\n# holding a manifest, and moving direcories.\nclass ConfiguredTask(BaseTask):\n ConfigType = RuntimeConfig\n\n def __init__(self, args, config, manifest: Optional[Manifest] = None):\n super().__init__(args, config)\n self.graph: Optional[Graph] = None\n self.manifest = manifest\n\n def compile_manifest(self):\n if self.manifest is None:\n raise DbtInternalError(\"compile_manifest called before manifest was loaded\")\n\n start_compile_manifest = time.perf_counter()\n\n # we cannot get adapter in init since it will break rpc #5579\n adapter = get_adapter(self.config)\n compiler = adapter.get_compiler()\n self.graph = compiler.compile(self.manifest)\n\n compile_time = time.perf_counter() - start_compile_manifest\n if dbt.tracking.active_user is not None:\n dbt.tracking.track_runnable_timing({\"graph_compilation_elapsed\": compile_time})\n\n @classmethod\n def from_args(cls, args, *pargs, **kwargs):\n move_to_nearest_project_dir(args.project_dir)\n return super().from_args(args, *pargs, **kwargs)\n\n\nclass ExecutionContext:\n \"\"\"During execution and error handling, dbt makes use of mutable state:\n timing information and the newest (compiled vs executed) form of the node.\n \"\"\"\n\n def __init__(self, node):\n self.timing = []\n self.node = node\n\n\nclass BaseRunner(metaclass=ABCMeta):\n def __init__(self, config, adapter, node, node_index, num_nodes) -> None:\n self.config = config\n self.adapter = adapter\n self.node = node\n self.node_index = node_index\n self.num_nodes = num_nodes\n\n self.skip = False\n self.skip_cause: Optional[RunResult] = None\n\n self.run_ephemeral_models = False\n\n @abstractmethod\n def compile(self, manifest: Manifest) -> Any:\n pass\n\n def get_result_status(self, result) -> Dict[str, str]:\n if result.status == NodeStatus.Error:\n return {\"node_status\": \"error\", \"node_error\": str(result.message)}\n elif result.status == NodeStatus.Skipped:\n return {\"node_status\": \"skipped\"}\n elif result.status == NodeStatus.Fail:\n return {\"node_status\": \"failed\"}\n elif result.status == NodeStatus.Warn:\n return {\"node_status\": \"warn\"}\n else:\n return {\"node_status\": \"passed\"}\n\n def run_with_hooks(self, manifest):\n if self.skip:\n return self.on_skip()\n\n # no before/after printing for ephemeral mdoels\n if not self.node.is_ephemeral_model:\n self.before_execute()\n\n result = self.safe_run(manifest)\n self.node.update_event_status(\n node_status=result.status, finished_at=datetime.utcnow().isoformat()\n )\n\n if not self.node.is_ephemeral_model:\n self.after_execute(result)\n\n return result\n\n def _build_run_result(\n self,\n node,\n start_time,\n status,\n timing_info,\n 
message,\n agate_table=None,\n adapter_response=None,\n failures=None,\n ):\n execution_time = time.time() - start_time\n thread_id = threading.current_thread().name\n if adapter_response is None:\n adapter_response = {}\n return RunResult(\n status=status,\n thread_id=thread_id,\n execution_time=execution_time,\n timing=timing_info,\n message=message,\n node=node,\n agate_table=agate_table,\n adapter_response=adapter_response,\n failures=failures,\n )\n\n def error_result(self, node, message, start_time, timing_info):\n return self._build_run_result(\n node=node,\n start_time=start_time,\n status=RunStatus.Error,\n timing_info=timing_info,\n message=message,\n )\n\n def ephemeral_result(self, node, start_time, timing_info):\n return self._build_run_result(\n node=node,\n start_time=start_time,\n status=RunStatus.Success,\n timing_info=timing_info,\n message=None,\n )\n\n def from_run_result(self, result, start_time, timing_info):\n return self._build_run_result(\n node=result.node,\n start_time=start_time,\n status=result.status,\n timing_info=timing_info,\n message=result.message,\n agate_table=result.agate_table,\n adapter_response=result.adapter_response,\n failures=result.failures,\n )\n\n def compile_and_execute(self, manifest, ctx):\n result = None\n with self.adapter.connection_for(self.node) if get_flags().INTROSPECT else nullcontext():\n ctx.node.update_event_status(node_status=RunningStatus.Compiling)\n fire_event(\n NodeCompiling(\n node_info=ctx.node.node_info,\n )\n )\n with collect_timing_info(\"compile\", ctx.timing.append):\n # if we fail here, we still have a compiled node to return\n # this has the benefit of showing a build path for the errant\n # model\n ctx.node = self.compile(manifest)\n\n # for ephemeral nodes, we only want to compile, not run\n if not ctx.node.is_ephemeral_model or self.run_ephemeral_models:\n ctx.node.update_event_status(node_status=RunningStatus.Executing)\n fire_event(\n NodeExecuting(\n node_info=ctx.node.node_info,\n )\n )\n with collect_timing_info(\"execute\", ctx.timing.append):\n result = self.run(ctx.node, manifest)\n ctx.node = result.node\n\n return result\n\n def _handle_catchable_exception(self, e, ctx):\n if e.node is None:\n e.add_node(ctx.node)\n\n fire_event(\n CatchableExceptionOnRun(\n exc=str(e), exc_info=traceback.format_exc(), node_info=get_node_info()\n )\n )\n return str(e)\n\n def _handle_internal_exception(self, e, ctx):\n fire_event(InternalErrorOnRun(build_path=self.node.build_path, exc=str(e)))\n return str(e)\n\n def _handle_generic_exception(self, e, ctx):\n fire_event(\n GenericExceptionOnRun(\n build_path=self.node.build_path,\n unique_id=self.node.unique_id,\n exc=str(e),\n )\n )\n fire_event(LogDebugStackTrace(exc_info=traceback.format_exc()))\n\n return str(e)\n\n def handle_exception(self, e, ctx):\n catchable_errors = (CompilationError, DbtRuntimeError)\n if isinstance(e, catchable_errors):\n error = self._handle_catchable_exception(e, ctx)\n elif isinstance(e, DbtInternalError):\n error = self._handle_internal_exception(e, ctx)\n else:\n error = self._handle_generic_exception(e, ctx)\n return error\n\n def safe_run(self, manifest):\n started = time.time()\n ctx = ExecutionContext(self.node)\n error = None\n result = None\n\n try:\n result = self.compile_and_execute(manifest, ctx)\n except Exception as e:\n error = self.handle_exception(e, ctx)\n finally:\n exc_str = self._safe_release_connection()\n\n # if releasing failed and the result doesn't have an error yet, set\n # an error\n if (\n exc_str is not 
None\n and result is not None\n and result.status != NodeStatus.Error\n and error is None\n ):\n error = exc_str\n\n if error is not None:\n result = self.error_result(ctx.node, error, started, ctx.timing)\n elif result is not None:\n result = self.from_run_result(result, started, ctx.timing)\n else:\n result = self.ephemeral_result(ctx.node, started, ctx.timing)\n return result\n\n def _safe_release_connection(self):\n \"\"\"Try to release a connection. If an exception is hit, log and return\n the error string.\n \"\"\"\n try:\n self.adapter.release_connection()\n except Exception as exc:\n fire_event(\n NodeConnectionReleaseError(\n node_name=self.node.name, exc=str(exc), exc_info=traceback.format_exc()\n )\n )\n return str(exc)\n\n return None\n\n def before_execute(self):\n raise NotImplementedError()\n\n def execute(self, compiled_node, manifest):\n raise NotImplementedError()\n\n def run(self, compiled_node, manifest):\n return self.execute(compiled_node, manifest)\n\n def after_execute(self, result):\n raise NotImplementedError()\n\n def _skip_caused_by_ephemeral_failure(self):\n if self.skip_cause is None or self.skip_cause.node is None:\n return False\n return self.skip_cause.node.is_ephemeral_model\n\n def on_skip(self):\n schema_name = self.node.schema\n node_name = self.node.name\n\n error_message = None\n if not self.node.is_ephemeral_model:\n # if this model was skipped due to an upstream ephemeral model\n # failure, print a special 'error skip' message.\n if self._skip_caused_by_ephemeral_failure():\n fire_event(\n LogSkipBecauseError(\n schema=schema_name,\n relation=node_name,\n index=self.node_index,\n total=self.num_nodes,\n )\n )\n print_run_result_error(result=self.skip_cause, newline=False)\n if self.skip_cause is None: # mypy appeasement\n raise DbtInternalError(\n \"Skip cause not set but skip was somehow caused by an ephemeral failure\"\n )\n # set an error so dbt will exit with an error code\n error_message = (\n \"Compilation Error in {}, caused by compilation error \"\n \"in referenced ephemeral model {}\".format(\n self.node.unique_id, self.skip_cause.node.unique_id\n )\n )\n else:\n # 'skipped' nodes should not have a value for 'node_finished_at'\n # they do have 'node_started_at', which is set in GraphRunnableTask.call_runner\n self.node.update_event_status(node_status=RunStatus.Skipped)\n fire_event(\n SkippingDetails(\n resource_type=self.node.resource_type,\n schema=schema_name,\n node_name=node_name,\n index=self.node_index,\n total=self.num_nodes,\n node_info=self.node.node_info,\n )\n )\n\n node_result = RunResult.from_node(self.node, RunStatus.Skipped, error_message)\n return node_result\n\n def do_skip(self, cause=None):\n self.skip = True\n self.skip_cause = cause\n", "path": "core/dbt/task/base.py" } ]
diff --git a/.changes/unreleased/Under the Hood-20230906-164901.yaml b/.changes/unreleased/Under the Hood-20230906-164901.yaml new file mode 100644 index 00000000000..f309a24ccc3 --- /dev/null +++ b/.changes/unreleased/Under the Hood-20230906-164901.yaml @@ -0,0 +1,6 @@ +kind: Under the Hood +body: Add typing to __init__ in base.py +time: 2023-09-06T16:49:01.150713+01:00 +custom: + Author: aranke + Issue: "8398" diff --git a/core/dbt/task/base.py b/core/dbt/task/base.py index 0aae0bd8851..3e7d7544578 100644 --- a/core/dbt/task/base.py +++ b/core/dbt/task/base.py @@ -193,7 +193,7 @@ def __init__(self, node): class BaseRunner(metaclass=ABCMeta): - def __init__(self, config, adapter, node, node_index, num_nodes): + def __init__(self, config, adapter, node, node_index, num_nodes) -> None: self.config = config self.adapter = adapter self.node = node
piskvorky__gensim-3441
annoy.py conversion of cosine distance to cosine similarity is incorrect

In [this function](https://github.com/RaRe-Technologies/gensim/blob/f35faae7a7b0c3c8586fb61208560522e37e0e7e/gensim/similarities/annoy.py#L169) the code that converts Annoy's distance into a cosine similarity is incorrect:

```python
def most_similar(self, vector, num_neighbors):
    """Find `num_neighbors` most similar items.

    Parameters
    ----------
    vector : numpy.array
        Vector for word/document.
    num_neighbors : int
        Number of most similar items

    Returns
    -------
    list of (str, float)
        List of most similar items in format [(`item`, `cosine_distance`), ... ]

    """
    ids, distances = self.index.get_nns_by_vector(
        vector, num_neighbors, include_distances=True)

    return [(self.labels[ids[i]], 1 - distances[i] / 2) for i in range(len(ids))]
```

According to the Annoy documentation, `get_nns_by_vector` with `include_distances=True` returns the distances themselves, not the squared distances (this was changed in August 2016):

_`a.get_distance(i, j)` returns the distance between items i and j. NOTE: this used to return the squared distance, but has been changed as of Aug 2016._ [link](https://github.com/spotify/annoy#:~:text=a.get_distance(i%2C%20j)%20returns%20the%20distance%20between%20items%20i%20and%20j.%20NOTE%3A%20this%20used%20to%20return%20the%20squared%20distance%2C%20but%20has%20been%20changed%20as%20of%20Aug%202016.)

Also, Annoy uses the Euclidean distance of normalized vectors as its angular distance, which for two vectors u, v equals sqrt(2(1-cos(u,v))) [link](https://github.com/spotify/annoy#:~:text=Annoy%20uses%20Euclidean%20distance%20of%20normalized%20vectors%20for%20its%20angular%20distance%2C%20which%20for%20two%20vectors%20u%2Cv%20is%20equal%20to%20sqrt(2(1%2Dcos(u%2Cv))))

So to calculate the cosine similarity correctly, the returned distance has to be squared first:
` return [(self.labels[ids[i]], 1 - distances[i] ** 2 / 2) for i in range(len(ids))] `
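A standalone sketch (plain NumPy with made-up vectors; not part of gensim or of the proposed patch) illustrating why `1 - d ** 2 / 2` recovers the cosine similarity from Annoy's angular distance while `1 - d / 2` does not:

```python
import numpy as np

# Two arbitrary example vectors (assumed values, purely for illustration).
u = np.array([1.0, 2.0, 3.0])
v = np.array([2.0, 0.5, 1.0])

# True cosine similarity.
cos = np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v))

# Annoy's "angular" distance of normalized vectors: sqrt(2 * (1 - cos(u, v))).
angular = np.sqrt(2.0 * (1.0 - cos))

print(cos)                       # true cosine similarity
print(1.0 - angular ** 2 / 2.0)  # squared conversion: matches cos
print(1.0 - angular / 2.0)       # current annoy.py formula: does not match
```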
[ { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2013 Radim Rehurek <[email protected]>\n# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html\n\n\"\"\"\nThis module integrates Spotify's `Annoy <https://github.com/spotify/annoy>`_ (Approximate Nearest Neighbors Oh Yeah)\nlibrary with Gensim's :class:`~gensim.models.word2vec.Word2Vec`, :class:`~gensim.models.doc2vec.Doc2Vec`,\n:class:`~gensim.models.fasttext.FastText` and :class:`~gensim.models.keyedvectors.KeyedVectors` word embeddings.\n\n.. Important::\n To use this module, you must have the ``annoy`` library installed.\n To install it, run ``pip install annoy``.\n\n\"\"\"\n\n# Avoid import collisions on py2: this module has the same name as the actual Annoy library.\nfrom __future__ import absolute_import\n\nimport os\n\ntry:\n import cPickle as _pickle\nexcept ImportError:\n import pickle as _pickle\n\nfrom gensim import utils\nfrom gensim.models.doc2vec import Doc2Vec\nfrom gensim.models.word2vec import Word2Vec\nfrom gensim.models.fasttext import FastText\nfrom gensim.models import KeyedVectors\n\n\n_NOANNOY = ImportError(\"Annoy not installed. To use the Annoy indexer, please run `pip install annoy`.\")\n\n\nclass AnnoyIndexer():\n \"\"\"This class allows the use of `Annoy <https://github.com/spotify/annoy>`_ for fast (approximate)\n vector retrieval in `most_similar()` calls of\n :class:`~gensim.models.word2vec.Word2Vec`, :class:`~gensim.models.doc2vec.Doc2Vec`,\n :class:`~gensim.models.fasttext.FastText` and :class:`~gensim.models.keyedvectors.Word2VecKeyedVectors` models.\n\n \"\"\"\n\n def __init__(self, model=None, num_trees=None):\n \"\"\"\n Parameters\n ----------\n model : trained model, optional\n Use vectors from this model as the source for the index.\n num_trees : int, optional\n Number of trees for Annoy indexer.\n\n Examples\n --------\n .. sourcecode:: pycon\n\n >>> from gensim.similarities.annoy import AnnoyIndexer\n >>> from gensim.models import Word2Vec\n >>>\n >>> sentences = [['cute', 'cat', 'say', 'meow'], ['cute', 'dog', 'say', 'woof']]\n >>> model = Word2Vec(sentences, min_count=1, seed=1)\n >>>\n >>> indexer = AnnoyIndexer(model, 2)\n >>> model.most_similar(\"cat\", topn=2, indexer=indexer)\n [('cat', 1.0), ('dog', 0.32011348009109497)]\n\n \"\"\"\n self.index = None\n self.labels = None\n self.model = model\n self.num_trees = num_trees\n\n if model and num_trees:\n # Extract the KeyedVectors object from whatever model we were given.\n if isinstance(self.model, Doc2Vec):\n kv = self.model.dv\n elif isinstance(self.model, (Word2Vec, FastText)):\n kv = self.model.wv\n elif isinstance(self.model, (KeyedVectors,)):\n kv = self.model\n else:\n raise ValueError(\"Only a Word2Vec, Doc2Vec, FastText or KeyedVectors instance can be used\")\n self._build_from_model(kv.get_normed_vectors(), kv.index_to_key, kv.vector_size)\n\n def save(self, fname, protocol=utils.PICKLE_PROTOCOL):\n \"\"\"Save AnnoyIndexer instance to disk.\n\n Parameters\n ----------\n fname : str\n Path to output. Save will produce 2 files:\n `fname`: Annoy index itself.\n `fname.dict`: Index metadata.\n protocol : int, optional\n Protocol for pickle.\n\n Notes\n -----\n This method saves **only the index**. 
The trained model isn't preserved.\n\n \"\"\"\n self.index.save(fname)\n d = {'f': self.model.vector_size, 'num_trees': self.num_trees, 'labels': self.labels}\n with utils.open(fname + '.dict', 'wb') as fout:\n _pickle.dump(d, fout, protocol=protocol)\n\n def load(self, fname):\n \"\"\"Load an AnnoyIndexer instance from disk.\n\n Parameters\n ----------\n fname : str\n The path as previously used by ``save()``.\n\n Examples\n --------\n .. sourcecode:: pycon\n\n >>> from gensim.similarities.index import AnnoyIndexer\n >>> from gensim.models import Word2Vec\n >>> from tempfile import mkstemp\n >>>\n >>> sentences = [['cute', 'cat', 'say', 'meow'], ['cute', 'dog', 'say', 'woof']]\n >>> model = Word2Vec(sentences, min_count=1, seed=1, epochs=10)\n >>>\n >>> indexer = AnnoyIndexer(model, 2)\n >>> _, temp_fn = mkstemp()\n >>> indexer.save(temp_fn)\n >>>\n >>> new_indexer = AnnoyIndexer()\n >>> new_indexer.load(temp_fn)\n >>> new_indexer.model = model\n\n \"\"\"\n fname_dict = fname + '.dict'\n if not (os.path.exists(fname) and os.path.exists(fname_dict)):\n raise IOError(\n f\"Can't find index files '{fname}' and '{fname_dict}' - unable to restore AnnoyIndexer state.\"\n )\n try:\n from annoy import AnnoyIndex\n except ImportError:\n raise _NOANNOY\n\n with utils.open(fname_dict, 'rb') as f:\n d = _pickle.loads(f.read())\n self.num_trees = d['num_trees']\n self.index = AnnoyIndex(d['f'], metric='angular')\n self.index.load(fname)\n self.labels = d['labels']\n\n def _build_from_model(self, vectors, labels, num_features):\n try:\n from annoy import AnnoyIndex\n except ImportError:\n raise _NOANNOY\n\n index = AnnoyIndex(num_features, metric='angular')\n\n for vector_num, vector in enumerate(vectors):\n index.add_item(vector_num, vector)\n\n index.build(self.num_trees)\n self.index = index\n self.labels = labels\n\n def most_similar(self, vector, num_neighbors):\n \"\"\"Find `num_neighbors` most similar items.\n\n Parameters\n ----------\n vector : numpy.array\n Vector for word/document.\n num_neighbors : int\n Number of most similar items\n\n Returns\n -------\n list of (str, float)\n List of most similar items in format [(`item`, `cosine_distance`), ... ]\n\n \"\"\"\n ids, distances = self.index.get_nns_by_vector(\n vector, num_neighbors, include_distances=True)\n\n return [(self.labels[ids[i]], 1 - distances[i] / 2) for i in range(len(ids))]\n", "path": "gensim/similarities/annoy.py" } ]
[ { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2013 Radim Rehurek <[email protected]>\n# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html\n\n\"\"\"\nThis module integrates Spotify's `Annoy <https://github.com/spotify/annoy>`_ (Approximate Nearest Neighbors Oh Yeah)\nlibrary with Gensim's :class:`~gensim.models.word2vec.Word2Vec`, :class:`~gensim.models.doc2vec.Doc2Vec`,\n:class:`~gensim.models.fasttext.FastText` and :class:`~gensim.models.keyedvectors.KeyedVectors` word embeddings.\n\n.. Important::\n To use this module, you must have the ``annoy`` library installed.\n To install it, run ``pip install annoy``.\n\n\"\"\"\n\n# Avoid import collisions on py2: this module has the same name as the actual Annoy library.\nfrom __future__ import absolute_import\n\nimport os\n\ntry:\n import cPickle as _pickle\nexcept ImportError:\n import pickle as _pickle\n\nfrom gensim import utils\nfrom gensim.models.doc2vec import Doc2Vec\nfrom gensim.models.word2vec import Word2Vec\nfrom gensim.models.fasttext import FastText\nfrom gensim.models import KeyedVectors\n\n\n_NOANNOY = ImportError(\"Annoy not installed. To use the Annoy indexer, please run `pip install annoy`.\")\n\n\nclass AnnoyIndexer():\n \"\"\"This class allows the use of `Annoy <https://github.com/spotify/annoy>`_ for fast (approximate)\n vector retrieval in `most_similar()` calls of\n :class:`~gensim.models.word2vec.Word2Vec`, :class:`~gensim.models.doc2vec.Doc2Vec`,\n :class:`~gensim.models.fasttext.FastText` and :class:`~gensim.models.keyedvectors.Word2VecKeyedVectors` models.\n\n \"\"\"\n\n def __init__(self, model=None, num_trees=None):\n \"\"\"\n Parameters\n ----------\n model : trained model, optional\n Use vectors from this model as the source for the index.\n num_trees : int, optional\n Number of trees for Annoy indexer.\n\n Examples\n --------\n .. sourcecode:: pycon\n\n >>> from gensim.similarities.annoy import AnnoyIndexer\n >>> from gensim.models import Word2Vec\n >>>\n >>> sentences = [['cute', 'cat', 'say', 'meow'], ['cute', 'dog', 'say', 'woof']]\n >>> model = Word2Vec(sentences, min_count=1, seed=1)\n >>>\n >>> indexer = AnnoyIndexer(model, 2)\n >>> model.most_similar(\"cat\", topn=2, indexer=indexer)\n [('cat', 1.0), ('dog', 0.32011348009109497)]\n\n \"\"\"\n self.index = None\n self.labels = None\n self.model = model\n self.num_trees = num_trees\n\n if model and num_trees:\n # Extract the KeyedVectors object from whatever model we were given.\n if isinstance(self.model, Doc2Vec):\n kv = self.model.dv\n elif isinstance(self.model, (Word2Vec, FastText)):\n kv = self.model.wv\n elif isinstance(self.model, (KeyedVectors,)):\n kv = self.model\n else:\n raise ValueError(\"Only a Word2Vec, Doc2Vec, FastText or KeyedVectors instance can be used\")\n self._build_from_model(kv.get_normed_vectors(), kv.index_to_key, kv.vector_size)\n\n def save(self, fname, protocol=utils.PICKLE_PROTOCOL):\n \"\"\"Save AnnoyIndexer instance to disk.\n\n Parameters\n ----------\n fname : str\n Path to output. Save will produce 2 files:\n `fname`: Annoy index itself.\n `fname.dict`: Index metadata.\n protocol : int, optional\n Protocol for pickle.\n\n Notes\n -----\n This method saves **only the index**. 
The trained model isn't preserved.\n\n \"\"\"\n self.index.save(fname)\n d = {'f': self.model.vector_size, 'num_trees': self.num_trees, 'labels': self.labels}\n with utils.open(fname + '.dict', 'wb') as fout:\n _pickle.dump(d, fout, protocol=protocol)\n\n def load(self, fname):\n \"\"\"Load an AnnoyIndexer instance from disk.\n\n Parameters\n ----------\n fname : str\n The path as previously used by ``save()``.\n\n Examples\n --------\n .. sourcecode:: pycon\n\n >>> from gensim.similarities.index import AnnoyIndexer\n >>> from gensim.models import Word2Vec\n >>> from tempfile import mkstemp\n >>>\n >>> sentences = [['cute', 'cat', 'say', 'meow'], ['cute', 'dog', 'say', 'woof']]\n >>> model = Word2Vec(sentences, min_count=1, seed=1, epochs=10)\n >>>\n >>> indexer = AnnoyIndexer(model, 2)\n >>> _, temp_fn = mkstemp()\n >>> indexer.save(temp_fn)\n >>>\n >>> new_indexer = AnnoyIndexer()\n >>> new_indexer.load(temp_fn)\n >>> new_indexer.model = model\n\n \"\"\"\n fname_dict = fname + '.dict'\n if not (os.path.exists(fname) and os.path.exists(fname_dict)):\n raise IOError(\n f\"Can't find index files '{fname}' and '{fname_dict}' - unable to restore AnnoyIndexer state.\"\n )\n try:\n from annoy import AnnoyIndex\n except ImportError:\n raise _NOANNOY\n\n with utils.open(fname_dict, 'rb') as f:\n d = _pickle.loads(f.read())\n self.num_trees = d['num_trees']\n self.index = AnnoyIndex(d['f'], metric='angular')\n self.index.load(fname)\n self.labels = d['labels']\n\n def _build_from_model(self, vectors, labels, num_features):\n try:\n from annoy import AnnoyIndex\n except ImportError:\n raise _NOANNOY\n\n index = AnnoyIndex(num_features, metric='angular')\n\n for vector_num, vector in enumerate(vectors):\n index.add_item(vector_num, vector)\n\n index.build(self.num_trees)\n self.index = index\n self.labels = labels\n\n def most_similar(self, vector, num_neighbors):\n \"\"\"Find `num_neighbors` most similar items.\n\n Parameters\n ----------\n vector : numpy.array\n Vector for word/document.\n num_neighbors : int\n Number of most similar items\n\n Returns\n -------\n list of (str, float)\n List of most similar items in format [(`item`, `cosine_distance`), ... ]\n\n \"\"\"\n ids, distances = self.index.get_nns_by_vector(\n vector, num_neighbors, include_distances=True)\n\n return [(self.labels[ids[i]], 1 - distances[i] ** 2 / 2) for i in range(len(ids))]\n", "path": "gensim/similarities/annoy.py" } ]
diff --git a/gensim/similarities/annoy.py b/gensim/similarities/annoy.py index e586b2d2e3..688985ca51 100644 --- a/gensim/similarities/annoy.py +++ b/gensim/similarities/annoy.py @@ -185,4 +185,4 @@ def most_similar(self, vector, num_neighbors): ids, distances = self.index.get_nns_by_vector( vector, num_neighbors, include_distances=True) - return [(self.labels[ids[i]], 1 - distances[i] / 2) for i in range(len(ids))] + return [(self.labels[ids[i]], 1 - distances[i] ** 2 / 2) for i in range(len(ids))]
ludwig-ai__ludwig-1056
Use openpyxl instead of xlrd for reading newer Excel files With the release of xlrd 2, support for newer Excel formats like xlsx has been dropped. It is recommended to use openpyxl instead and to only use xlrd for legacy Excel formats. See: https://stackoverflow.com/questions/65254535/xlrd-biffh-xlrderror-excel-xlsx-file-not-supported
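A minimal standalone sketch of the extension-based engine selection described above (not the Ludwig code itself; the function name is made up, and formats such as `.ods` or `.xlsb` would need their own pandas engines like `odf` or `pyxlsb`):

```python
import os

import pandas as pd


def read_excel_any(data_fp):
    # Keep xlrd only for legacy .xls files; route newer formats to openpyxl.
    ext = os.path.splitext(data_fp)[1].lower()
    engine = 'xlrd' if ext == '.xls' else 'openpyxl'
    return pd.read_excel(data_fp, engine=engine)
```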
[ { "content": "#! /usr/bin/env python\n# coding=utf-8\n# Copyright (c) 2019 Uber Technologies, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nimport collections\nimport csv\nimport functools\nimport json\nimport logging\nimport os.path\nimport pickle\nimport random\nimport re\n\nimport h5py\nimport numpy as np\nimport pandas as pd\nfrom pandas.errors import ParserError\nfrom sklearn.model_selection import KFold\n\nfrom ludwig.constants import PREPROCESSING, SPLIT, PROC_COLUMN\nfrom ludwig.globals import (MODEL_HYPERPARAMETERS_FILE_NAME,\n MODEL_WEIGHTS_FILE_NAME,\n TRAIN_SET_METADATA_FILE_NAME)\n\nlogger = logging.getLogger(__name__)\n\nDATASET_SPLIT_URL = 'dataset_{}_fp'\nDATA_PROCESSED_CACHE_DIR = 'data_processed_cache_dir'\nDATA_TRAIN_HDF5_FP = 'data_train_hdf5_fp'\nHDF5_COLUMNS_KEY = 'columns'\nDICT_FORMATS = {'dict', 'dictionary', dict}\nDATAFRAME_FORMATS = {'dataframe', 'df', pd.DataFrame}\nCSV_FORMATS = {'csv'}\nTSV_FORMATS = {'tsv'}\nJSON_FORMATS = {'json'}\nJSONL_FORMATS = {'jsonl'}\nEXCEL_FORMATS = {'excel'}\nPARQUET_FORMATS = {'parquet'}\nPICKLE_FORMATS = {'pickle'}\nFEATHER_FORMATS = {'feather'}\nFWF_FORMATS = {'fwf'}\nHTML_FORMATS = {'html'}\nORC_FORMATS = {'orc'}\nSAS_FORMATS = {'sas'}\nSPSS_FORMATS = {'spss'}\nSTATA_FORMATS = {'stata'}\nHDF5_FORMATS = {'hdf5', 'h5'}\nCACHEABLE_FORMATS = set.union(*(CSV_FORMATS, TSV_FORMATS,\n JSON_FORMATS, JSONL_FORMATS,\n EXCEL_FORMATS, PARQUET_FORMATS, PICKLE_FORMATS,\n FEATHER_FORMATS, FWF_FORMATS, HTML_FORMATS,\n ORC_FORMATS, SAS_FORMATS, SPSS_FORMATS,\n STATA_FORMATS))\n\nPANDAS_DF = pd\n\n\ndef get_split_path(dataset_fp):\n return os.path.splitext(dataset_fp)[0] + '.split.csv'\n\n\ndef get_abs_path(data_csv_path, file_path):\n if data_csv_path is not None:\n return os.path.join(data_csv_path, file_path)\n else:\n return file_path\n\n\ndef load_csv(data_fp):\n data = []\n with open(data_fp, 'rb') as f:\n data = list(csv.reader(f))\n return data\n\n\ndef read_xsv(data_fp, df_lib=PANDAS_DF, separator=',', header=0, nrows=None, skiprows=None):\n \"\"\"\n Helper method to read a csv file. Wraps around pd.read_csv to handle some\n exceptions. 
Can extend to cover cases as necessary\n :param data_fp: path to the xsv file\n :param df_lib: DataFrame library used to read in the CSV\n :param separator: defaults separator to use for splitting\n :param header: header argument for pandas to read the csv\n :param nrows: number of rows to read from the csv, None means all\n :param skiprows: number of rows to skip from the csv, None means no skips\n :return: Pandas dataframe with the data\n \"\"\"\n with open(data_fp, 'r', encoding=\"utf8\") as csvfile:\n try:\n dialect = csv.Sniffer().sniff(csvfile.read(1024 * 100),\n delimiters=[',', '\\t', '|'])\n separator = dialect.delimiter\n except csv.Error:\n # Could not conclude the delimiter, defaulting to user provided\n pass\n\n try:\n df = df_lib.read_csv(data_fp, sep=separator, header=header,\n nrows=nrows, skiprows=skiprows)\n except ParserError:\n logger.warning('Failed to parse the CSV with pandas default way,'\n ' trying \\\\ as escape character.')\n df = df_lib.read_csv(data_fp, sep=separator, header=header,\n escapechar='\\\\',\n nrows=nrows, skiprows=skiprows)\n\n return df\n\n\nread_csv = functools.partial(read_xsv, separator=',')\nread_tsv = functools.partial(read_xsv, separator='\\t')\n\n\ndef read_json(data_fp, df_lib, normalize=False):\n if normalize:\n return df_lib.json_normalize(load_json(data_fp))\n else:\n return df_lib.read_json(data_fp)\n\n\ndef read_jsonl(data_fp, df_lib):\n return df_lib.read_json(data_fp, lines=True)\n\n\ndef read_excel(data_fp, df_lib):\n return df_lib.read_excel(data_fp)\n\n\ndef read_parquet(data_fp, df_lib):\n return df_lib.read_parquet(data_fp)\n\n\ndef read_pickle(data_fp, df_lib):\n return df_lib.read_pickle(data_fp)\n\n\ndef read_fwf(data_fp, df_lib):\n return df_lib.read_fwf(data_fp)\n\n\ndef read_feather(data_fp, df_lib):\n return df_lib.read_feather(data_fp)\n\n\ndef read_html(data_fp, df_lib):\n return df_lib.read_html(data_fp)[0]\n\n\ndef read_orc(data_fp, df_lib):\n return df_lib.read_orc(data_fp)\n\n\ndef read_sas(data_fp, df_lib):\n return df_lib.read_sas(data_fp)\n\n\ndef read_spss(data_fp, df_lib):\n return df_lib.read_spss(data_fp)\n\n\ndef read_stata(data_fp, df_lib):\n return df_lib.read_stata(data_fp)\n\n\ndef save_csv(data_fp, data):\n with open(data_fp, 'w', encoding='utf-8') as csv_file:\n writer = csv.writer(csv_file)\n for row in data:\n if not isinstance(row, collections.Iterable) or isinstance(row,\n str):\n row = [row]\n writer.writerow(row)\n\n\ndef csv_contains_column(data_fp, column_name):\n return column_name in read_csv(data_fp, nrows=0) # only loads header\n\n\ndef load_json(data_fp):\n with open(data_fp, 'r') as input_file:\n data = json.load(input_file)\n return data\n\n\ndef save_json(data_fp, data, sort_keys=True, indent=4):\n with open(data_fp, 'w') as output_file:\n json.dump(data, output_file, cls=NumpyEncoder, sort_keys=sort_keys,\n indent=indent)\n\n\ndef to_numpy_dataset(df):\n dataset = {}\n for col in df.columns:\n dataset[col] = np.stack(df[col].to_numpy())\n return dataset\n\n\ndef from_numpy_dataset(dataset):\n col_mapping = {}\n for k, v in dataset.items():\n if len(v.shape) > 1:\n # unstacking, needed for ndarrays of dimension 2 and more\n *vals, = v\n else:\n # not unstacking. 
Needed because otherwise pandas casts types\n # the way it wants, like converting a list of float32 scalats\n # to a column of float64\n vals = v\n col_mapping[k] = vals\n return pd.DataFrame.from_dict(col_mapping)\n\n\ndef save_hdf5(data_fp, data):\n mode = 'w'\n if os.path.isfile(data_fp):\n mode = 'r+'\n\n numpy_dataset = to_numpy_dataset(data)\n with h5py.File(data_fp, mode) as h5_file:\n h5_file.create_dataset(HDF5_COLUMNS_KEY, data=np.array(data.columns.values, dtype='S'))\n for column in data.columns:\n h5_file.create_dataset(column, data=numpy_dataset[column])\n\n\ndef load_hdf5(data_fp):\n hdf5_data = h5py.File(data_fp, 'r')\n columns = [s.decode('utf-8') for s in hdf5_data[HDF5_COLUMNS_KEY][()].tolist()]\n\n numpy_dataset = {}\n for column in columns:\n numpy_dataset[column] = hdf5_data[column][()]\n\n return from_numpy_dataset(numpy_dataset)\n\n\ndef load_object(object_fp):\n with open(object_fp, 'rb') as f:\n return pickle.load(f)\n\n\ndef save_object(object_fp, obj):\n with open(object_fp, 'wb') as f:\n pickle.dump(obj, f)\n\n\ndef load_array(data_fp, dtype=float):\n list_num = []\n with open(data_fp, 'r') as input_file:\n for x in input_file:\n list_num.append(dtype(x.strip()))\n return np.array(list_num)\n\n\ndef load_matrix(data_fp, dtype=float):\n list_num = []\n with open(data_fp, 'r') as input_file:\n for row in input_file:\n list_num.append([dtype(elem) for elem in row.strip().split()])\n return np.squeeze(np.array(list_num))\n\n\ndef save_array(data_fp, array):\n with open(data_fp, 'w') as output_file:\n for x in np.nditer(array):\n output_file.write(str(x) + '\\n')\n\n\ndef load_pretrained_embeddings(embeddings_path, vocab):\n embeddings = load_glove(embeddings_path)\n\n # find out the size of the embeddings\n embeddings_size = len(next(iter(embeddings.values())))\n\n # calculate an average embedding, to use for initializing missing words\n avg_embedding = np.zeros(embeddings_size)\n count = 0\n for word in vocab:\n if word in embeddings:\n avg_embedding += embeddings[word]\n count += 1\n if count > 0:\n avg_embedding /= count\n\n # create the embedding matrix\n embeddings_vectors = []\n for word in vocab:\n if word in embeddings:\n embeddings_vectors.append(embeddings[word])\n else:\n embeddings_vectors.append(\n avg_embedding + np.random.uniform(-0.01, 0.01, embeddings_size)\n )\n embeddings_matrix = np.stack(embeddings_vectors)\n\n # let's help the garbage collector free some memory\n embeddings = None\n\n return embeddings_matrix\n\n\[email protected]_cache(1)\ndef load_glove(file_path):\n logger.info(' Loading Glove format file {}'.format(file_path))\n embeddings = {}\n embedding_size = 0\n\n # collect embeddings size assuming the first line is correct\n with open(file_path, 'r', encoding='utf-8') as f:\n found_line = False\n while not found_line:\n line = f.readline()\n if line:\n embedding_size = len(line.split()) - 1\n found_line = True\n\n # collect embeddings\n with open(file_path, 'r', encoding='utf-8') as f:\n for line_number, line in enumerate(f):\n if line:\n try:\n split = line.split()\n if len(split) != embedding_size + 1:\n raise ValueError\n word = split[0]\n embedding = np.array(\n [float(val) for val in split[-embedding_size:]]\n )\n embeddings[word] = embedding\n except ValueError:\n logger.warning(\n 'Line {} in the GloVe file {} is malformed, '\n 'skipping it'.format(\n line_number, file_path\n )\n )\n logger.info(' {0} embeddings loaded'.format(len(embeddings)))\n return embeddings\n\n\ndef split_data(split, data):\n # type: (float, list) -> 
(list, list)\n split_length = int(round(split * len(data)))\n random.shuffle(data)\n return data[:split_length], data[split_length:]\n\n\ndef shuffle_unison_inplace(list_of_lists, random_state=None):\n if list_of_lists:\n assert all(len(l) == len(list_of_lists[0]) for l in list_of_lists)\n if random_state is not None:\n p = random_state.permutation(len(list_of_lists[0]))\n else:\n p = np.random.permutation(len(list_of_lists[0]))\n return [l[p] for l in list_of_lists]\n return None\n\n\ndef shuffle_dict_unison_inplace(np_dict, random_state=None):\n keys = list(np_dict.keys())\n list_of_lists = list(np_dict.values())\n\n # shuffle up the list of lists according to previous fct\n shuffled_list = shuffle_unison_inplace(list_of_lists, random_state)\n\n recon = {}\n for ii in range(len(keys)):\n dkey = keys[ii]\n recon[dkey] = shuffled_list[ii]\n\n # we've shuffled the dictionary in place!\n return recon\n\n\ndef split_dataset_ttv(dataset, split):\n training_set = split_dataset(dataset, split, 0)\n validation_set = split_dataset(dataset, split, 1)\n test_set = split_dataset(dataset, split, 2)\n return training_set, test_set, validation_set\n\n\ndef split_dataset(dataset, split, value_to_split=0):\n split_df = dataset[dataset[split] == value_to_split]\n if len(split_df) == 0:\n return None\n return split_df.reset_index()\n\n\ndef collapse_rare_labels(labels, labels_limit):\n if labels_limit > 0:\n labels[labels >= labels_limit] = labels_limit\n return labels\n\n\ndef class_counts(dataset, labels_field):\n return np.bincount(dataset[labels_field].flatten()).tolist()\n\n\ndef text_feature_data_field(text_feature):\n return text_feature[PROC_COLUMN] + '_' + text_feature['level']\n\n\ndef load_from_file(file_name, field=None, dtype=int, ground_truth_split=2):\n \"\"\"Load experiment data from supported file formats.\n\n Experiment data can be test/train statistics, model predictions,\n probability, ground truth, ground truth metadata.\n :param file_name: Path to file to be loaded\n :param field: Target Prediction field.\n :param dtype:\n :param ground_truth_split: Ground truth split filter where 0 is train 1 is\n validation and 2 is test split. By default test split is used when loading\n ground truth from hdf5.\n :return: Experiment data as array\n \"\"\"\n if file_name.endswith('.hdf5') and field is not None:\n dataset = pd.read_hdf(file_name, key=HDF5_COLUMNS_KEY)\n column = dataset[field]\n array = column[dataset[SPLIT] == ground_truth_split].values # ground truth\n elif file_name.endswith('.npy'):\n array = np.load(file_name)\n elif file_name.endswith('.csv'):\n array = read_csv(file_name, header=None).values\n else:\n array = load_matrix(file_name, dtype)\n return array\n\n\ndef replace_file_extension(file_path, extension):\n \"\"\"\n Return a file path for a file with same name but different format.\n a.csv, json -> a.json\n a.csv, hdf5 -> a.hdf5\n :param file_path: original file path\n :param extension: file extension\n :return: file path with same name but different format\n \"\"\"\n if file_path is None:\n return None\n extension = extension.strip()\n if extension.startswith('.'):\n # Handle the case if the user calls with '.hdf5' instead of 'hdf5'\n extension = extension[1:]\n\n return os.path.splitext(file_path)[0] + '.' 
+ extension\n\n\ndef file_exists_with_diff_extension(file_path, extension):\n return file_path is None or \\\n os.path.isfile(replace_file_extension(file_path, extension))\n\n\ndef add_sequence_feature_column(df, col_name, seq_length):\n \"\"\"\n Adds a new column to the dataframe computed from an existing column.\n Values in the new column are space-delimited strings composed of preceding\n values of the same column up to seq_length.\n For example values of the i-th row of the new column will be a\n space-delimited string of df[col_name][i-seq_length].\n :param df: input dataframe\n :param col_name: column name containing sequential data\n :param seq_length: length of an array of preceeding column values to use\n \"\"\"\n if col_name not in df.columns.values:\n logger.error('{} column does not exist'.format(col_name))\n return\n\n new_col_name = col_name + '_feature'\n if new_col_name in df.columns.values:\n logger.warning(\n '{} column already exists, values will be overridden'.format(\n new_col_name\n )\n )\n\n new_data = [None] * seq_length\n old_data = np.array(df[col_name])\n\n for i in range(seq_length, len(df)):\n new_data.append(' '.join(\n str(j) for j in old_data[i - seq_length: i]\n ))\n\n df[new_col_name] = new_data\n df[new_col_name] = df[new_col_name].fillna(method='backfill')\n\n\ndef override_in_memory_flag(input_features, override_value):\n num_overrides = 0\n for feature in input_features:\n if PREPROCESSING in feature:\n if 'in_memory' in feature[PREPROCESSING]:\n feature[PREPROCESSING]['in_memory'] = override_value\n num_overrides += 1\n return num_overrides\n\n\ndef normalize_numpy(obj):\n if isinstance(obj, np.integer):\n return int(obj)\n elif isinstance(obj, np.floating):\n return float(obj)\n elif isinstance(obj, np.ndarray):\n return normalize_numpy(obj.tolist())\n elif isinstance(obj, list):\n return [normalize_numpy(v) for v in obj]\n else:\n return obj\n\n\nclass NumpyEncoder(json.JSONEncoder):\n def default(self, obj):\n if isinstance(obj, set):\n return list(obj)\n elif isinstance(obj, tuple):\n return list(obj)\n elif isinstance(obj, np.integer):\n return int(obj)\n elif isinstance(obj, np.floating):\n return float(obj)\n elif isinstance(obj, np.ndarray):\n return obj.tolist()\n else:\n return json.JSONEncoder.default(self, obj)\n\n\ndef generate_kfold_splits(data_df, num_folds, random_state):\n kf = KFold(n_splits=num_folds, shuffle=True, random_state=random_state)\n fold_num = 0\n for train_indices, test_indices in kf.split(data_df):\n fold_num += 1\n yield train_indices, test_indices, fold_num\n\n\ndef get_path_size(\n start_path,\n regex_accept=None,\n regex_reject=None\n):\n total_size = 0\n pattern_accept = re.compile(regex_accept) if regex_accept else None\n pattern_reject = re.compile(regex_reject) if regex_reject else None\n\n for dirpath, dirnames, filenames in os.walk(start_path):\n for filename in filenames:\n filepath = os.path.join(dirpath, filename)\n if not os.path.islink(filepath):\n accepted = True\n if pattern_accept:\n accepted = accepted and pattern_accept.match(filename)\n if pattern_reject:\n accepted = accepted and not pattern_reject.match(filename)\n if accepted:\n total_size += os.path.getsize(filepath)\n\n return total_size\n\n\ndef clear_data_cache():\n \"\"\"Clears any cached data objects (e.g., embeddings)\"\"\"\n load_glove.cache_clear()\n\n\ndef figure_data_format_dataset(dataset):\n if isinstance(dataset, pd.DataFrame):\n return pd.DataFrame\n elif isinstance(dataset, dict):\n return dict\n elif isinstance(dataset, str):\n 
dataset = dataset.lower()\n if dataset.endswith('.csv'):\n return 'csv'\n elif dataset.endswith('.tsv'):\n return 'tsv'\n elif dataset.endswith('.json'):\n return 'json'\n elif dataset.endswith('.jsonl'):\n return 'jsonl'\n elif (dataset.endswith('.xls') or dataset.endswith('.xlsx') or\n dataset.endswith('.xlsm') or dataset.endswith('.xlsb') or\n dataset.endswith('.odf') or dataset.endswith('.ods') or\n dataset.endswith('.odt')):\n return 'excel'\n elif dataset.endswith('.parquet'):\n return 'parquet'\n elif dataset.endswith('.pickle') or dataset.endswith('.p'):\n return 'pickle'\n elif dataset.endswith('.feather'):\n return 'feather'\n elif dataset.endswith('.fwf'):\n return 'fwf'\n elif dataset.endswith('.html'):\n return 'html'\n elif dataset.endswith('.orc'):\n return 'orc'\n elif dataset.endswith('.sas'):\n return 'sas'\n elif dataset.endswith('.spss'):\n return 'spss'\n elif dataset.endswith('.dta') or dataset.endswith('.stata'):\n return 'stata'\n elif dataset.endswith('.h5') or dataset.endswith('.hdf5'):\n return 'hdf5'\n else:\n raise ValueError(\n \"Dataset path string {} \"\n \"does not contain a valid extension\".format(dataset)\n )\n else:\n raise ValueError(\n \"Cannot figure out the format of dataset {}\".format(dataset)\n )\n\n\ndef figure_data_format(\n dataset=None, training_set=None, validation_set=None, test_set=None\n):\n if dataset is not None:\n data_format = figure_data_format_dataset(dataset)\n elif training_set is not None:\n data_formats = [figure_data_format_dataset(training_set)]\n if validation_set is not None:\n data_formats.append(figure_data_format_dataset(validation_set))\n if test_set is not None:\n data_formats.append(figure_data_format_dataset(test_set))\n data_formats_set = set(data_formats)\n if len(data_formats_set) > 1:\n error_message = \"Datasets have different formats. 
Training: \"\n error_message += str(data_formats[0])\n if validation_set:\n error_message = \", Validation: \"\n error_message += str(data_formats[1])\n if test_set:\n error_message = \", Test: \"\n error_message += str(data_formats[-1])\n raise ValueError(error_message)\n else:\n data_format = next(iter(data_formats_set))\n else:\n raise ValueError(\n \"At least one between dataset and training_set must be not None\"\n )\n return data_format\n\n\ndef is_model_dir(path: str) -> bool:\n hyperparameters_fn = os.path.join(path, MODEL_HYPERPARAMETERS_FILE_NAME)\n ts_metadata_fn = os.path.join(path, TRAIN_SET_METADATA_FILE_NAME)\n is_model_dir = False\n if (os.path.isdir(path)\n and os.path.isfile(hyperparameters_fn)\n and os.path.isfile(ts_metadata_fn)):\n weights_files_count = 0\n for file_name in os.listdir(path):\n if file_name.startswith(MODEL_WEIGHTS_FILE_NAME):\n weights_files_count += 1\n if weights_files_count >= 2:\n is_model_dir = True\n return is_model_dir\n\n\nexternal_data_reader_registry = {\n **{fmt: read_csv for fmt in CSV_FORMATS},\n **{fmt: read_tsv for fmt in TSV_FORMATS},\n **{fmt: read_json for fmt in JSON_FORMATS},\n **{fmt: read_jsonl for fmt in JSONL_FORMATS},\n **{fmt: read_excel for fmt in EXCEL_FORMATS},\n **{fmt: read_parquet for fmt in PARQUET_FORMATS},\n **{fmt: read_pickle for fmt in PICKLE_FORMATS},\n **{fmt: read_fwf for fmt in FWF_FORMATS},\n **{fmt: read_feather for fmt in FEATHER_FORMATS},\n **{fmt: read_html for fmt in HTML_FORMATS},\n **{fmt: read_orc for fmt in ORC_FORMATS},\n **{fmt: read_sas for fmt in SAS_FORMATS},\n **{fmt: read_spss for fmt in SPSS_FORMATS},\n **{fmt: read_stata for fmt in STATA_FORMATS}\n}\n", "path": "ludwig/utils/data_utils.py" } ]
[ { "content": "#! /usr/bin/env python\n# coding=utf-8\n# Copyright (c) 2019 Uber Technologies, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nimport collections\nimport csv\nimport functools\nimport json\nimport logging\nimport os.path\nimport pickle\nimport random\nimport re\n\nimport h5py\nimport numpy as np\nimport pandas as pd\nfrom pandas.errors import ParserError\nfrom sklearn.model_selection import KFold\n\nfrom ludwig.constants import PREPROCESSING, SPLIT, PROC_COLUMN\nfrom ludwig.globals import (MODEL_HYPERPARAMETERS_FILE_NAME,\n MODEL_WEIGHTS_FILE_NAME,\n TRAIN_SET_METADATA_FILE_NAME)\n\nlogger = logging.getLogger(__name__)\n\nDATASET_SPLIT_URL = 'dataset_{}_fp'\nDATA_PROCESSED_CACHE_DIR = 'data_processed_cache_dir'\nDATA_TRAIN_HDF5_FP = 'data_train_hdf5_fp'\nHDF5_COLUMNS_KEY = 'columns'\nDICT_FORMATS = {'dict', 'dictionary', dict}\nDATAFRAME_FORMATS = {'dataframe', 'df', pd.DataFrame}\nCSV_FORMATS = {'csv'}\nTSV_FORMATS = {'tsv'}\nJSON_FORMATS = {'json'}\nJSONL_FORMATS = {'jsonl'}\nEXCEL_FORMATS = {'excel'}\nPARQUET_FORMATS = {'parquet'}\nPICKLE_FORMATS = {'pickle'}\nFEATHER_FORMATS = {'feather'}\nFWF_FORMATS = {'fwf'}\nHTML_FORMATS = {'html'}\nORC_FORMATS = {'orc'}\nSAS_FORMATS = {'sas'}\nSPSS_FORMATS = {'spss'}\nSTATA_FORMATS = {'stata'}\nHDF5_FORMATS = {'hdf5', 'h5'}\nCACHEABLE_FORMATS = set.union(*(CSV_FORMATS, TSV_FORMATS,\n JSON_FORMATS, JSONL_FORMATS,\n EXCEL_FORMATS, PARQUET_FORMATS, PICKLE_FORMATS,\n FEATHER_FORMATS, FWF_FORMATS, HTML_FORMATS,\n ORC_FORMATS, SAS_FORMATS, SPSS_FORMATS,\n STATA_FORMATS))\n\nPANDAS_DF = pd\n\n\ndef get_split_path(dataset_fp):\n return os.path.splitext(dataset_fp)[0] + '.split.csv'\n\n\ndef get_abs_path(data_csv_path, file_path):\n if data_csv_path is not None:\n return os.path.join(data_csv_path, file_path)\n else:\n return file_path\n\n\ndef load_csv(data_fp):\n data = []\n with open(data_fp, 'rb') as f:\n data = list(csv.reader(f))\n return data\n\n\ndef read_xsv(data_fp, df_lib=PANDAS_DF, separator=',', header=0, nrows=None, skiprows=None):\n \"\"\"\n Helper method to read a csv file. Wraps around pd.read_csv to handle some\n exceptions. 
Can extend to cover cases as necessary\n :param data_fp: path to the xsv file\n :param df_lib: DataFrame library used to read in the CSV\n :param separator: defaults separator to use for splitting\n :param header: header argument for pandas to read the csv\n :param nrows: number of rows to read from the csv, None means all\n :param skiprows: number of rows to skip from the csv, None means no skips\n :return: Pandas dataframe with the data\n \"\"\"\n with open(data_fp, 'r', encoding=\"utf8\") as csvfile:\n try:\n dialect = csv.Sniffer().sniff(csvfile.read(1024 * 100),\n delimiters=[',', '\\t', '|'])\n separator = dialect.delimiter\n except csv.Error:\n # Could not conclude the delimiter, defaulting to user provided\n pass\n\n try:\n df = df_lib.read_csv(data_fp, sep=separator, header=header,\n nrows=nrows, skiprows=skiprows)\n except ParserError:\n logger.warning('Failed to parse the CSV with pandas default way,'\n ' trying \\\\ as escape character.')\n df = df_lib.read_csv(data_fp, sep=separator, header=header,\n escapechar='\\\\',\n nrows=nrows, skiprows=skiprows)\n\n return df\n\n\nread_csv = functools.partial(read_xsv, separator=',')\nread_tsv = functools.partial(read_xsv, separator='\\t')\n\n\ndef read_json(data_fp, df_lib, normalize=False):\n if normalize:\n return df_lib.json_normalize(load_json(data_fp))\n else:\n return df_lib.read_json(data_fp)\n\n\ndef read_jsonl(data_fp, df_lib):\n return df_lib.read_json(data_fp, lines=True)\n\n\ndef read_excel(data_fp, df_lib):\n fp_split = os.path.splitext(data_fp)\n if fp_split[1] == '.xls':\n excel_engine = 'xlrd'\n else:\n excel_engine = 'openpyxl'\n return df_lib.read_excel(data_fp, engine=excel_engine)\n\n\ndef read_parquet(data_fp, df_lib):\n return df_lib.read_parquet(data_fp)\n\n\ndef read_pickle(data_fp, df_lib):\n return df_lib.read_pickle(data_fp)\n\n\ndef read_fwf(data_fp, df_lib):\n return df_lib.read_fwf(data_fp)\n\n\ndef read_feather(data_fp, df_lib):\n return df_lib.read_feather(data_fp)\n\n\ndef read_html(data_fp, df_lib):\n return df_lib.read_html(data_fp)[0]\n\n\ndef read_orc(data_fp, df_lib):\n return df_lib.read_orc(data_fp)\n\n\ndef read_sas(data_fp, df_lib):\n return df_lib.read_sas(data_fp)\n\n\ndef read_spss(data_fp, df_lib):\n return df_lib.read_spss(data_fp)\n\n\ndef read_stata(data_fp, df_lib):\n return df_lib.read_stata(data_fp)\n\n\ndef save_csv(data_fp, data):\n with open(data_fp, 'w', encoding='utf-8') as csv_file:\n writer = csv.writer(csv_file)\n for row in data:\n if not isinstance(row, collections.Iterable) or isinstance(row,\n str):\n row = [row]\n writer.writerow(row)\n\n\ndef csv_contains_column(data_fp, column_name):\n return column_name in read_csv(data_fp, nrows=0) # only loads header\n\n\ndef load_json(data_fp):\n with open(data_fp, 'r') as input_file:\n data = json.load(input_file)\n return data\n\n\ndef save_json(data_fp, data, sort_keys=True, indent=4):\n with open(data_fp, 'w') as output_file:\n json.dump(data, output_file, cls=NumpyEncoder, sort_keys=sort_keys,\n indent=indent)\n\n\ndef to_numpy_dataset(df):\n dataset = {}\n for col in df.columns:\n dataset[col] = np.stack(df[col].to_numpy())\n return dataset\n\n\ndef from_numpy_dataset(dataset):\n col_mapping = {}\n for k, v in dataset.items():\n if len(v.shape) > 1:\n # unstacking, needed for ndarrays of dimension 2 and more\n *vals, = v\n else:\n # not unstacking. 
Needed because otherwise pandas casts types\n # the way it wants, like converting a list of float32 scalats\n # to a column of float64\n vals = v\n col_mapping[k] = vals\n return pd.DataFrame.from_dict(col_mapping)\n\n\ndef save_hdf5(data_fp, data):\n mode = 'w'\n if os.path.isfile(data_fp):\n mode = 'r+'\n\n numpy_dataset = to_numpy_dataset(data)\n with h5py.File(data_fp, mode) as h5_file:\n h5_file.create_dataset(HDF5_COLUMNS_KEY, data=np.array(data.columns.values, dtype='S'))\n for column in data.columns:\n h5_file.create_dataset(column, data=numpy_dataset[column])\n\n\ndef load_hdf5(data_fp):\n hdf5_data = h5py.File(data_fp, 'r')\n columns = [s.decode('utf-8') for s in hdf5_data[HDF5_COLUMNS_KEY][()].tolist()]\n\n numpy_dataset = {}\n for column in columns:\n numpy_dataset[column] = hdf5_data[column][()]\n\n return from_numpy_dataset(numpy_dataset)\n\n\ndef load_object(object_fp):\n with open(object_fp, 'rb') as f:\n return pickle.load(f)\n\n\ndef save_object(object_fp, obj):\n with open(object_fp, 'wb') as f:\n pickle.dump(obj, f)\n\n\ndef load_array(data_fp, dtype=float):\n list_num = []\n with open(data_fp, 'r') as input_file:\n for x in input_file:\n list_num.append(dtype(x.strip()))\n return np.array(list_num)\n\n\ndef load_matrix(data_fp, dtype=float):\n list_num = []\n with open(data_fp, 'r') as input_file:\n for row in input_file:\n list_num.append([dtype(elem) for elem in row.strip().split()])\n return np.squeeze(np.array(list_num))\n\n\ndef save_array(data_fp, array):\n with open(data_fp, 'w') as output_file:\n for x in np.nditer(array):\n output_file.write(str(x) + '\\n')\n\n\ndef load_pretrained_embeddings(embeddings_path, vocab):\n embeddings = load_glove(embeddings_path)\n\n # find out the size of the embeddings\n embeddings_size = len(next(iter(embeddings.values())))\n\n # calculate an average embedding, to use for initializing missing words\n avg_embedding = np.zeros(embeddings_size)\n count = 0\n for word in vocab:\n if word in embeddings:\n avg_embedding += embeddings[word]\n count += 1\n if count > 0:\n avg_embedding /= count\n\n # create the embedding matrix\n embeddings_vectors = []\n for word in vocab:\n if word in embeddings:\n embeddings_vectors.append(embeddings[word])\n else:\n embeddings_vectors.append(\n avg_embedding + np.random.uniform(-0.01, 0.01, embeddings_size)\n )\n embeddings_matrix = np.stack(embeddings_vectors)\n\n # let's help the garbage collector free some memory\n embeddings = None\n\n return embeddings_matrix\n\n\[email protected]_cache(1)\ndef load_glove(file_path):\n logger.info(' Loading Glove format file {}'.format(file_path))\n embeddings = {}\n embedding_size = 0\n\n # collect embeddings size assuming the first line is correct\n with open(file_path, 'r', encoding='utf-8') as f:\n found_line = False\n while not found_line:\n line = f.readline()\n if line:\n embedding_size = len(line.split()) - 1\n found_line = True\n\n # collect embeddings\n with open(file_path, 'r', encoding='utf-8') as f:\n for line_number, line in enumerate(f):\n if line:\n try:\n split = line.split()\n if len(split) != embedding_size + 1:\n raise ValueError\n word = split[0]\n embedding = np.array(\n [float(val) for val in split[-embedding_size:]]\n )\n embeddings[word] = embedding\n except ValueError:\n logger.warning(\n 'Line {} in the GloVe file {} is malformed, '\n 'skipping it'.format(\n line_number, file_path\n )\n )\n logger.info(' {0} embeddings loaded'.format(len(embeddings)))\n return embeddings\n\n\ndef split_data(split, data):\n # type: (float, list) -> 
(list, list)\n split_length = int(round(split * len(data)))\n random.shuffle(data)\n return data[:split_length], data[split_length:]\n\n\ndef shuffle_unison_inplace(list_of_lists, random_state=None):\n if list_of_lists:\n assert all(len(l) == len(list_of_lists[0]) for l in list_of_lists)\n if random_state is not None:\n p = random_state.permutation(len(list_of_lists[0]))\n else:\n p = np.random.permutation(len(list_of_lists[0]))\n return [l[p] for l in list_of_lists]\n return None\n\n\ndef shuffle_dict_unison_inplace(np_dict, random_state=None):\n keys = list(np_dict.keys())\n list_of_lists = list(np_dict.values())\n\n # shuffle up the list of lists according to previous fct\n shuffled_list = shuffle_unison_inplace(list_of_lists, random_state)\n\n recon = {}\n for ii in range(len(keys)):\n dkey = keys[ii]\n recon[dkey] = shuffled_list[ii]\n\n # we've shuffled the dictionary in place!\n return recon\n\n\ndef split_dataset_ttv(dataset, split):\n training_set = split_dataset(dataset, split, 0)\n validation_set = split_dataset(dataset, split, 1)\n test_set = split_dataset(dataset, split, 2)\n return training_set, test_set, validation_set\n\n\ndef split_dataset(dataset, split, value_to_split=0):\n split_df = dataset[dataset[split] == value_to_split]\n if len(split_df) == 0:\n return None\n return split_df.reset_index()\n\n\ndef collapse_rare_labels(labels, labels_limit):\n if labels_limit > 0:\n labels[labels >= labels_limit] = labels_limit\n return labels\n\n\ndef class_counts(dataset, labels_field):\n return np.bincount(dataset[labels_field].flatten()).tolist()\n\n\ndef text_feature_data_field(text_feature):\n return text_feature[PROC_COLUMN] + '_' + text_feature['level']\n\n\ndef load_from_file(file_name, field=None, dtype=int, ground_truth_split=2):\n \"\"\"Load experiment data from supported file formats.\n\n Experiment data can be test/train statistics, model predictions,\n probability, ground truth, ground truth metadata.\n :param file_name: Path to file to be loaded\n :param field: Target Prediction field.\n :param dtype:\n :param ground_truth_split: Ground truth split filter where 0 is train 1 is\n validation and 2 is test split. By default test split is used when loading\n ground truth from hdf5.\n :return: Experiment data as array\n \"\"\"\n if file_name.endswith('.hdf5') and field is not None:\n dataset = pd.read_hdf(file_name, key=HDF5_COLUMNS_KEY)\n column = dataset[field]\n array = column[dataset[SPLIT] == ground_truth_split].values # ground truth\n elif file_name.endswith('.npy'):\n array = np.load(file_name)\n elif file_name.endswith('.csv'):\n array = read_csv(file_name, header=None).values\n else:\n array = load_matrix(file_name, dtype)\n return array\n\n\ndef replace_file_extension(file_path, extension):\n \"\"\"\n Return a file path for a file with same name but different format.\n a.csv, json -> a.json\n a.csv, hdf5 -> a.hdf5\n :param file_path: original file path\n :param extension: file extension\n :return: file path with same name but different format\n \"\"\"\n if file_path is None:\n return None\n extension = extension.strip()\n if extension.startswith('.'):\n # Handle the case if the user calls with '.hdf5' instead of 'hdf5'\n extension = extension[1:]\n\n return os.path.splitext(file_path)[0] + '.' 
+ extension\n\n\ndef file_exists_with_diff_extension(file_path, extension):\n return file_path is None or \\\n os.path.isfile(replace_file_extension(file_path, extension))\n\n\ndef add_sequence_feature_column(df, col_name, seq_length):\n \"\"\"\n Adds a new column to the dataframe computed from an existing column.\n Values in the new column are space-delimited strings composed of preceding\n values of the same column up to seq_length.\n For example values of the i-th row of the new column will be a\n space-delimited string of df[col_name][i-seq_length].\n :param df: input dataframe\n :param col_name: column name containing sequential data\n :param seq_length: length of an array of preceeding column values to use\n \"\"\"\n if col_name not in df.columns.values:\n logger.error('{} column does not exist'.format(col_name))\n return\n\n new_col_name = col_name + '_feature'\n if new_col_name in df.columns.values:\n logger.warning(\n '{} column already exists, values will be overridden'.format(\n new_col_name\n )\n )\n\n new_data = [None] * seq_length\n old_data = np.array(df[col_name])\n\n for i in range(seq_length, len(df)):\n new_data.append(' '.join(\n str(j) for j in old_data[i - seq_length: i]\n ))\n\n df[new_col_name] = new_data\n df[new_col_name] = df[new_col_name].fillna(method='backfill')\n\n\ndef override_in_memory_flag(input_features, override_value):\n num_overrides = 0\n for feature in input_features:\n if PREPROCESSING in feature:\n if 'in_memory' in feature[PREPROCESSING]:\n feature[PREPROCESSING]['in_memory'] = override_value\n num_overrides += 1\n return num_overrides\n\n\ndef normalize_numpy(obj):\n if isinstance(obj, np.integer):\n return int(obj)\n elif isinstance(obj, np.floating):\n return float(obj)\n elif isinstance(obj, np.ndarray):\n return normalize_numpy(obj.tolist())\n elif isinstance(obj, list):\n return [normalize_numpy(v) for v in obj]\n else:\n return obj\n\n\nclass NumpyEncoder(json.JSONEncoder):\n def default(self, obj):\n if isinstance(obj, set):\n return list(obj)\n elif isinstance(obj, tuple):\n return list(obj)\n elif isinstance(obj, np.integer):\n return int(obj)\n elif isinstance(obj, np.floating):\n return float(obj)\n elif isinstance(obj, np.ndarray):\n return obj.tolist()\n else:\n return json.JSONEncoder.default(self, obj)\n\n\ndef generate_kfold_splits(data_df, num_folds, random_state):\n kf = KFold(n_splits=num_folds, shuffle=True, random_state=random_state)\n fold_num = 0\n for train_indices, test_indices in kf.split(data_df):\n fold_num += 1\n yield train_indices, test_indices, fold_num\n\n\ndef get_path_size(\n start_path,\n regex_accept=None,\n regex_reject=None\n):\n total_size = 0\n pattern_accept = re.compile(regex_accept) if regex_accept else None\n pattern_reject = re.compile(regex_reject) if regex_reject else None\n\n for dirpath, dirnames, filenames in os.walk(start_path):\n for filename in filenames:\n filepath = os.path.join(dirpath, filename)\n if not os.path.islink(filepath):\n accepted = True\n if pattern_accept:\n accepted = accepted and pattern_accept.match(filename)\n if pattern_reject:\n accepted = accepted and not pattern_reject.match(filename)\n if accepted:\n total_size += os.path.getsize(filepath)\n\n return total_size\n\n\ndef clear_data_cache():\n \"\"\"Clears any cached data objects (e.g., embeddings)\"\"\"\n load_glove.cache_clear()\n\n\ndef figure_data_format_dataset(dataset):\n if isinstance(dataset, pd.DataFrame):\n return pd.DataFrame\n elif isinstance(dataset, dict):\n return dict\n elif isinstance(dataset, str):\n 
dataset = dataset.lower()\n if dataset.endswith('.csv'):\n return 'csv'\n elif dataset.endswith('.tsv'):\n return 'tsv'\n elif dataset.endswith('.json'):\n return 'json'\n elif dataset.endswith('.jsonl'):\n return 'jsonl'\n elif (dataset.endswith('.xls') or dataset.endswith('.xlsx') or\n dataset.endswith('.xlsm') or dataset.endswith('.xlsb') or\n dataset.endswith('.odf') or dataset.endswith('.ods') or\n dataset.endswith('.odt')):\n return 'excel'\n elif dataset.endswith('.parquet'):\n return 'parquet'\n elif dataset.endswith('.pickle') or dataset.endswith('.p'):\n return 'pickle'\n elif dataset.endswith('.feather'):\n return 'feather'\n elif dataset.endswith('.fwf'):\n return 'fwf'\n elif dataset.endswith('.html'):\n return 'html'\n elif dataset.endswith('.orc'):\n return 'orc'\n elif dataset.endswith('.sas'):\n return 'sas'\n elif dataset.endswith('.spss'):\n return 'spss'\n elif dataset.endswith('.dta') or dataset.endswith('.stata'):\n return 'stata'\n elif dataset.endswith('.h5') or dataset.endswith('.hdf5'):\n return 'hdf5'\n else:\n raise ValueError(\n \"Dataset path string {} \"\n \"does not contain a valid extension\".format(dataset)\n )\n else:\n raise ValueError(\n \"Cannot figure out the format of dataset {}\".format(dataset)\n )\n\n\ndef figure_data_format(\n dataset=None, training_set=None, validation_set=None, test_set=None\n):\n if dataset is not None:\n data_format = figure_data_format_dataset(dataset)\n elif training_set is not None:\n data_formats = [figure_data_format_dataset(training_set)]\n if validation_set is not None:\n data_formats.append(figure_data_format_dataset(validation_set))\n if test_set is not None:\n data_formats.append(figure_data_format_dataset(test_set))\n data_formats_set = set(data_formats)\n if len(data_formats_set) > 1:\n error_message = \"Datasets have different formats. 
Training: \"\n error_message += str(data_formats[0])\n if validation_set:\n error_message = \", Validation: \"\n error_message += str(data_formats[1])\n if test_set:\n error_message = \", Test: \"\n error_message += str(data_formats[-1])\n raise ValueError(error_message)\n else:\n data_format = next(iter(data_formats_set))\n else:\n raise ValueError(\n \"At least one between dataset and training_set must be not None\"\n )\n return data_format\n\n\ndef is_model_dir(path: str) -> bool:\n hyperparameters_fn = os.path.join(path, MODEL_HYPERPARAMETERS_FILE_NAME)\n ts_metadata_fn = os.path.join(path, TRAIN_SET_METADATA_FILE_NAME)\n is_model_dir = False\n if (os.path.isdir(path)\n and os.path.isfile(hyperparameters_fn)\n and os.path.isfile(ts_metadata_fn)):\n weights_files_count = 0\n for file_name in os.listdir(path):\n if file_name.startswith(MODEL_WEIGHTS_FILE_NAME):\n weights_files_count += 1\n if weights_files_count >= 2:\n is_model_dir = True\n return is_model_dir\n\n\nexternal_data_reader_registry = {\n **{fmt: read_csv for fmt in CSV_FORMATS},\n **{fmt: read_tsv for fmt in TSV_FORMATS},\n **{fmt: read_json for fmt in JSON_FORMATS},\n **{fmt: read_jsonl for fmt in JSONL_FORMATS},\n **{fmt: read_excel for fmt in EXCEL_FORMATS},\n **{fmt: read_parquet for fmt in PARQUET_FORMATS},\n **{fmt: read_pickle for fmt in PICKLE_FORMATS},\n **{fmt: read_fwf for fmt in FWF_FORMATS},\n **{fmt: read_feather for fmt in FEATHER_FORMATS},\n **{fmt: read_html for fmt in HTML_FORMATS},\n **{fmt: read_orc for fmt in ORC_FORMATS},\n **{fmt: read_sas for fmt in SAS_FORMATS},\n **{fmt: read_spss for fmt in SPSS_FORMATS},\n **{fmt: read_stata for fmt in STATA_FORMATS}\n}\n", "path": "ludwig/utils/data_utils.py" } ]
diff --git a/ludwig/utils/data_utils.py b/ludwig/utils/data_utils.py index a3791d77836..51fe15f52e2 100644 --- a/ludwig/utils/data_utils.py +++ b/ludwig/utils/data_utils.py @@ -136,7 +136,12 @@ def read_jsonl(data_fp, df_lib): def read_excel(data_fp, df_lib): - return df_lib.read_excel(data_fp) + fp_split = os.path.splitext(data_fp) + if fp_split[1] == '.xls': + excel_engine = 'xlrd' + else: + excel_engine = 'openpyxl' + return df_lib.read_excel(data_fp, engine=excel_engine) def read_parquet(data_fp, df_lib): diff --git a/requirements.txt b/requirements.txt index c32e4120519..9227f66a8cd 100644 --- a/requirements.txt +++ b/requirements.txt @@ -15,7 +15,7 @@ requests # new data format support xlwt # excel -xlrd<2 # excel +xlrd # excel openpyxl # excel pyarrow # parquet lxml # html diff --git a/tests/integration_tests/test_experiment.py b/tests/integration_tests/test_experiment.py index 001ffde62ea..df997722fa3 100644 --- a/tests/integration_tests/test_experiment.py +++ b/tests/integration_tests/test_experiment.py @@ -435,7 +435,7 @@ def test_experiment_image_dataset( DATA_FORMATS_TO_TEST = [ - 'csv', 'df', 'dict', 'excel', 'feather', 'fwf', 'hdf5', 'html', + 'csv', 'df', 'dict', 'excel', 'excel_xls', 'feather', 'fwf', 'hdf5', 'html', 'json', 'jsonl', 'parquet', 'pickle', 'stata', 'tsv' ] @pytest.mark.parametrize('data_format', DATA_FORMATS_TO_TEST) diff --git a/tests/integration_tests/utils.py b/tests/integration_tests/utils.py index 9de747f8f14..ac6ba67b8b8 100644 --- a/tests/integration_tests/utils.py +++ b/tests/integration_tests/utils.py @@ -540,6 +540,13 @@ def to_fwf(df, fname): index=False ) + elif data_format == 'excel_xls': + dataset_to_use = replace_file_extension(raw_data, 'xls') + pd.read_csv(raw_data).to_excel( + dataset_to_use, + index=False + ) + elif data_format == 'feather': dataset_to_use = replace_file_extension(raw_data, 'feather') pd.read_csv(raw_data).to_feather(
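The diff above pins the Excel reader engine per file extension because `xlrd` 2.x only reads the legacy `.xls` format. A minimal standalone sketch of that selection logic, assuming `pandas` is the dataframe library passed in as `df_lib` in the patch:

```python
# Standalone sketch of the engine selection from the diff above,
# assuming pandas as the dataframe library (df_lib in the patch).
import os
import pandas as pd

def read_excel(data_fp, df_lib=pd):
    # xlrd >= 2.0 dropped .xlsx support, so only use it for legacy .xls files;
    # everything else (.xlsx, .xlsm, ...) goes through openpyxl.
    _, ext = os.path.splitext(data_fp)
    engine = 'xlrd' if ext == '.xls' else 'openpyxl'
    return df_lib.read_excel(data_fp, engine=engine)
```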
liqd__a4-product-1097
archived projects accessible via activity feed
At https://www.beteiligung.in/liqd/ all projects are private, but I can still see the content of the projects if I click on the activity feed, even when I am not signed in.
[ { "content": "from django.contrib.messages.views import SuccessMessageMixin\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views import generic\nfrom django.views.generic import DetailView\n\nfrom adhocracy4.actions.models import Action\nfrom adhocracy4.projects.models import Project\nfrom adhocracy4.rules import mixins as rules_mixins\nfrom apps.projects import query\n\nfrom . import forms\nfrom .models import Organisation\n\n\nclass OrganisationView(DetailView):\n template_name = 'organisation_landing_page.html'\n model = Organisation\n slug_url_kwarg = 'organisation_slug'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n project_list = Project.objects\\\n .filter(organisation=self.object,\n is_archived=False,\n is_draft=False)\n project_list = query.filter_viewable(\n project_list, self.request.user\n )\n context['project_list'] = project_list\n\n context['action_list'] = Action.objects\\\n .filter(project__organisation=self.object)\\\n .filter_public()\\\n .exclude_updates()[:4]\n\n context['stats'] = {\n 'users': 1204,\n 'items': 3425,\n 'comments': 23234,\n 'ratings': 134234,\n }\n\n return context\n\n\nclass InformationView(DetailView):\n template_name = 'organisation_information.html'\n model = Organisation\n slug_url_kwarg = 'organisation_slug'\n\n\nclass ImprintView(DetailView):\n template_name = 'organisation_imprint.html'\n model = Organisation\n slug_url_kwarg = 'organisation_slug'\n\n\nclass OrganisationUpdateView(rules_mixins.PermissionRequiredMixin,\n SuccessMessageMixin,\n generic.UpdateView):\n model = Organisation\n form_class = forms.OrganisationForm\n slug_url_kwarg = 'organisation_slug'\n template_name = 'organisation_form.html'\n success_message = _('Organisation successfully updated.')\n permission_required = 'a4_candy_organisations.change_organisation'\n menu_item = 'organisation'\n\n def get_success_url(self):\n return self.request.path\n", "path": "apps/organisations/views.py" } ]
[ { "content": "from django.contrib.messages.views import SuccessMessageMixin\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views import generic\nfrom django.views.generic import DetailView\n\nfrom adhocracy4.actions.models import Action\nfrom adhocracy4.projects.models import Project\nfrom adhocracy4.rules import mixins as rules_mixins\nfrom apps.projects import query\n\nfrom . import forms\nfrom .models import Organisation\n\n\nclass OrganisationView(DetailView):\n template_name = 'organisation_landing_page.html'\n model = Organisation\n slug_url_kwarg = 'organisation_slug'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n project_list = Project.objects\\\n .filter(organisation=self.object,\n is_archived=False,\n is_draft=False)\n project_list = query.filter_viewable(\n project_list, self.request.user\n )\n context['project_list'] = project_list\n\n context['action_list'] = Action.objects\\\n .filter(project__organisation=self.object)\\\n .filter(project__is_archived=False) \\\n .filter_public()\\\n .exclude_updates()[:4]\n\n context['stats'] = {\n 'users': 1204,\n 'items': 3425,\n 'comments': 23234,\n 'ratings': 134234,\n }\n\n return context\n\n\nclass InformationView(DetailView):\n template_name = 'organisation_information.html'\n model = Organisation\n slug_url_kwarg = 'organisation_slug'\n\n\nclass ImprintView(DetailView):\n template_name = 'organisation_imprint.html'\n model = Organisation\n slug_url_kwarg = 'organisation_slug'\n\n\nclass OrganisationUpdateView(rules_mixins.PermissionRequiredMixin,\n SuccessMessageMixin,\n generic.UpdateView):\n model = Organisation\n form_class = forms.OrganisationForm\n slug_url_kwarg = 'organisation_slug'\n template_name = 'organisation_form.html'\n success_message = _('Organisation successfully updated.')\n permission_required = 'a4_candy_organisations.change_organisation'\n menu_item = 'organisation'\n\n def get_success_url(self):\n return self.request.path\n", "path": "apps/organisations/views.py" } ]
diff --git a/apps/organisations/views.py b/apps/organisations/views.py index 96ae1ace3..51a95ba94 100644 --- a/apps/organisations/views.py +++ b/apps/organisations/views.py @@ -31,6 +31,7 @@ def get_context_data(self, **kwargs): context['action_list'] = Action.objects\ .filter(project__organisation=self.object)\ + .filter(project__is_archived=False) \ .filter_public()\ .exclude_updates()[:4]
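The one-line change above keeps actions from archived projects out of the public activity feed. A hedged sketch of the same queryset pulled into a helper (the helper wrapper is an illustration, not how the repository structures it); the `project__is_archived` lookup follows the `Action` → `Project` foreign key:

```python
# Illustration only: the helper function is not part of the repository, but the
# queryset mirrors the patched OrganisationView.get_context_data above.
from adhocracy4.actions.models import Action

def public_action_feed(organisation, limit=4):
    return (
        Action.objects
        .filter(project__organisation=organisation)
        .filter(project__is_archived=False)  # the fix: hide archived projects
        .filter_public()
        .exclude_updates()[:limit]
    )
```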
sosreport__sos-1100
[rabbitmq] does not elide password in /etc/rabbitmq.config
Hi, sosreport as shipped in sos-3.2-36.eo7ost.1.noarch includes `default_user` and `default_pass` in `/etc/rabbitmq/rabbitmq.conf` without hiding them.
Regards,
Pablo
[ { "content": "# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n\n# You should have received a copy of the GNU General Public License along\n# with this program; if not, write to the Free Software Foundation, Inc.,\n# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n\nfrom sos.plugins import Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin\n\n\nclass RabbitMQ(Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin):\n \"\"\"RabbitMQ messaging service\n \"\"\"\n plugin_name = 'rabbitmq'\n profiles = ('services',)\n var_puppet_gen = \"/var/lib/config-data/puppet-generated/rabbitmq\"\n files = (\n '/etc/rabbitmq/rabbitmq.conf',\n var_puppet_gen + '/etc/rabbitmq/rabbitmq.config'\n )\n packages = ('rabbitmq-server',)\n\n def setup(self):\n container_status = self.get_command_output(\n \"docker ps -a --format='{{ .Names }}'\")\n\n in_container = False\n container_names = []\n if container_status['status'] == 0:\n for line in container_status['output'].splitlines():\n if line.startswith(\"rabbitmq\"):\n in_container = True\n container_names.append(line)\n\n if in_container:\n for container in container_names:\n self.add_cmd_output('docker logs {0}'.format(container))\n self.add_cmd_output(\n 'docker exec -t {0} rabbitmqctl report'\n .format(container)\n )\n else:\n self.add_cmd_output(\"rabbitmqctl report\")\n\n self.add_copy_spec([\n \"/etc/rabbitmq/*\",\n self.var_puppet_gen + \"/etc/rabbitmq/*\",\n self.var_puppet_gen + \"/etc/security/limits.d/\",\n self.var_puppet_gen + \"/etc/systemd/\"\n ])\n self.add_copy_spec([\n \"/var/log/rabbitmq/*\",\n \"/var/log/containers/rabbitmq/*\"\n ], sizelimit=self.get_option('log_size'))\n\n\n# vim: set et ts=4 sw=4 :\n", "path": "sos/plugins/rabbitmq.py" } ]
[ { "content": "# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n\n# You should have received a copy of the GNU General Public License along\n# with this program; if not, write to the Free Software Foundation, Inc.,\n# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n\nfrom sos.plugins import Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin\n\n\nclass RabbitMQ(Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin):\n \"\"\"RabbitMQ messaging service\n \"\"\"\n plugin_name = 'rabbitmq'\n profiles = ('services',)\n var_puppet_gen = \"/var/lib/config-data/puppet-generated/rabbitmq\"\n files = (\n '/etc/rabbitmq/rabbitmq.conf',\n var_puppet_gen + '/etc/rabbitmq/rabbitmq.config'\n )\n packages = ('rabbitmq-server',)\n\n def setup(self):\n container_status = self.get_command_output(\n \"docker ps -a --format='{{ .Names }}'\")\n\n in_container = False\n container_names = []\n if container_status['status'] == 0:\n for line in container_status['output'].splitlines():\n if line.startswith(\"rabbitmq\"):\n in_container = True\n container_names.append(line)\n\n if in_container:\n for container in container_names:\n self.add_cmd_output('docker logs {0}'.format(container))\n self.add_cmd_output(\n 'docker exec -t {0} rabbitmqctl report'\n .format(container)\n )\n else:\n self.add_cmd_output(\"rabbitmqctl report\")\n\n self.add_copy_spec([\n \"/etc/rabbitmq/*\",\n self.var_puppet_gen + \"/etc/rabbitmq/*\",\n self.var_puppet_gen + \"/etc/security/limits.d/\",\n self.var_puppet_gen + \"/etc/systemd/\"\n ])\n self.add_copy_spec([\n \"/var/log/rabbitmq/*\",\n \"/var/log/containers/rabbitmq/*\"\n ], sizelimit=self.get_option('log_size'))\n\n def postproc(self):\n self.do_file_sub(\"/etc/rabbitmq/rabbitmq.conf\",\n r\"(\\s*default_pass\\s*,\\s*)\\S+\", r\"\\1<<***>>},\")\n\n# vim: set et ts=4 sw=4 :\n", "path": "sos/plugins/rabbitmq.py" } ]
diff --git a/sos/plugins/rabbitmq.py b/sos/plugins/rabbitmq.py index 8057dd90a7..f7528f8e0c 100644 --- a/sos/plugins/rabbitmq.py +++ b/sos/plugins/rabbitmq.py @@ -60,5 +60,8 @@ def setup(self): "/var/log/containers/rabbitmq/*" ], sizelimit=self.get_option('log_size')) + def postproc(self): + self.do_file_sub("/etc/rabbitmq/rabbitmq.conf", + r"(\s*default_pass\s*,\s*)\S+", r"\1<<***>>},") # vim: set et ts=4 sw=4 :
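To sanity-check what the new `postproc` substitution does, here is a standalone re-creation of the regex against an invented rabbitmq config fragment (the sample credentials are made up for illustration):

```python
# Standalone check of the scrubbing regex from the patch above; the sample
# config lines and credentials are invented for illustration.
import re

sample = '  {default_user, <<"guest">>},\n  {default_pass, <<"s3cr3t">>},\n'
scrubbed = re.sub(r"(\s*default_pass\s*,\s*)\S+", r"\1<<***>>},", sample)
print(scrubbed)
# default_user is left untouched; the default_pass value is replaced by <<***>>,
```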
zenml-io__zenml-1388
[BUG]: S3 artifact store registration with single quotes produces SUPPORTED_SCHEMES error ### Contact Details [Optional] [email protected] ### System Information ZENML_LOCAL_VERSION: 0.34.0 ZENML_SERVER_VERSION: 0.34.0 ZENML_SERVER_DATABASE: mysql ZENML_SERVER_DEPLOYMENT_TYPE: other ZENML_CONFIG_DIR: C:\Users\Christian Versloot\AppData\Roaming\zenml ZENML_LOCAL_STORE_DIR: C:\Users\Christian Versloot\AppData\Roaming\zenml\local_stores ZENML_SERVER_URL: **removed** ZENML_ACTIVE_REPOSITORY_ROOT: None PYTHON_VERSION: 3.9.9 ENVIRONMENT: native SYSTEM_INFO: {'os': 'windows', 'windows_version_release': '10', 'windows_version': '10.0.19044', 'windows_version_service_pack': 'SP0', 'windows_version_os_type': 'Multiprocessor Free'} ACTIVE_WORKSPACE: default ACTIVE_STACK: default ACTIVE_USER: **removed** TELEMETRY_STATUS: enabled ANALYTICS_CLIENT_ID: 07bfbb21-15fa-4f7c-a6ea-27dee28902d9 ANALYTICS_USER_ID: e96e7521-73ed-4ff1-8aca-72d5b224a55e ANALYTICS_SERVER_ID: 9ed78563-2c57-4b86-9a6e-467ee05cc1c5 INTEGRATIONS: ['aws', 'github', 'kaniko', 'mlflow', 's3', 'scipy', 'sklearn'] PACKAGES: {'fsspec': '2022.11.0', 's3fs': '2022.11.0', 'certifi': '2021.10.8', 'pytz': '2021.3', 'pywin32': '305', 'setuptools': '60.1.0', 'cryptography': '36.0.1', 'kubernetes': '26.1.0', 'pyzmq': '25.0.0', 'sanic': '22.12.0', 'sanic-ext': '22.12.0', 'sanic-routing': '22.8.0', 'cattrs': '22.2.0', 'pycountry': '22.1.10', 'attrs': '22.1.0', 'gevent': '21.12.0', 'contextlib2': '21.6.0', 'argon2-cffi': '21.3.0', 'packaging': '21.3', 'azure-mgmt-resource': '21.2.1', 'argon2-cffi-bindings': '21.2.0', 'pyopenssl': '21.0.0', 'virtualenv': '20.19.0', 'clickclick': '20.10.2', 'pip': '20.3.4', 'azure-mgmt-storage': '20.1.0', 'gunicorn': '20.1.0', 'azure-storage-blob': '12.11.0', 'rich': '12.6.0', 'azure-mgmt-containerregistry': '10.1.0', 'websockets': '10.1', 'humanfriendly': '10.0', 'ipython': '8.11.0', 'pillow': '8.3.2', 'pyee': '8.2.2', 'click': '8.0.3', 'jupyter-client': '8.0.3', 'pyarrow': '8.0.0', 'ipywidgets': '7.7.3', 'nbconvert': '7.2.9', 'ipykernel': '6.21.2', 'notebook': '6.5.2', 'tornado': '6.2', 'multidict': '6.0.2', 'docker': '6.0.1', 'bleach': '6.0.0', 'pbr': '5.11.0', 'psutil': '5.9.0', 'traitlets': '5.9.0', 'nbformat': '5.7.3', 'pyyaml': '5.4.1', 'zope.interface': '5.4.0', 'cachetools': '5.3.0', 'jupyter-core': '5.2.0', 'decorator': '5.1.1', 'configobj': '5.0.6', 'azure-mgmt-redis': '5.0.0', 'smmap': '5.0.0', 'tqdm': '4.62.3', 'importlib-metadata': '4.11.3', 'conda': '4.11.0', 'beautifulsoup4': '4.10.0', 'rsa': '4.9', 'lxml': '4.8.0', 'opencv-python': '4.7.0.68', 'azure-mgmt-compute': '4.6.2', 'zope.event': '4.5.0', 'redis': '4.4.2', 'azure-mgmt-containerservice': '4.4.0', 'typing-extensions': '4.4.0', 'pyodbc': '4.0.32', 'gitdb': '4.0.10', 'mock': '4.0.3', 'async-timeout': '4.0.2', 'chardet': '4.0.0', 'progressbar2': '4.0.0', 'atlassian-python-api': '3.33.0', 'protobuf': '3.20.3', 'marshmallow': '3.19.0', 'ply': '3.11', 'filelock': '3.9.0', 'orjson': '3.8.5', 'aiohttp': '3.8.1', 'zipp': '3.8.0', 'aiofile': '3.7.4', 'widgetsnbextension': '3.6.2', 'tables': '3.6.1', 'gremlinpython': '3.6.1', 'constructs': '3.4.246', 'asyncio': '3.4.3', 'matplotlib': '3.4.3', 'asgiref': '3.4.1', 'markdown': '3.4.1', 'pyreadline3': '3.4.1', 'anyio': '3.4.0', 'h5py': '3.4.0', 'python-utils': '3.3.3', 'secretstorage': '3.3.3', 'aws-parallelcluster': '3.3.0', 'bcrypt': '3.2.2', 'pyproj': '3.2.1', 'jsonschema': '3.2.0', 'oauthlib': '3.2.0', 'wget': '3.2', 'gitpython': '3.1.31', 'aenum': '3.1.11', 'graphql-core': '3.1.7', 'idna': '3.1', 
'threadpoolctl': '3.1.0', 'prompt-toolkit': '3.0.38', 'flask-cors': '3.0.10', 'openpyxl': '3.0.9', 'jinja2': '3.0.3', 'azure-mgmt-nspkg': '3.0.2', 'azure-nspkg': '3.0.2', 'azure-mgmt-datalake-nspkg': '3.0.1', 'azure-mgmt-authorization': '3.0.0', 'azure-mgmt-logic': '3.0.0', 'geojson': '3.0.0', 'platformdirs': '3.0.0', 'sagemaker': '2.117.0', 'tritonclient': '2.30.0', 'requests': '2.26.0', 'imageio': '2.24.0', 'pycparser': '2.21', 'awswrangler': '2.17.0', 'fastjsonschema': '2.16.3', 'google-auth': '2.16.1', 'pygments': '2.14.0', 'typeguard': '2.13.3', 'connexion': '2.13.1', 'google-api-core': '2.11.0', 'paramiko': '2.11.0', 'psycopg2': '2.9.3', 'networkx': '2.8.8', 'python-dateutil': '2.8.2', 'numexpr': '2.8.1', 'azure-mgmt-network': '2.7.0', 'google-cloud-storage': '2.7.0', 'portalocker': '2.7.0', 'azure-mgmt-eventhub': '2.6.0', 'locust': '2.5.1', 'pyparsing': '2.4.7', 'aiobotocore': '2.4.2', 'google-resumable-media': '2.4.1', 'google-cloud-core': '2.3.2', 'soupsieve': '2.3.2', 'pyshp': '2.3.1', 'jsonpointer': '2.3', 'jupyter-server': '2.3.0', 'pyjwt': '2.3.0', 'asttokens': '2.2.1', 'cloudpickle': '2.2.1', 'azure-mgmt-devtestlabs': '2.2.0', 'geopy': '2.2.0', 'jsonpickle': '2.2.0', 'waitress': '2.1.2', 'mlflow': '2.1.1', 'azure-mgmt-dns': '2.1.0', 'azure-mgmt-notificationhubs': '2.1.0', 'azure-mgmt-search': '2.1.0', 'redshift-connector': '2.0.909', 'pywinpty': '2.0.10', 'charset-normalizer': '2.0.9', 'python-json-logger': '2.0.7', 'mistune': '2.0.5', 'flask': '2.0.2', 'kafka-python': '2.0.2', 'werkzeug': '2.0.2', 'itsdangerous': '2.0.1', 'markupsafe': '2.0.1', 'shapely': '2.0.1', 'argcomplete': '2.0.0', 'azure-mgmt-consumption': '2.0.0', 'azure-mgmt-powerbiembedded': '2.0.0', 'azure-mgmt-scheduler': '2.0.0', 'opensearch-py': '2.0.0', 'aws-cdk.assets': '1.192.0', 'aws-cdk.aws-acmpca': '1.192.0', 'aws-cdk.aws-apigateway': '1.192.0', 'aws-cdk.aws-applicationautoscaling': '1.192.0', 'aws-cdk.aws-autoscaling': '1.192.0', 'aws-cdk.aws-autoscaling-common': '1.192.0', 'aws-cdk.aws-autoscaling-hooktargets': '1.192.0', 'aws-cdk.aws-batch': '1.192.0', 'aws-cdk.aws-certificatemanager': '1.192.0', 'aws-cdk.aws-cloudformation': '1.192.0', 'aws-cdk.aws-cloudfront': '1.192.0', 'aws-cdk.aws-cloudwatch': '1.192.0', 'aws-cdk.aws-codebuild': '1.192.0', 'aws-cdk.aws-codecommit': '1.192.0', 'aws-cdk.aws-codeguruprofiler': '1.192.0', 'aws-cdk.aws-codestarnotifications': '1.192.0', 'aws-cdk.aws-cognito': '1.192.0', 'aws-cdk.aws-dynamodb': '1.192.0', 'aws-cdk.aws-ec2': '1.192.0', 'aws-cdk.aws-ecr': '1.192.0', 'aws-cdk.aws-ecr-assets': '1.192.0', 'aws-cdk.aws-ecs': '1.192.0', 'aws-cdk.aws-efs': '1.192.0', 'aws-cdk.aws-elasticloadbalancing': '1.192.0', 'aws-cdk.aws-elasticloadbalancingv2': '1.192.0', 'aws-cdk.aws-events': '1.192.0', 'aws-cdk.aws-fsx': '1.192.0', 'aws-cdk.aws-globalaccelerator': '1.192.0', 'aws-cdk.aws-iam': '1.192.0', 'aws-cdk.aws-imagebuilder': '1.192.0', 'aws-cdk.aws-kinesis': '1.192.0', 'aws-cdk.aws-kms': '1.192.0', 'aws-cdk.aws-lambda': '1.192.0', 'aws-cdk.aws-logs': '1.192.0', 'aws-cdk.aws-route53': '1.192.0', 'aws-cdk.aws-route53-targets': '1.192.0', 'aws-cdk.aws-s3': '1.192.0', 'aws-cdk.aws-s3-assets': '1.192.0', 'aws-cdk.aws-sam': '1.192.0', 'aws-cdk.aws-secretsmanager': '1.192.0', 'aws-cdk.aws-servicediscovery': '1.192.0', 'aws-cdk.aws-signer': '1.192.0', 'aws-cdk.aws-sns': '1.192.0', 'aws-cdk.aws-sns-subscriptions': '1.192.0', 'aws-cdk.aws-sqs': '1.192.0', 'aws-cdk.aws-ssm': '1.192.0', 'aws-cdk.aws-stepfunctions': '1.192.0', 'aws-cdk.cloud-assembly-schema': '1.192.0', 'aws-cdk.core': 
'1.192.0', 'aws-cdk.custom-resources': '1.192.0', 'aws-cdk.cx-api': '1.192.0', 'aws-cdk.region-info': '1.192.0', 'jsii': '1.75.0', 'googleapis-common-protos': '1.58.0', 'aws-sam-translator': '1.55.0', 'geographiclib': '1.52', 'grpcio': '1.51.1', 'azureml-core': '1.49.0', 'jsonpatch': '1.32', 'pg8000': '1.29.3', 'botocore': '1.27.59', 'boto3': '1.26.7', 'urllib3': '1.26.7', 'azure-core': '1.26.3', 'pyresample': '1.26.1', 'georaster': '1.25', 'numpy': '1.22.4', 'w3lib': '1.22.0', 'msal': '1.21.0', 'parse': '1.19.0', 'six': '1.16.0', 'cffi': '1.15.0', 'wrapt': '1.13.3', 'pydantic': '1.10.4', 'backoff': '1.10.0', 'pkginfo': '1.9.6', 'azure-mgmt-rdbms': '1.9.0', 'junit-xml': '1.9', 'python-rapidjson': '1.9', 'alembic': '1.8.1', 'yarl': '1.8.1', 'distro': '1.8.0', 'send2trash': '1.8.0', 'ppft': '1.7.6.6', 'passlib': '1.7.4', 'conda-package-handling': '1.7.3', 'scipy': '1.7.2', 'pysocks': '1.7.1', 'debugpy': '1.6.6', 'cftime': '1.6.2', 'netcdf4': '1.6.2', 'monotonic': '1.6', 'nest-asyncio': '1.5.6', 'configargparse': '1.5.3', 'geventhttpclient': '1.5.3', 'jsonpath-ng': '1.5.3', 'pandas': '1.5.2', 'asn1crypto': '1.5.1', 'azure-mgmt-containerinstance': '1.5.0', 'google-crc32c': '1.5.0', 'pandocfilters': '1.5.0', 'pynacl': '1.5.0', 'sqlalchemy': '1.4.40', 'menuinst': '1.4.18', 'appdirs': '1.4.4', 'scramp': '1.4.4', 'pyquery': '1.4.3', 'azure-storage-common': '1.4.2', 'analytics-python': '1.4.0', 'basemap': '1.3.4', 'pykdtree': '1.3.4', 'frozenlist': '1.3.3', 'azure-mgmt-core': '1.3.2', 'basemap-data': '1.3.2', 'basemap-data-hires': '1.3.2', 'kiwisolver': '1.3.2', 'aiosignal': '1.3.1', 'requests-oauthlib': '1.3.1', 'mlserver': '1.3.0.dev2', 'mlserver-mlflow': '1.3.0.dev2', 'htmlgenerator': '1.2.27', 'deprecated': '1.2.13', 'adal': '1.2.7', 'mako': '1.2.4', 'querystring-parser': '1.2.4', 'jschema-to-python': '1.2.3', 'websocket-client': '1.2.2', 'tinycss2': '1.2.1', 'pandas-stubs': '1.2.0.57', 'executing': '1.2.0', 'joblib': '1.2.0', 'scikit-learn': '1.2.0', 'sniffio': '1.2.0', 'azure-common': '1.1.28', 'greenlet': '1.1.2', 'jupyterlab-widgets': '1.1.2', 'requests-aws4auth': '1.1.2', 'azure-mgmt-keyvault': '1.1.0', 'cssselect': '1.1.0', 'et-xmlfile': '1.1.0', 'exceptiongroup': '1.1.0', 'openapi': '1.1.0', 'win-inet-pton': '1.1.0', 'brotli': '1.0.9', 'sarif-om': '1.0.4', 'msgpack': '1.0.3', 'pymysql': '1.0.2', 'pyppeteer': '1.0.2', 'smdebug-rulesconfig': '1.0.1', 'backports.weakref': '1.0.post1', 'azure-mgmt-datamigration': '1.0.0', 'azure-mgmt-eventgrid': '1.0.0', 'azure-mgmt-media': '1.0.0', 'backports.tempfile': '1.0', 'msal-extensions': '1.0.0', 'cfn-lint': '0.72.6', 'multiprocess': '0.70.14', 'fastapi': '0.70.0', 'azure-graphrbac': '0.61.1', 'numba': '0.56.4', 'azure-mgmt-trafficmanager': '0.50.0', 'shap': '0.41.0', 'llvmlite': '0.39.1', 'sqlalchemy-utils': '0.38.3', 'wheel': '0.37.0', 'azure-mgmt-web': '0.35.0', 'zenml': '0.34.0', 'cython': '0.29.33', 'prometheus-flask-exporter': '0.22.0', 'python-dotenv': '0.21.1', 'httplib2': '0.19.1', 'jedi': '0.18.2', 'validators': '0.18.2', 'pyrsistent': '0.18.1', 'uvicorn': '0.17.6', 'databricks-cli': '0.17.4', 'terminado': '0.17.1', 'openai': '0.16.0', 'prometheus-client': '0.16.0', 'starlette': '0.16.0', 'ruamel-yaml-conda': '0.15.80', 'starlette-exporter': '0.15.1', 'ariadne': '0.14.0', 'h11': '0.12.0', 'aioitertools': '0.11.0', 'pathspec': '0.11.0', 'knack': '0.10.1', 'python-terraform': '0.10.1', 'cycler': '0.10.0', 'jmespath': '0.10.0', 'requests-html': '0.10.0', 'caio': '0.9.3', 'azure-mgmt-sql': '0.9.1', 'commonmark': '0.9.1', 'tabulate': '0.8.10', 
'parso': '0.8.3', 'aiofiles': '0.8.0', 'aiokafka': '0.8.0', 'jeepney': '0.8.0', 'pickleshare': '0.7.5', 'schema': '0.7.5', 'nbclient': '0.7.2', 'defusedxml': '0.7.1', 'msrest': '0.7.1', 'brotlipy': '0.7.0', 'py-grpc-prometheus': '0.7.0', 'requests-auth-aws-sigv4': '0.7', 'watchgod': '0.7', 'msrestazure': '0.6.4', 'jupyter-events': '0.6.3', 'pycosat': '0.6.3', 'stack-data': '0.6.2', 'isodate': '0.6.1', 'azure-mgmt-datafactory': '0.6.0', 'azure-mgmt-datalake-analytics': '0.6.0', 's3transfer': '0.6.0', 'azure-mgmt-servicebus': '0.5.3', 'azure-mgmt-monitor': '0.5.2', 'nbclassic': '0.5.2', 'inflection': '0.5.1', 'ndg-httpsclient': '0.5.1', 'webencodings': '0.5.1', 'azure-mgmt-datalake-store': '0.5.0', 'azure-mgmt-iothub': '0.5.0', 'pyasn1': '0.4.8', 'colorama': '0.4.4', 'jupyter-server-terminals': '0.4.4', 'python-graphql-client': '0.4.3', 'sqlparse': '0.4.3', 'azure-mgmt-cosmosdb': '0.4.1', 'azure-mgmt-machinelearningcompute': '0.4.1', 'entrypoints': '0.4', 'parallel-pandas': '0.4.0', 'dill': '0.3.6', 'distlib': '0.3.6', 'apng': '0.3.4', 'pox': '0.3.2', 'azure-mgmt-recoveryservices': '0.3.0', 'azure-mgmt-recoveryservicesbackup': '0.3.0', 'click-params': '0.3.0', 'httptools': '0.3.0', 'pathos': '0.3.0', 'pysftp': '0.2.9', 'pyasn1-modules': '0.2.8', 'wcwidth': '0.2.6', 'jupyterlab-pygments': '0.2.2', 'notebook-shim': '0.2.2', 'pure-eval': '0.2.2', 'azure-mgmt-reservations': '0.2.1', 'wincertstore': '0.2', 'azure-mgmt-iothubprovisioningservices': '0.2.0', 'azure-mgmt-loganalytics': '0.2.0', 'azure-mgmt-msi': '0.2.0', 'azure-mgmt-servicefabric': '0.2.0', 'azure-mgmt-subscription': '0.2.0', 'backcall': '0.2.0', 'flask-basicauth': '0.2.0', 'google-pasta': '0.2.0', 'ipython-genutils': '0.2.0', 'fake-useragent': '0.1.11', 'bcpy': '0.1.8', 'matplotlib-inline': '0.1.6', 'protobuf3-to-dict': '0.1.5', 'rfc3339-validator': '0.1.4', 'comm': '0.1.2', 'azure-mgmt-hanaonazure': '0.1.1', 'azure-mgmt-managementpartner': '0.1.1', 'azure-mgmt-signalr': '0.1.1', 'rfc3986-validator': '0.1.1', 'azure-mgmt-devspaces': '0.1.0', 'azure-mgmt-iotcentral': '0.1.0', 'azure-mgmt-managementgroups': '0.1.0', 'azure-mgmt-maps': '0.1.0', 'azure-mgmt-marketplaceordering': '0.1.0', 'azure-mgmt-policyinsights': '0.1.0', 'azure-mgmt-relay': '0.1.0', 'sqlmodel': '0.0.8', 'slicer': '0.0.7', 'publication': '0.0.3', 'roundrobin': '0.0.2', 'sqlalchemy2-stubs': '0.0.2a32', 'bs4': '0.0.1', 'expertmodels': '0.0.4', 'weathercomputing': '0.0.6'} ### What happened? When adding an artifact store in the following way... ``` zenml artifact-store register infoplaza-ml-artifacts -f s3 --path='s3://bucketname' --authentication_secret=aws_s3_artifact_store ``` ...the following error is produced: ``` │ │ c:\users\public\miniconda\lib\site-packages\zenml\artifact_stores\base_artifact_store.py:157 in │ │ _ensure_artifact_store │ │ │ │ 154 │ │ if not any( │ │ 155 │ │ │ values["path"].startswith(i) for i in cls.SUPPORTED_SCHEMES │ │ 156 │ │ ): │ │ > 157 │ │ │ raise ArtifactStoreInterfaceError( │ │ 158 │ │ │ │ f"The path: '{values['path']}' you defined for your " │ │ 159 │ │ │ │ f"artifact store is not supported by the implementation of " │ │ 160 │ │ │ │ f"{cls.schema()['title']}, because it does not start with " │ └──────────────────────────────────────────────────────────────────────────────────────────────────┘ ArtifactStoreInterfaceError: The path: ''s3://bucketname'' you defined for your artifact store is not supported by the implementation of S3ArtifactStoreConfig, because it does not start with one of its supported schemes: {'s3://'}. 
``` Removing the single quotes allows me to successfully register the artifact store. ``` zenml artifact-store register infoplaza-ml-artifacts -f s3 --path=s3://bucketname --authentication_secret=aws_s3_artifact_store ``` The docs contain the first method, i.e. with single quotes, which may confuse users: https://docs.zenml.io/component-gallery/artifact-stores/s3 A solution would either: 1. Add the single quotes to `SUPPORTED_SCHEMES` or ignore it when matching, or 2. Adapt the docs to the working command. In my view (1) would be the preferred option to allow users most flexibility. ### Reproduction steps 1. `zenml artifact-store register infoplaza-ml-artifacts -f s3 --path='s3://bucketname' --authentication_secret=aws_s3_artifact_store` ### Relevant log output _No response_ ### Code of Conduct - [X] I agree to follow this project's Code of Conduct
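A likely root cause worth noting (an inference from the Windows environment listed in the report, not something stated in it): `cmd.exe` does not treat single quotes as shell quoting characters, so they reach the Python process verbatim as part of the argument value:

```python
# show_args.py -- tiny illustration of an assumed scenario, not from the report:
# on Windows cmd.exe, single quotes are not quoting characters, so they are
# passed through to the program inside the argument.
import sys
print(sys.argv[1:])

# cmd>  python show_args.py --path='s3://bucketname'
# ["--path='s3://bucketname'"]   <- quotes survive, so the s3:// prefix check fails
```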
[ { "content": "# Copyright (c) ZenML GmbH 2022. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at:\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express\n# or implied. See the License for the specific language governing\n# permissions and limitations under the License.\n\"\"\"The base interface to extend the ZenML artifact store.\"\"\"\nimport textwrap\nfrom abc import abstractmethod\nfrom typing import (\n Any,\n Callable,\n ClassVar,\n Dict,\n Iterable,\n List,\n Optional,\n Set,\n Tuple,\n Type,\n Union,\n cast,\n)\n\nfrom pydantic import root_validator\n\nfrom zenml.enums import StackComponentType\nfrom zenml.exceptions import ArtifactStoreInterfaceError\nfrom zenml.io import fileio\nfrom zenml.logger import get_logger\nfrom zenml.stack import Flavor, StackComponent, StackComponentConfig\nfrom zenml.utils import io_utils\n\nlogger = get_logger(__name__)\n\nPathType = Union[bytes, str]\n\n\ndef _sanitize_potential_path(potential_path: Any) -> Any:\n \"\"\"Sanitizes the input if it is a path.\n\n If the input is a **remote** path, this function replaces backslash path\n separators by forward slashes.\n\n Args:\n potential_path: Value that potentially refers to a (remote) path.\n\n Returns:\n The original input or a sanitized version of it in case of a remote\n path.\n \"\"\"\n if isinstance(potential_path, bytes):\n path = fileio.convert_to_str(potential_path)\n elif isinstance(potential_path, str):\n path = potential_path\n else:\n # Neither string nor bytes, this is not a path\n return potential_path\n\n if io_utils.is_remote(path):\n # If we have a remote path, replace windows path separators with\n # slashes\n import ntpath\n import posixpath\n\n path = path.replace(ntpath.sep, posixpath.sep)\n\n return path\n\n\ndef _sanitize_paths(_func: Callable[..., Any]) -> Callable[..., Any]:\n \"\"\"Sanitizes path inputs before calling the original function.\n\n Args:\n _func: The function for which to sanitize the inputs.\n\n Returns:\n Function that calls the input function with sanitized path inputs.\n \"\"\"\n\n def inner_function(*args: Any, **kwargs: Any) -> Any:\n \"\"\"Inner function.\n\n Args:\n *args: Positional args.\n **kwargs: Keyword args.\n\n Returns:\n Output of the input function called with sanitized paths.\n \"\"\"\n args = tuple(_sanitize_potential_path(arg) for arg in args)\n kwargs = {\n key: _sanitize_potential_path(value)\n for key, value in kwargs.items()\n }\n\n return _func(*args, **kwargs)\n\n return inner_function\n\n\nclass BaseArtifactStoreConfig(StackComponentConfig):\n \"\"\"Config class for `BaseArtifactStore`.\"\"\"\n\n path: str\n\n SUPPORTED_SCHEMES: ClassVar[Set[str]]\n\n @root_validator(skip_on_failure=True)\n def _ensure_artifact_store(cls, values: Dict[str, Any]) -> Any:\n \"\"\"Validator function for the Artifact Stores.\n\n Checks whether supported schemes are defined and the given path is\n supported.\n\n Args:\n values: The values to validate.\n\n Returns:\n The validated values.\n\n Raises:\n ArtifactStoreInterfaceError: If the scheme is not supported.\n \"\"\"\n try:\n getattr(cls, \"SUPPORTED_SCHEMES\")\n except AttributeError:\n raise ArtifactStoreInterfaceError(\n textwrap.dedent(\n \"\"\"\n 
When you are working with any classes which subclass from\n zenml.artifact_store.BaseArtifactStore please make sure\n that your class has a ClassVar named `SUPPORTED_SCHEMES`\n which should hold a set of supported file schemes such\n as {\"s3://\"} or {\"gcs://\"}.\n\n Example:\n\n class MyArtifactStoreConfig(BaseArtifactStoreConfig):\n ...\n # Class Variables\n SUPPORTED_SCHEMES: ClassVar[Set[str]] = {\"s3://\"}\n ...\n \"\"\"\n )\n )\n if not any(\n values[\"path\"].startswith(i) for i in cls.SUPPORTED_SCHEMES\n ):\n raise ArtifactStoreInterfaceError(\n f\"The path: '{values['path']}' you defined for your \"\n f\"artifact store is not supported by the implementation of \"\n f\"{cls.schema()['title']}, because it does not start with \"\n f\"one of its supported schemes: {cls.SUPPORTED_SCHEMES}.\"\n )\n\n return values\n\n\nclass BaseArtifactStore(StackComponent):\n \"\"\"Base class for all ZenML artifact stores.\"\"\"\n\n @property\n def config(self) -> BaseArtifactStoreConfig:\n \"\"\"Returns the `BaseArtifactStoreConfig` config.\n\n Returns:\n The configuration.\n \"\"\"\n return cast(BaseArtifactStoreConfig, self._config)\n\n @property\n def path(self) -> str:\n \"\"\"The path to the artifact store.\n\n Returns:\n The path.\n \"\"\"\n return self.config.path\n\n # --- User interface ---\n @abstractmethod\n def open(self, name: PathType, mode: str = \"r\") -> Any:\n \"\"\"Open a file at the given path.\n\n Args:\n name: The path of the file to open.\n mode: The mode to open the file.\n\n Returns:\n The file object.\n \"\"\"\n\n @abstractmethod\n def copyfile(\n self, src: PathType, dst: PathType, overwrite: bool = False\n ) -> None:\n \"\"\"Copy a file from the source to the destination.\n\n Args:\n src: The source path.\n dst: The destination path.\n overwrite: Whether to overwrite the destination file if it exists.\n \"\"\"\n\n @abstractmethod\n def exists(self, path: PathType) -> bool:\n \"\"\"Checks if a path exists.\n\n Args:\n path: The path to check.\n\n Returns:\n `True` if the path exists.\n \"\"\"\n\n @abstractmethod\n def glob(self, pattern: PathType) -> List[PathType]:\n \"\"\"Gets the paths that match a glob pattern.\n\n Args:\n pattern: The glob pattern.\n\n Returns:\n The list of paths that match the pattern.\n \"\"\"\n\n @abstractmethod\n def isdir(self, path: PathType) -> bool:\n \"\"\"Returns whether the given path points to a directory.\n\n Args:\n path: The path to check.\n\n Returns:\n `True` if the path points to a directory.\n \"\"\"\n\n @abstractmethod\n def listdir(self, path: PathType) -> List[PathType]:\n \"\"\"Returns a list of files under a given directory in the filesystem.\n\n Args:\n path: The path to list.\n\n Returns:\n The list of files under the given path.\n \"\"\"\n\n @abstractmethod\n def makedirs(self, path: PathType) -> None:\n \"\"\"Make a directory at the given path, recursively creating parents.\n\n Args:\n path: The path to create.\n \"\"\"\n\n @abstractmethod\n def mkdir(self, path: PathType) -> None:\n \"\"\"Make a directory at the given path; parent directory must exist.\n\n Args:\n path: The path to create.\n \"\"\"\n\n @abstractmethod\n def remove(self, path: PathType) -> None:\n \"\"\"Remove the file at the given path. 
Dangerous operation.\n\n Args:\n path: The path to remove.\n \"\"\"\n\n @abstractmethod\n def rename(\n self, src: PathType, dst: PathType, overwrite: bool = False\n ) -> None:\n \"\"\"Rename source file to destination file.\n\n Args:\n src: The source path.\n dst: The destination path.\n overwrite: Whether to overwrite the destination file if it exists.\n \"\"\"\n\n @abstractmethod\n def rmtree(self, path: PathType) -> None:\n \"\"\"Deletes dir recursively. Dangerous operation.\n\n Args:\n path: The path to delete.\n \"\"\"\n\n @abstractmethod\n def stat(self, path: PathType) -> Any:\n \"\"\"Return the stat descriptor for a given file path.\n\n Args:\n path: The path to check.\n\n Returns:\n The stat descriptor.\n \"\"\"\n\n def size(self, path: PathType) -> Optional[int]:\n \"\"\"Get the size of a file in bytes.\n\n Args:\n path: The path to the file.\n\n Returns:\n The size of the file in bytes or `None` if the artifact store\n does not implement the `size` method.\n \"\"\"\n logger.warning(\n \"Cannot get size of file '%s' since the artifact store %s does not \"\n \"implement the `size` method.\",\n path,\n self.__class__.__name__,\n )\n return None\n\n @abstractmethod\n def walk(\n self,\n top: PathType,\n topdown: bool = True,\n onerror: Optional[Callable[..., None]] = None,\n ) -> Iterable[Tuple[PathType, List[PathType], List[PathType]]]:\n \"\"\"Return an iterator that walks the contents of the given directory.\n\n Args:\n top: The path to walk.\n topdown: Whether to walk the top-down or bottom-up.\n onerror: The error handler.\n\n Returns:\n The iterator that walks the contents of the given directory.\n \"\"\"\n\n # --- Internal interface ---\n def __init__(self, *args: Any, **kwargs: Any) -> None:\n \"\"\"Initiate the Pydantic object and register the corresponding filesystem.\n\n Args:\n *args: The positional arguments to pass to the Pydantic object.\n **kwargs: The keyword arguments to pass to the Pydantic object.\n \"\"\"\n super(BaseArtifactStore, self).__init__(*args, **kwargs)\n self._register()\n\n def _register(self) -> None:\n \"\"\"Create and register a filesystem within the filesystem registry.\"\"\"\n from zenml.io.filesystem import BaseFilesystem\n from zenml.io.filesystem_registry import default_filesystem_registry\n from zenml.io.local_filesystem import LocalFilesystem\n\n # Local filesystem is always registered, no point in doing it again.\n if isinstance(self, LocalFilesystem):\n return\n\n filesystem_class = type(\n self.__class__.__name__,\n (BaseFilesystem,),\n {\n \"SUPPORTED_SCHEMES\": self.config.SUPPORTED_SCHEMES,\n \"open\": staticmethod(_sanitize_paths(self.open)),\n \"copyfile\": staticmethod(_sanitize_paths(self.copyfile)),\n \"exists\": staticmethod(_sanitize_paths(self.exists)),\n \"glob\": staticmethod(_sanitize_paths(self.glob)),\n \"isdir\": staticmethod(_sanitize_paths(self.isdir)),\n \"listdir\": staticmethod(_sanitize_paths(self.listdir)),\n \"makedirs\": staticmethod(_sanitize_paths(self.makedirs)),\n \"mkdir\": staticmethod(_sanitize_paths(self.mkdir)),\n \"remove\": staticmethod(_sanitize_paths(self.remove)),\n \"rename\": staticmethod(_sanitize_paths(self.rename)),\n \"rmtree\": staticmethod(_sanitize_paths(self.rmtree)),\n \"size\": staticmethod(_sanitize_paths(self.size)),\n \"stat\": staticmethod(_sanitize_paths(self.stat)),\n \"walk\": staticmethod(_sanitize_paths(self.walk)),\n },\n )\n\n default_filesystem_registry.register(filesystem_class)\n\n\nclass BaseArtifactStoreFlavor(Flavor):\n \"\"\"Base class for artifact store 
flavors.\"\"\"\n\n @property\n def type(self) -> StackComponentType:\n \"\"\"Returns the flavor type.\n\n Returns:\n The flavor type.\n \"\"\"\n return StackComponentType.ARTIFACT_STORE\n\n @property\n def config_class(self) -> Type[StackComponentConfig]:\n \"\"\"Config class for this flavor.\n\n Returns:\n The config class.\n \"\"\"\n return BaseArtifactStoreConfig\n\n @property\n @abstractmethod\n def implementation_class(self) -> Type[\"BaseArtifactStore\"]:\n \"\"\"Implementation class.\n\n Returns:\n The implementation class.\n \"\"\"\n", "path": "src/zenml/artifact_stores/base_artifact_store.py" } ]
[ { "content": "# Copyright (c) ZenML GmbH 2022. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at:\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express\n# or implied. See the License for the specific language governing\n# permissions and limitations under the License.\n\"\"\"The base interface to extend the ZenML artifact store.\"\"\"\nimport textwrap\nfrom abc import abstractmethod\nfrom typing import (\n Any,\n Callable,\n ClassVar,\n Dict,\n Iterable,\n List,\n Optional,\n Set,\n Tuple,\n Type,\n Union,\n cast,\n)\n\nfrom pydantic import root_validator\n\nfrom zenml.enums import StackComponentType\nfrom zenml.exceptions import ArtifactStoreInterfaceError\nfrom zenml.io import fileio\nfrom zenml.logger import get_logger\nfrom zenml.stack import Flavor, StackComponent, StackComponentConfig\nfrom zenml.utils import io_utils\n\nlogger = get_logger(__name__)\n\nPathType = Union[bytes, str]\n\n\ndef _sanitize_potential_path(potential_path: Any) -> Any:\n \"\"\"Sanitizes the input if it is a path.\n\n If the input is a **remote** path, this function replaces backslash path\n separators by forward slashes.\n\n Args:\n potential_path: Value that potentially refers to a (remote) path.\n\n Returns:\n The original input or a sanitized version of it in case of a remote\n path.\n \"\"\"\n if isinstance(potential_path, bytes):\n path = fileio.convert_to_str(potential_path)\n elif isinstance(potential_path, str):\n path = potential_path\n else:\n # Neither string nor bytes, this is not a path\n return potential_path\n\n if io_utils.is_remote(path):\n # If we have a remote path, replace windows path separators with\n # slashes\n import ntpath\n import posixpath\n\n path = path.replace(ntpath.sep, posixpath.sep)\n\n return path\n\n\ndef _sanitize_paths(_func: Callable[..., Any]) -> Callable[..., Any]:\n \"\"\"Sanitizes path inputs before calling the original function.\n\n Args:\n _func: The function for which to sanitize the inputs.\n\n Returns:\n Function that calls the input function with sanitized path inputs.\n \"\"\"\n\n def inner_function(*args: Any, **kwargs: Any) -> Any:\n \"\"\"Inner function.\n\n Args:\n *args: Positional args.\n **kwargs: Keyword args.\n\n Returns:\n Output of the input function called with sanitized paths.\n \"\"\"\n args = tuple(_sanitize_potential_path(arg) for arg in args)\n kwargs = {\n key: _sanitize_potential_path(value)\n for key, value in kwargs.items()\n }\n\n return _func(*args, **kwargs)\n\n return inner_function\n\n\nclass BaseArtifactStoreConfig(StackComponentConfig):\n \"\"\"Config class for `BaseArtifactStore`.\"\"\"\n\n path: str\n\n SUPPORTED_SCHEMES: ClassVar[Set[str]]\n\n @root_validator(skip_on_failure=True)\n def _ensure_artifact_store(cls, values: Dict[str, Any]) -> Any:\n \"\"\"Validator function for the Artifact Stores.\n\n Checks whether supported schemes are defined and the given path is\n supported.\n\n Args:\n values: The values to validate.\n\n Returns:\n The validated values.\n\n Raises:\n ArtifactStoreInterfaceError: If the scheme is not supported.\n \"\"\"\n try:\n getattr(cls, \"SUPPORTED_SCHEMES\")\n except AttributeError:\n raise ArtifactStoreInterfaceError(\n textwrap.dedent(\n \"\"\"\n 
When you are working with any classes which subclass from\n zenml.artifact_store.BaseArtifactStore please make sure\n that your class has a ClassVar named `SUPPORTED_SCHEMES`\n which should hold a set of supported file schemes such\n as {\"s3://\"} or {\"gcs://\"}.\n\n Example:\n\n class MyArtifactStoreConfig(BaseArtifactStoreConfig):\n ...\n # Class Variables\n SUPPORTED_SCHEMES: ClassVar[Set[str]] = {\"s3://\"}\n ...\n \"\"\"\n )\n )\n values[\"path\"] = values[\"path\"].strip(\"'\\\"`\")\n if not any(\n values[\"path\"].startswith(i) for i in cls.SUPPORTED_SCHEMES\n ):\n raise ArtifactStoreInterfaceError(\n f\"The path: '{values['path']}' you defined for your \"\n f\"artifact store is not supported by the implementation of \"\n f\"{cls.schema()['title']}, because it does not start with \"\n f\"one of its supported schemes: {cls.SUPPORTED_SCHEMES}.\"\n )\n\n return values\n\n\nclass BaseArtifactStore(StackComponent):\n \"\"\"Base class for all ZenML artifact stores.\"\"\"\n\n @property\n def config(self) -> BaseArtifactStoreConfig:\n \"\"\"Returns the `BaseArtifactStoreConfig` config.\n\n Returns:\n The configuration.\n \"\"\"\n return cast(BaseArtifactStoreConfig, self._config)\n\n @property\n def path(self) -> str:\n \"\"\"The path to the artifact store.\n\n Returns:\n The path.\n \"\"\"\n return self.config.path\n\n # --- User interface ---\n @abstractmethod\n def open(self, name: PathType, mode: str = \"r\") -> Any:\n \"\"\"Open a file at the given path.\n\n Args:\n name: The path of the file to open.\n mode: The mode to open the file.\n\n Returns:\n The file object.\n \"\"\"\n\n @abstractmethod\n def copyfile(\n self, src: PathType, dst: PathType, overwrite: bool = False\n ) -> None:\n \"\"\"Copy a file from the source to the destination.\n\n Args:\n src: The source path.\n dst: The destination path.\n overwrite: Whether to overwrite the destination file if it exists.\n \"\"\"\n\n @abstractmethod\n def exists(self, path: PathType) -> bool:\n \"\"\"Checks if a path exists.\n\n Args:\n path: The path to check.\n\n Returns:\n `True` if the path exists.\n \"\"\"\n\n @abstractmethod\n def glob(self, pattern: PathType) -> List[PathType]:\n \"\"\"Gets the paths that match a glob pattern.\n\n Args:\n pattern: The glob pattern.\n\n Returns:\n The list of paths that match the pattern.\n \"\"\"\n\n @abstractmethod\n def isdir(self, path: PathType) -> bool:\n \"\"\"Returns whether the given path points to a directory.\n\n Args:\n path: The path to check.\n\n Returns:\n `True` if the path points to a directory.\n \"\"\"\n\n @abstractmethod\n def listdir(self, path: PathType) -> List[PathType]:\n \"\"\"Returns a list of files under a given directory in the filesystem.\n\n Args:\n path: The path to list.\n\n Returns:\n The list of files under the given path.\n \"\"\"\n\n @abstractmethod\n def makedirs(self, path: PathType) -> None:\n \"\"\"Make a directory at the given path, recursively creating parents.\n\n Args:\n path: The path to create.\n \"\"\"\n\n @abstractmethod\n def mkdir(self, path: PathType) -> None:\n \"\"\"Make a directory at the given path; parent directory must exist.\n\n Args:\n path: The path to create.\n \"\"\"\n\n @abstractmethod\n def remove(self, path: PathType) -> None:\n \"\"\"Remove the file at the given path. 
Dangerous operation.\n\n Args:\n path: The path to remove.\n \"\"\"\n\n @abstractmethod\n def rename(\n self, src: PathType, dst: PathType, overwrite: bool = False\n ) -> None:\n \"\"\"Rename source file to destination file.\n\n Args:\n src: The source path.\n dst: The destination path.\n overwrite: Whether to overwrite the destination file if it exists.\n \"\"\"\n\n @abstractmethod\n def rmtree(self, path: PathType) -> None:\n \"\"\"Deletes dir recursively. Dangerous operation.\n\n Args:\n path: The path to delete.\n \"\"\"\n\n @abstractmethod\n def stat(self, path: PathType) -> Any:\n \"\"\"Return the stat descriptor for a given file path.\n\n Args:\n path: The path to check.\n\n Returns:\n The stat descriptor.\n \"\"\"\n\n def size(self, path: PathType) -> Optional[int]:\n \"\"\"Get the size of a file in bytes.\n\n Args:\n path: The path to the file.\n\n Returns:\n The size of the file in bytes or `None` if the artifact store\n does not implement the `size` method.\n \"\"\"\n logger.warning(\n \"Cannot get size of file '%s' since the artifact store %s does not \"\n \"implement the `size` method.\",\n path,\n self.__class__.__name__,\n )\n return None\n\n @abstractmethod\n def walk(\n self,\n top: PathType,\n topdown: bool = True,\n onerror: Optional[Callable[..., None]] = None,\n ) -> Iterable[Tuple[PathType, List[PathType], List[PathType]]]:\n \"\"\"Return an iterator that walks the contents of the given directory.\n\n Args:\n top: The path to walk.\n topdown: Whether to walk the top-down or bottom-up.\n onerror: The error handler.\n\n Returns:\n The iterator that walks the contents of the given directory.\n \"\"\"\n\n # --- Internal interface ---\n def __init__(self, *args: Any, **kwargs: Any) -> None:\n \"\"\"Initiate the Pydantic object and register the corresponding filesystem.\n\n Args:\n *args: The positional arguments to pass to the Pydantic object.\n **kwargs: The keyword arguments to pass to the Pydantic object.\n \"\"\"\n super(BaseArtifactStore, self).__init__(*args, **kwargs)\n self._register()\n\n def _register(self) -> None:\n \"\"\"Create and register a filesystem within the filesystem registry.\"\"\"\n from zenml.io.filesystem import BaseFilesystem\n from zenml.io.filesystem_registry import default_filesystem_registry\n from zenml.io.local_filesystem import LocalFilesystem\n\n # Local filesystem is always registered, no point in doing it again.\n if isinstance(self, LocalFilesystem):\n return\n\n filesystem_class = type(\n self.__class__.__name__,\n (BaseFilesystem,),\n {\n \"SUPPORTED_SCHEMES\": self.config.SUPPORTED_SCHEMES,\n \"open\": staticmethod(_sanitize_paths(self.open)),\n \"copyfile\": staticmethod(_sanitize_paths(self.copyfile)),\n \"exists\": staticmethod(_sanitize_paths(self.exists)),\n \"glob\": staticmethod(_sanitize_paths(self.glob)),\n \"isdir\": staticmethod(_sanitize_paths(self.isdir)),\n \"listdir\": staticmethod(_sanitize_paths(self.listdir)),\n \"makedirs\": staticmethod(_sanitize_paths(self.makedirs)),\n \"mkdir\": staticmethod(_sanitize_paths(self.mkdir)),\n \"remove\": staticmethod(_sanitize_paths(self.remove)),\n \"rename\": staticmethod(_sanitize_paths(self.rename)),\n \"rmtree\": staticmethod(_sanitize_paths(self.rmtree)),\n \"size\": staticmethod(_sanitize_paths(self.size)),\n \"stat\": staticmethod(_sanitize_paths(self.stat)),\n \"walk\": staticmethod(_sanitize_paths(self.walk)),\n },\n )\n\n default_filesystem_registry.register(filesystem_class)\n\n\nclass BaseArtifactStoreFlavor(Flavor):\n \"\"\"Base class for artifact store 
flavors.\"\"\"\n\n @property\n def type(self) -> StackComponentType:\n \"\"\"Returns the flavor type.\n\n Returns:\n The flavor type.\n \"\"\"\n return StackComponentType.ARTIFACT_STORE\n\n @property\n def config_class(self) -> Type[StackComponentConfig]:\n \"\"\"Config class for this flavor.\n\n Returns:\n The config class.\n \"\"\"\n return BaseArtifactStoreConfig\n\n @property\n @abstractmethod\n def implementation_class(self) -> Type[\"BaseArtifactStore\"]:\n \"\"\"Implementation class.\n\n Returns:\n The implementation class.\n \"\"\"\n", "path": "src/zenml/artifact_stores/base_artifact_store.py" } ]
diff --git a/src/zenml/artifact_stores/base_artifact_store.py b/src/zenml/artifact_stores/base_artifact_store.py index 780cf2da817..d23f0a02dd1 100644 --- a/src/zenml/artifact_stores/base_artifact_store.py +++ b/src/zenml/artifact_stores/base_artifact_store.py @@ -151,6 +151,7 @@ class MyArtifactStoreConfig(BaseArtifactStoreConfig): """ ) ) + values["path"] = values["path"].strip("'\"`") if not any( values["path"].startswith(i) for i in cls.SUPPORTED_SCHEMES ): diff --git a/tests/unit/artifact_stores/test_base_artifact_store.py b/tests/unit/artifact_stores/test_base_artifact_store.py index fff930dca81..e519722c21b 100644 --- a/tests/unit/artifact_stores/test_base_artifact_store.py +++ b/tests/unit/artifact_stores/test_base_artifact_store.py @@ -11,3 +11,36 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express # or implied. See the License for the specific language governing # permissions and limitations under the License. +import pytest + +from zenml.artifact_stores.base_artifact_store import BaseArtifactStoreConfig +from zenml.exceptions import ArtifactStoreInterfaceError + + +class TestBaseArtifactStoreConfig: + class AriaArtifactStoreConfig(BaseArtifactStoreConfig): + SUPPORTED_SCHEMES = {"aria://"} + + @pytest.mark.parametrize( + "path", + [ + "aria://my-bucket/my-folder/my-file.txt", + "'aria://my-bucket/my-folder/my-file.txt'", + "`aria://my-bucket/my-folder/my-file.txt`", + '"aria://my-bucket/my-folder/my-file.txt"', + ], + ) + def test_valid_path(self, path): + config = self.AriaArtifactStoreConfig(path=path) + assert config.path == "aria://my-bucket/my-folder/my-file.txt" + + @pytest.mark.parametrize( + "path", + [ + "s3://my-bucket/my-folder/my-file.txt", + "http://my-bucket/my-folder/my-file.txt", + ], + ) + def test_invalid_path(self, path): + with pytest.raises(ArtifactStoreInterfaceError): + self.AriaArtifactStoreConfig(path=path)
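The core of the fix above is a single `str.strip` with a character set, applied before the scheme check. A standalone illustration of that behaviour:

```python
# Standalone illustration of the quote stripping added in the validator above.
path = "'s3://bucketname'"          # what the validator may receive on Windows
cleaned = path.strip("'\"`")        # removes any leading/trailing ', " or `
assert cleaned == "s3://bucketname"
assert cleaned.startswith("s3://")  # now satisfies SUPPORTED_SCHEMES = {"s3://"}
```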
fedora-infra__bodhi-2448
Bodhi sends e-mails when it cannot modify private bugs Bodhi tries to close private bugs, which it does not have permission to do. It should probably not try to do this in the first place, but more importantly it should catch the Exception that arises when it tries. Bodhi often sends error e-mails like this one: Message ------- [2018-06-09 20:39:48][ bodhi ERROR] ```python Unable to close bug #1549665 ``` Process Details --------------- - host: bodhi-backend01.phx2.fedoraproject.org - PID: 98853 - name: fedmsg-hub - command: /usr/bin/python2 /usr/bin/fedmsg-hub - msg_id: Callstack that lead to the logging statement -------------------------------------------- ```python File "/usr/lib64/python2.7/threading.py", line 777 in __bootstrap self.__bootstrap_inner() File "/usr/lib64/python2.7/threading.py", line 804 in __bootstrap_inner self.run() File "/usr/lib/python2.7/site-packages/bodhi/server/consumers/masher.py", line 337 in run self.work() File "/usr/lib/python2.7/site-packages/bodhi/server/consumers/masher.py", line 416 in work self.modify_bugs() File "/usr/lib/python2.7/site-packages/bodhi/server/consumers/masher.py", line 70 in wrapper retval = method(self, *args, **kwargs) File "/usr/lib/python2.7/site-packages/bodhi/server/consumers/masher.py", line 717 in modify_bugs update.modify_bugs() File "/usr/lib/python2.7/site-packages/bodhi/server/models.py", line 2735 in modify_bugs bug.close_bug(self) File "/usr/lib/python2.7/site-packages/bodhi/server/models.py", line 4155 in close_bug bugs.bugtracker.close(self.bug_id, versions=versions, comment=self.default_message(update)) File "/usr/lib/python2.7/site-packages/bodhi/server/bugs.py", line 222 in close log.exception("Unable to close bug #%d" % bug_id) ``` ``` Traceback (most recent call last): File "/usr/lib/python2.7/site-packages/bodhi/server/bugs.py", line 199, in close bug = self.bz.getbug(bug_id) File "/usr/lib/python2.7/site-packages/bugzilla/base.py", line 1024, in getbug extra_fields=extra_fields) File "/usr/lib/python2.7/site-packages/bugzilla/base.py", line 1016, in _getbug return self._getbugs([objid], permissive=False, **kwargs)[0] File "/usr/lib/python2.7/site-packages/bugzilla/base.py", line 984, in _getbugs r = self._proxy.Bug.get(getbugdata) File "/usr/lib64/python2.7/xmlrpclib.py", line 1243, in __call__ return self.__send(self.__name, args) File "/usr/lib/python2.7/site-packages/bugzilla/transport.py", line 100, in _ServerProxy__request ret = ServerProxy._ServerProxy__request(self, methodname, params) File "/usr/lib64/python2.7/xmlrpclib.py", line 1602, in __request verbose=self.__verbose File "/usr/lib/python2.7/site-packages/bugzilla/transport.py", line 195, in request return self._request_helper(url, request_body) File "/usr/lib/python2.7/site-packages/bugzilla/transport.py", line 181, in _request_helper raise sys.exc_info()[1] Fault: <Fault 102: 'You are not authorized to access bug #1549665. Most likely the bug has been restricted for internal development processes and we cannot grant access. If you are a Red Hat customer with an active subscription, please visit the Red Hat Customer Portal for assistance with your issue If you are a Fedora Project user and require assistance, please consider using one of the mailing lists we host for the Fedora Project.'> ```
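For context on the report above: the existing `close()` in `bodhi/server/bugs.py` (shown below) already wraps the Bugzilla call in a broad `except Exception` that logs via `log.exception`, and that error-level log is what triggers the alert e-mail. A hedged sketch of the kind of guard the report asks for — explicitly not the patch the project actually merged — would catch the XML-RPC authorization fault and downgrade it:

```python
# Sketch only (not the merged fix): treat Bugzilla's "not authorized" fault as
# an expected condition for private bugs and log it below error level.
from six.moves import xmlrpc_client

def close(self, bug_id, versions, comment):
    try:
        bug = self.bz.getbug(bug_id)
    except xmlrpc_client.Fault as e:
        # Fault 102: the bug is private and Bodhi's account cannot access it.
        if e.faultCode == 102:
            log.warning("Cannot access private bug #%d, skipping close", bug_id)
            return
        raise
    # ... continue with the existing fixed_in / close logic ...
```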
[ { "content": "# -*- coding: utf-8 -*-\n# Copyright © 2013-2017 Red Hat, Inc. and others.\n#\n# This file is part of Bodhi.\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\"\"\"Defines utilities for accessing Bugzilla.\"\"\"\n\nimport logging\n\nfrom collections import namedtuple\nfrom kitchen.text.converters import to_unicode\nimport bugzilla\nimport six\nfrom six.moves import xmlrpc_client\n\nfrom bodhi.server.config import config\n\n\nbugtracker = None\nlog = logging.getLogger('bodhi')\nFakeBug = namedtuple('FakeBug', ['bug_id'])\n\n\nclass BugTracker(object):\n \"\"\"A superclass to share between FakeBugTracker and Bugzilla.\"\"\"\n\n def _(self, *args, **kw): # pragma: no cover\n \"\"\"\n Raise NotImplementedError.\n\n Raises:\n NotImplementedError: Always.\n \"\"\"\n raise NotImplementedError\n\n getbug = update_details = modified = on_qa = close = update_details = _\n\n\nclass FakeBugTracker(BugTracker):\n \"\"\"Provide an API similar to bugzilla.base.Bugzilla without doing anything.\"\"\"\n\n def getbug(self, bug_id, *args, **kw):\n \"\"\"\n Return a FakeBug representing the requested bug id.\n\n Args:\n bug_id (basestring or int): The requested bug id.\n args (list): Unused.\n kwargs (dict): Unused.\n \"\"\"\n return FakeBug(bug_id=int(bug_id))\n\n def __noop__(self, *args, **kw):\n \"\"\"\n Log the method call at debug.\n\n Args:\n args (list): The list of args passed to the method.\n kwargs (dict): The kwargs passed to the method.\n \"\"\"\n log.debug('__noop__(%s)' % str(args))\n\n comment = update_details = modified = close = on_qa = __noop__\n\n\nclass InvalidComment(Exception):\n \"\"\"Exception thrown when the comment posted is invalid (for example too long).\"\"\"\n\n\nclass Bugzilla(BugTracker):\n \"\"\"Provide methods for Bodhi's frequent Bugzilla operations.\"\"\"\n\n def __init__(self):\n \"\"\"Initialize self._bz as None.\"\"\"\n self._bz = None\n\n def _connect(self):\n \"\"\"Create a Bugzilla client instance and store it on self._bz.\"\"\"\n user = config.get('bodhi_email')\n password = config.get('bodhi_password')\n url = config.get(\"bz_server\")\n log.info(\"Using BZ URL %s\" % url)\n if user and password:\n self._bz = bugzilla.Bugzilla(url=url,\n user=user, password=password,\n cookiefile=None, tokenfile=None)\n else:\n self._bz = bugzilla.Bugzilla(url=url,\n cookiefile=None, tokenfile=None)\n\n @property\n def bz(self):\n \"\"\"\n Ensure we have connected to Bugzilla and return the client instance.\n\n Returns:\n bugzilla.base.Bugzilla: A client Bugzilla instance.\n \"\"\"\n if self._bz is None:\n self._connect()\n return self._bz\n\n def get_url(self, bug_id):\n \"\"\"\n Generate and return a URL to the given bug.\n\n Args:\n bug_id (basestring or int): The id of the bug you want a URl for.\n Returns:\n basestring: The requested URL.\n \"\"\"\n return \"%s/show_bug.cgi?id=%s\" % 
(config['bz_baseurl'], bug_id)\n\n def getbug(self, bug_id):\n \"\"\"\n Retrieve a bug from Bugzilla.\n\n Args:\n bug_id (int): The id of the bug you wish to retreive.\n Returns:\n bugzilla.bug.Bug: A Bug instance representing the bug in Bugzilla.\n \"\"\"\n return self.bz.getbug(bug_id)\n\n def comment(self, bug_id, comment):\n \"\"\"\n Add a comment to the given bug.\n\n Args:\n bug_id (int): The id of the bug you wish to comment on.\n comment (basestring): The comment to add to the bug.\n \"\"\"\n try:\n if len(comment) > 65535:\n raise InvalidComment(\"Comment is too long: %s\" % comment)\n bug = self.bz.getbug(bug_id)\n attempts = 0\n while attempts < 5:\n try:\n bug.addcomment(comment)\n break\n except xmlrpc_client.Fault as e:\n attempts += 1\n log.exception(\n \"\\nA fault has occurred \\nFault code: %d \\nFault string: %s\" %\n (e.faultCode, e.faultString))\n except InvalidComment:\n log.exception(\n \"Comment too long for bug #%d: %s\" % (bug_id, comment))\n except Exception:\n log.exception(\"Unable to add comment to bug #%d\" % bug_id)\n\n def on_qa(self, bug_id, comment):\n \"\"\"\n Change the status of this bug to ON_QA if it is not already ON_QA, VERIFIED, or CLOSED.\n\n This method will only operate on bugs that are associated with products listed\n in the bz_products setting.\n\n This will also comment on the bug with some details on how to test and provide feedback for\n this update.\n\n Args:\n bug_id (int): The bug id you wish to set to ON_QA.\n comment (basestring): The comment to be included with the state change.\n \"\"\"\n try:\n bug = self.bz.getbug(bug_id)\n if bug.product not in config.get('bz_products'):\n log.info(\"Skipping set on_qa on {0!r} bug #{1}\".format(bug.product, bug_id))\n return\n if bug.bug_status not in ('ON_QA', 'VERIFIED', 'CLOSED'):\n log.debug(\"Setting Bug #%d to ON_QA\" % bug_id)\n bug.setstatus('ON_QA', comment=comment)\n else:\n bug.addcomment(comment)\n except Exception:\n log.exception(\"Unable to alter bug #%d\" % bug_id)\n\n def close(self, bug_id, versions, comment):\n \"\"\"\n Close the bug given by bug_id, mark it as fixed in the given versions, and add a comment.\n\n This method will only operate on bugs that are associated with products listed\n in the bz_products setting.\n\n Args:\n bug_id (int): The ID of the bug you wish to close.\n versions (dict): A mapping of package names to nvrs of those packages that close the\n bug.\n comment (basestring): A comment to leave on the bug when closing it.\n \"\"\"\n args = {'comment': comment}\n try:\n bug = self.bz.getbug(bug_id)\n if bug.product not in config.get('bz_products'):\n log.info(\"Skipping set closed on {0!r} bug #{1}\".format(bug.product, bug_id))\n return\n # If this bug is for one of these builds...\n if bug.component in versions:\n version = versions[bug.component]\n # Get the existing list\n fixedin = [v.strip() for v in bug.fixed_in.split()]\n # Strip out any empty strings (already stripped)\n fixedin = [v for v in fixedin if v]\n\n # There are Red Hat preferences to how this field should be\n # structured. 
We should use:\n # - the full NVR as it appears in koji\n # - space-separated if there's more than one.\n fixedin_str = \" \".join(fixedin)\n\n # Add our build if its not already there\n # but only if resultant string length is lower than 256 chars\n # See https://github.com/fedora-infra/bodhi/issues/1430\n if (version not in fixedin) and ((len(fixedin_str) + len(version)) < 255):\n args['fixedin'] = \" \".join([fixedin_str, version]).strip()\n\n bug.close('ERRATA', **args)\n except xmlrpc_client.Fault:\n log.exception(\"Unable to close bug #%d\" % bug_id)\n\n def update_details(self, bug, bug_entity):\n \"\"\"\n Update the details on bug_entity to match what is found in Bugzilla.\n\n Args:\n bug (bugzilla.bug.Bug or None): The Bugzilla Bug we will use to update our own Bug\n object from. If None, bug_entity.bug_id will be used to fetch the object from\n Bugzilla.\n bug_entity(bodhi.server.models.Bug): The bug we wish to update.\n \"\"\"\n if not bug:\n try:\n bug = self.bz.getbug(bug_entity.bug_id)\n except xmlrpc_client.Fault:\n bug_entity.title = 'Invalid bug number'\n log.exception(\"Got fault from Bugzilla\")\n return\n except Exception:\n log.exception(\"Unknown exception from Bugzilla\")\n return\n if bug.product == 'Security Response':\n bug_entity.parent = True\n bug_entity.title = to_unicode(bug.short_desc)\n if isinstance(bug.keywords, six.string_types):\n keywords = bug.keywords.split()\n else: # python-bugzilla 0.8.0+\n keywords = bug.keywords\n if 'security' in [keyword.lower() for keyword in keywords]:\n bug_entity.security = True\n\n def modified(self, bug_id):\n \"\"\"\n Change the status of this bug to MODIFIED if not already MODIFIED, VERIFIED, or CLOSED.\n\n This method will only operate on bugs that are associated with products listed\n in the bz_products setting.\n\n Args:\n bug_id (basestring or int): The bug you wish to mark MODIFIED.\n \"\"\"\n try:\n bug = self.bz.getbug(bug_id)\n if bug.product not in config.get('bz_products'):\n log.info(\"Skipping set modified on {0!r} bug #{1}\".format(bug.product, bug_id))\n return\n if bug.bug_status not in ('MODIFIED', 'VERIFIED', 'CLOSED'):\n log.info('Setting bug #%d status to MODIFIED' % bug_id)\n bug.setstatus('MODIFIED')\n except Exception:\n log.exception(\"Unable to alter bug #%d\" % bug_id)\n\n\ndef set_bugtracker():\n \"\"\"Set the module-level bugtracker attribute to the correct bugtracker, based on the config.\"\"\"\n global bugtracker\n if config.get('bugtracker') == 'bugzilla':\n log.info('Using python-bugzilla')\n bugtracker = Bugzilla()\n else:\n log.info('Using the FakeBugTracker')\n bugtracker = FakeBugTracker()\n", "path": "bodhi/server/bugs.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\n# Copyright © 2013-2017 Red Hat, Inc. and others.\n#\n# This file is part of Bodhi.\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\"\"\"Defines utilities for accessing Bugzilla.\"\"\"\n\nimport logging\n\nfrom collections import namedtuple\nfrom kitchen.text.converters import to_unicode\nimport bugzilla\nimport six\nfrom six.moves import xmlrpc_client\n\nfrom bodhi.server.config import config\n\n\nbugtracker = None\nlog = logging.getLogger('bodhi')\nFakeBug = namedtuple('FakeBug', ['bug_id'])\n\n\nclass BugTracker(object):\n \"\"\"A superclass to share between FakeBugTracker and Bugzilla.\"\"\"\n\n def _(self, *args, **kw): # pragma: no cover\n \"\"\"\n Raise NotImplementedError.\n\n Raises:\n NotImplementedError: Always.\n \"\"\"\n raise NotImplementedError\n\n getbug = update_details = modified = on_qa = close = update_details = _\n\n\nclass FakeBugTracker(BugTracker):\n \"\"\"Provide an API similar to bugzilla.base.Bugzilla without doing anything.\"\"\"\n\n def getbug(self, bug_id, *args, **kw):\n \"\"\"\n Return a FakeBug representing the requested bug id.\n\n Args:\n bug_id (basestring or int): The requested bug id.\n args (list): Unused.\n kwargs (dict): Unused.\n \"\"\"\n return FakeBug(bug_id=int(bug_id))\n\n def __noop__(self, *args, **kw):\n \"\"\"\n Log the method call at debug.\n\n Args:\n args (list): The list of args passed to the method.\n kwargs (dict): The kwargs passed to the method.\n \"\"\"\n log.debug('__noop__(%s)' % str(args))\n\n comment = update_details = modified = close = on_qa = __noop__\n\n\nclass InvalidComment(Exception):\n \"\"\"Exception thrown when the comment posted is invalid (for example too long).\"\"\"\n\n\nclass Bugzilla(BugTracker):\n \"\"\"Provide methods for Bodhi's frequent Bugzilla operations.\"\"\"\n\n def __init__(self):\n \"\"\"Initialize self._bz as None.\"\"\"\n self._bz = None\n\n def _connect(self):\n \"\"\"Create a Bugzilla client instance and store it on self._bz.\"\"\"\n user = config.get('bodhi_email')\n password = config.get('bodhi_password')\n url = config.get(\"bz_server\")\n log.info(\"Using BZ URL %s\" % url)\n if user and password:\n self._bz = bugzilla.Bugzilla(url=url,\n user=user, password=password,\n cookiefile=None, tokenfile=None)\n else:\n self._bz = bugzilla.Bugzilla(url=url,\n cookiefile=None, tokenfile=None)\n\n @property\n def bz(self):\n \"\"\"\n Ensure we have connected to Bugzilla and return the client instance.\n\n Returns:\n bugzilla.base.Bugzilla: A client Bugzilla instance.\n \"\"\"\n if self._bz is None:\n self._connect()\n return self._bz\n\n def get_url(self, bug_id):\n \"\"\"\n Generate and return a URL to the given bug.\n\n Args:\n bug_id (basestring or int): The id of the bug you want a URl for.\n Returns:\n basestring: The requested URL.\n \"\"\"\n return \"%s/show_bug.cgi?id=%s\" % 
(config['bz_baseurl'], bug_id)\n\n def getbug(self, bug_id):\n \"\"\"\n Retrieve a bug from Bugzilla.\n\n Args:\n bug_id (int): The id of the bug you wish to retreive.\n Returns:\n bugzilla.bug.Bug: A Bug instance representing the bug in Bugzilla.\n \"\"\"\n return self.bz.getbug(bug_id)\n\n def comment(self, bug_id, comment):\n \"\"\"\n Add a comment to the given bug.\n\n Args:\n bug_id (int): The id of the bug you wish to comment on.\n comment (basestring): The comment to add to the bug.\n \"\"\"\n try:\n if len(comment) > 65535:\n raise InvalidComment(\"Comment is too long: %s\" % comment)\n bug = self.bz.getbug(bug_id)\n attempts = 0\n while attempts < 5:\n try:\n bug.addcomment(comment)\n break\n except xmlrpc_client.Fault as e:\n attempts += 1\n log.exception(\n \"\\nA fault has occurred \\nFault code: %d \\nFault string: %s\" %\n (e.faultCode, e.faultString))\n except InvalidComment:\n log.exception(\n \"Comment too long for bug #%d: %s\" % (bug_id, comment))\n except Exception:\n log.exception(\"Unable to add comment to bug #%d\" % bug_id)\n\n def on_qa(self, bug_id, comment):\n \"\"\"\n Change the status of this bug to ON_QA if it is not already ON_QA, VERIFIED, or CLOSED.\n\n This method will only operate on bugs that are associated with products listed\n in the bz_products setting.\n\n This will also comment on the bug with some details on how to test and provide feedback for\n this update.\n\n Args:\n bug_id (int): The bug id you wish to set to ON_QA.\n comment (basestring): The comment to be included with the state change.\n \"\"\"\n try:\n bug = self.bz.getbug(bug_id)\n if bug.product not in config.get('bz_products'):\n log.info(\"Skipping set on_qa on {0!r} bug #{1}\".format(bug.product, bug_id))\n return\n if bug.bug_status not in ('ON_QA', 'VERIFIED', 'CLOSED'):\n log.debug(\"Setting Bug #%d to ON_QA\" % bug_id)\n bug.setstatus('ON_QA', comment=comment)\n else:\n bug.addcomment(comment)\n except Exception:\n log.exception(\"Unable to alter bug #%d\" % bug_id)\n\n def close(self, bug_id, versions, comment):\n \"\"\"\n Close the bug given by bug_id, mark it as fixed in the given versions, and add a comment.\n\n This method will only operate on bugs that are associated with products listed\n in the bz_products setting.\n\n Args:\n bug_id (int): The ID of the bug you wish to close.\n versions (dict): A mapping of package names to nvrs of those packages that close the\n bug.\n comment (basestring): A comment to leave on the bug when closing it.\n \"\"\"\n args = {'comment': comment}\n try:\n bug = self.bz.getbug(bug_id)\n if bug.product not in config.get('bz_products'):\n log.info(\"Skipping set closed on {0!r} bug #{1}\".format(bug.product, bug_id))\n return\n # If this bug is for one of these builds...\n if bug.component in versions:\n version = versions[bug.component]\n # Get the existing list\n fixedin = [v.strip() for v in bug.fixed_in.split()]\n # Strip out any empty strings (already stripped)\n fixedin = [v for v in fixedin if v]\n\n # There are Red Hat preferences to how this field should be\n # structured. 
We should use:\n # - the full NVR as it appears in koji\n # - space-separated if there's more than one.\n fixedin_str = \" \".join(fixedin)\n\n # Add our build if its not already there\n # but only if resultant string length is lower than 256 chars\n # See https://github.com/fedora-infra/bodhi/issues/1430\n if (version not in fixedin) and ((len(fixedin_str) + len(version)) < 255):\n args['fixedin'] = \" \".join([fixedin_str, version]).strip()\n\n bug.close('ERRATA', **args)\n except xmlrpc_client.Fault:\n log.info(\"Unable to close bug #%d\" % bug_id)\n\n def update_details(self, bug, bug_entity):\n \"\"\"\n Update the details on bug_entity to match what is found in Bugzilla.\n\n Args:\n bug (bugzilla.bug.Bug or None): The Bugzilla Bug we will use to update our own Bug\n object from. If None, bug_entity.bug_id will be used to fetch the object from\n Bugzilla.\n bug_entity(bodhi.server.models.Bug): The bug we wish to update.\n \"\"\"\n if not bug:\n try:\n bug = self.bz.getbug(bug_entity.bug_id)\n except xmlrpc_client.Fault:\n bug_entity.title = 'Invalid bug number'\n log.exception(\"Got fault from Bugzilla\")\n return\n except Exception:\n log.exception(\"Unknown exception from Bugzilla\")\n return\n if bug.product == 'Security Response':\n bug_entity.parent = True\n bug_entity.title = to_unicode(bug.short_desc)\n if isinstance(bug.keywords, six.string_types):\n keywords = bug.keywords.split()\n else: # python-bugzilla 0.8.0+\n keywords = bug.keywords\n if 'security' in [keyword.lower() for keyword in keywords]:\n bug_entity.security = True\n\n def modified(self, bug_id):\n \"\"\"\n Change the status of this bug to MODIFIED if not already MODIFIED, VERIFIED, or CLOSED.\n\n This method will only operate on bugs that are associated with products listed\n in the bz_products setting.\n\n Args:\n bug_id (basestring or int): The bug you wish to mark MODIFIED.\n \"\"\"\n try:\n bug = self.bz.getbug(bug_id)\n if bug.product not in config.get('bz_products'):\n log.info(\"Skipping set modified on {0!r} bug #{1}\".format(bug.product, bug_id))\n return\n if bug.bug_status not in ('MODIFIED', 'VERIFIED', 'CLOSED'):\n log.info('Setting bug #%d status to MODIFIED' % bug_id)\n bug.setstatus('MODIFIED')\n except Exception:\n log.exception(\"Unable to alter bug #%d\" % bug_id)\n\n\ndef set_bugtracker():\n \"\"\"Set the module-level bugtracker attribute to the correct bugtracker, based on the config.\"\"\"\n global bugtracker\n if config.get('bugtracker') == 'bugzilla':\n log.info('Using python-bugzilla')\n bugtracker = Bugzilla()\n else:\n log.info('Using the FakeBugTracker')\n bugtracker = FakeBugTracker()\n", "path": "bodhi/server/bugs.py" } ]
diff --git a/bodhi/server/bugs.py b/bodhi/server/bugs.py index afa092e85b..b83ced1f59 100644 --- a/bodhi/server/bugs.py +++ b/bodhi/server/bugs.py @@ -231,7 +231,7 @@ def close(self, bug_id, versions, comment): bug.close('ERRATA', **args) except xmlrpc_client.Fault: - log.exception("Unable to close bug #%d" % bug_id) + log.info("Unable to close bug #%d" % bug_id) def update_details(self, bug, bug_entity): """ diff --git a/bodhi/tests/server/test_bugs.py b/bodhi/tests/server/test_bugs.py index ee5a9fe15e..9804a66487 100644 --- a/bodhi/tests/server/test_bugs.py +++ b/bodhi/tests/server/test_bugs.py @@ -88,9 +88,9 @@ def test_bz_with__bz_set(self, _connect): self.assertTrue(return_value is bz._bz) self.assertEqual(_connect.call_count, 0) - @mock.patch('bodhi.server.bugs.log.exception') + @mock.patch('bodhi.server.bugs.log.info') @mock.patch.dict('bodhi.server.bugs.config', {'bz_products': 'aproduct'}) - def test_close_fault(self, exception): + def test_close_fault(self, info): """Assert that an xmlrpc Fault is caught and logged by close().""" bz = bugs.Bugzilla() bz._bz = mock.MagicMock() @@ -101,7 +101,7 @@ def test_close_fault(self, exception): # This should not raise an Exception. bz.close(12345, {'bodhi': 'bodhi-3.1.0-1.fc27'}, 'whabam!') - exception.assert_called_once_with('Unable to close bug #12345') + info.assert_called_once_with('Unable to close bug #12345') @mock.patch('bodhi.server.bugs.log.exception') @mock.patch.dict('bodhi.server.bugs.config', {'bz_products': 'aproduct'})
pymedusa__Medusa-7517
{{...}} in release name breaks manual episode search **Describe the bug** There is an NZB indexer which appends the password to the release name of each episode. The result looks like this: > ReleaseName{{ThePassword}} If the manual epsiode search contains such a result, the body stays blank and the browser console shows following error: `Something went wrong in getting the paramaters from dom. indexerName: undefined, seriesId: undefined, season: undefined, episode: undefined` at `snatch-selection.vue:227` The manual search works on the same indexer if none of the results has a password and thus the release names don't contain curly brackets. **To Reproduce** 1. Go to a TV Show 2. Click on "Manual Search" for a specific episode 3. Results in an empty page **Expected behavior** See a list of search results. **Screenshots** ![medusa](https://user-images.githubusercontent.com/59095677/71280044-7e3a4c00-235a-11ea-98ba-b9ed95425b64.png) **Medusa version** Latest docker image from https://github.com/linuxserver/docker-medusa Medusa Info: Branch: master Commit: Unknown Version: 0.3.9 Database: 44.14 Python Version: 3.7.5 (default, Oct 17 2019, 12:25:15) [GCC 8.3.0] **Debug logs** ``` 2019-12-20 19:35:31 DEBUG FORCEDSEARCHQUEUE-MANUAL-279577 :: 279577: Formatting pattern: %SN - %AD - %EN -> Die Anstalt - 2019 11 05 - Treuhand 2019-12-20 19:35:31 DEBUG FORCEDSEARCHQUEUE-MANUAL-279577 :: Found codec for Die Anstalt Treuhand 2019-12-20 19:35:31 DEBUG FORCEDSEARCHQUEUE-MANUAL-279577 :: Parsed /tv/Die Anstalt/Season 06/Die Anstalt S06E07 11-19 into title: Die Anstalt, season: 6, episode: [7], episode_title: 11-19, type: episode, parsing_time: 0.04307889938354492, absolute_episode: [], quality: Unknown, total_time: 0.04324173927307129 2019-12-20 19:35:31 DEBUG FORCEDSEARCHQUEUE-MANUAL-279577 :: Matched release /tv/Die Anstalt/Season 06/Die Anstalt S06E07 11-19 to a series in your database: Die Anstalt 2019-12-20 19:35:31 DEBUG FORCEDSEARCHQUEUE-MANUAL-279577 :: Hydra :: Transaction with 3 queries executed 2019-12-20 19:35:31 DEBUG FORCEDSEARCHQUEUE-MANUAL-279577 :: Hydra :: Mass updating cache table with manual results for provider: Hydra 2019-12-20 19:35:31 DEBUG FORCEDSEARCHQUEUE-MANUAL-279577 :: Hydra :: Updating item: Die.Anstalt.2019-11-05.GERMAN.HDTV.x264.REPACK-ACED{{23XXhYzqdB}} to cache: hydra 2019-12-20 19:35:31 DEBUG FORCEDSEARCHQUEUE-MANUAL-279577 :: Hydra :: Using cached parse result for Die.Anstalt.2019-11-05.GERMAN.HDTV.x264.REPACK-ACED{{23XXhYzqdB}} 2019-12-20 19:35:31 DEBUG FORCEDSEARCHQUEUE-MANUAL-279577 :: Hydra :: Adding to cache item found in manual search: Die.Anstalt.2019-11-05.GERMAN.HDTV.x264.REPACK-ACED{{23XXhYzqdB}} 2019-12-20 19:35:31 DEBUG FORCEDSEARCHQUEUE-MANUAL-279577 :: Hydra :: Updating item: Die.Anstalt.2019-11-05.GERMAN.HDTV.x264.REPACK-ACED to cache: hydra 2019-12-20 19:35:31 DEBUG FORCEDSEARCHQUEUE-MANUAL-279577 :: Hydra :: Using cached parse result for Die.Anstalt.2019-11-05.GERMAN.HDTV.x264.REPACK-ACED 2019-12-20 19:35:31 DEBUG FORCEDSEARCHQUEUE-MANUAL-279577 :: Hydra :: Adding to cache item found in manual search: Die.Anstalt.2019-11-05.GERMAN.HDTV.x264.REPACK-ACED 2019-12-20 19:35:31 DEBUG FORCEDSEARCHQUEUE-MANUAL-279577 :: Hydra :: Updating item: Die.Anstalt.2019-11-05.GERMAN.720p.HDTV.x264-ACED{{2OwRbg7rS5}} to cache: hydra 2019-12-20 19:35:31 DEBUG FORCEDSEARCHQUEUE-MANUAL-279577 :: Hydra :: Using cached parse result for Die.Anstalt.2019-11-05.GERMAN.720p.HDTV.x264-ACED{{2OwRbg7rS5}} 2019-12-20 19:35:31 DEBUG FORCEDSEARCHQUEUE-MANUAL-279577 :: Hydra :: Adding 
to cache item found in manual search: Die.Anstalt.2019-11-05.GERMAN.720p.HDTV.x264-ACED{{2OwRbg7rS5}} 2019-12-20 19:35:31 DEBUG FORCEDSEARCHQUEUE-MANUAL-279577 :: Hydra :: Transaction with 3 queries executed 2019-12-20 19:35:31 DEBUG FORCEDSEARCHQUEUE-MANUAL-279577 :: Hydra :: Found single episode result Die.Anstalt.2019-11-05.GERMAN.HDTV.x264.REPACK-ACED{{23XXhYzqdB}} at **********getnzb/api/308519251735629238?apikey=********** 2019-12-20 19:35:31 DEBUG FORCEDSEARCHQUEUE-MANUAL-279577 :: Hydra :: Found result Die.Anstalt.2019-11-05.GERMAN.HDTV.x264.REPACK-ACED{{23XXhYzqdB}} at **********getnzb/api/308519251735629238?apikey=********** 2019-12-20 19:35:31 DEBUG FORCEDSEARCHQUEUE-MANUAL-279577 :: Hydra :: Added item: Die.Anstalt.2019-11-05.GERMAN.HDTV.x264.REPACK-ACED{{23XXhYzqdB}} to cache: hydra with url: **********getnzb/api/308519251735629238?apikey=********** 2019-12-20 19:35:31 DEBUG FORCEDSEARCHQUEUE-MANUAL-279577 :: Hydra :: Adding item from search to cache: 'Die.Anstalt.2019-11-05.GERMAN.HDTV.x264.REPACK-ACED{23XXhYzqdB}' 2019-12-20 19:35:31 DEBUG FORCEDSEARCHQUEUE-MANUAL-279577 :: Hydra :: Found single episode result Die.Anstalt.2019-11-05.GERMAN.HDTV.x264.REPACK-ACED at **********getnzb/api/-2571879803889673688?apikey=********** 2019-12-20 19:35:31 DEBUG FORCEDSEARCHQUEUE-MANUAL-279577 :: Hydra :: Found result Die.Anstalt.2019-11-05.GERMAN.HDTV.x264.REPACK-ACED at **********getnzb/api/-2571879803889673688?apikey=********** 2019-12-20 19:35:31 DEBUG FORCEDSEARCHQUEUE-MANUAL-279577 :: Hydra :: Added item: Die.Anstalt.2019-11-05.GERMAN.HDTV.x264.REPACK-ACED to cache: hydra with url: **********getnzb/api/-2571879803889673688?apikey=********** 2019-12-20 19:35:31 DEBUG FORCEDSEARCHQUEUE-MANUAL-279577 :: Hydra :: Adding item from search to cache: 'Die.Anstalt.2019-11-05.GERMAN.HDTV.x264.REPACK-ACED' 2019-12-20 19:35:31 DEBUG FORCEDSEARCHQUEUE-MANUAL-279577 :: Hydra :: Found single episode result Die.Anstalt.2019-11-05.GERMAN.720p.HDTV.x264-ACED{{2OwRbg7rS5}} at **********getnzb/api/-5790992511800115018?apikey=********** 2019-12-20 19:35:31 DEBUG FORCEDSEARCHQUEUE-MANUAL-279577 :: Hydra :: Found result Die.Anstalt.2019-11-05.GERMAN.720p.HDTV.x264-ACED{{2OwRbg7rS5}} at **********getnzb/api/-5790992511800115018?apikey=********** 2019-12-20 19:35:31 DEBUG FORCEDSEARCHQUEUE-MANUAL-279577 :: Hydra :: Added item: Die.Anstalt.2019-11-05.GERMAN.720p.HDTV.x264-ACED{{2OwRbg7rS5}} to cache: hydra with url: **********getnzb/api/-5790992511800115018?apikey=********** 2019-12-20 19:35:31 DEBUG FORCEDSEARCHQUEUE-MANUAL-279577 :: Hydra :: Adding item from search to cache: 'Die.Anstalt.2019-11-05.GERMAN.720p.HDTV.x264-ACED{2OwRbg7rS5}' 2019-12-20 19:35:31 DEBUG FORCEDSEARCHQUEUE-MANUAL-279577 :: Hydra :: Parsed Die.Anstalt.2019-11-05.GERMAN.HDTV.x264.REPACK-ACED{{23XXhYzqdB}} into title: Die Anstalt, date: 2019-11-05, language: de, source: HDTV, video_codec: H.264, video_encoder: x264, other: Proper, proper_count: 1, proper_tag: REPACK, release_group: ACED, type: episode, parsing_time: 0.03590703010559082, season: 6, episode: [7], absolute_episode: [], quality: SDTV, total_time: 0.03661680221557617 2019-12-20 19:35:31 DEBUG FORCEDSEARCHQUEUE-MANUAL-279577 :: Hydra :: Matched release Die.Anstalt.2019-11-05.GERMAN.HDTV.x264.REPACK-ACED{{23XXhYzqdB}} to a series in your database: Die Anstalt 2019-12-20 19:35:31 DEBUG FORCEDSEARCHQUEUE-MANUAL-279577 :: Hydra :: Database info for series Die Anstalt: Season: 6 Episode(s): [7] 2019-12-20 19:35:31 DEBUG FORCEDSEARCHQUEUE-MANUAL-279577 :: Hydra :: Series Die 
Anstalt is air by date 2019-12-20 19:35:31 DEBUG FORCEDSEARCHQUEUE-MANUAL-279577 :: Hydra :: Parsed Die.Anstalt.2019-11-05.GERMAN.HDTV.x264.REPACK-ACED into title: Die Anstalt, date: 2019-11-05, language: de, source: HDTV, video_codec: H.264, video_encoder: x264, other: Proper, proper_count: 1, proper_tag: REPACK, release_group: ACED, type: episode, parsing_time: 0.033495426177978516, season: 6, episode: [7], absolute_episode: [], quality: SDTV, total_time: 0.03432965278625488 2019-12-20 19:35:31 DEBUG FORCEDSEARCHQUEUE-MANUAL-279577 :: Hydra :: Matched release Die.Anstalt.2019-11-05.GERMAN.HDTV.x264.REPACK-ACED to a series in your database: Die Anstalt 2019-12-20 19:35:31 DEBUG FORCEDSEARCHQUEUE-MANUAL-279577 :: Hydra :: Database info for series Die Anstalt: Season: 6 Episode(s): [7] 2019-12-20 19:35:31 DEBUG FORCEDSEARCHQUEUE-MANUAL-279577 :: Hydra :: Series Die Anstalt is air by date 2019-12-20 19:35:31 DEBUG FORCEDSEARCHQUEUE-MANUAL-279577 :: Hydra :: Parsed Die.Anstalt.2019-11-05.GERMAN.720p.HDTV.x264-ACED{{2OwRbg7rS5}} into title: Die Anstalt, date: 2019-11-05, language: de, screen_size: 720p, source: HDTV, video_codec: H.264, video_encoder: x264, release_group: ACED, type: episode, parsing_time: 0.0420839786529541, season: 6, episode: [7], absolute_episode: [], quality: 720p HDTV, total_time: 0.042864084243774414 2019-12-20 19:35:31 DEBUG FORCEDSEARCHQUEUE-MANUAL-279577 :: Hydra :: Matched release Die.Anstalt.2019-11-05.GERMAN.720p.HDTV.x264-ACED{{2OwRbg7rS5}} to a series in your database: Die Anstalt 2019-12-20 19:35:31 DEBUG FORCEDSEARCHQUEUE-MANUAL-279577 :: Hydra :: Database info for series Die Anstalt: Season: 6 Episode(s): [7] 2019-12-20 19:35:31 DEBUG FORCEDSEARCHQUEUE-MANUAL-279577 :: Hydra :: Series Die Anstalt is air by date 2019-12-20 19:35:31 DEBUG FORCEDSEARCHQUEUE-MANUAL-279577 :: Hydra :: Found 3 unique search results 2019-12-20 19:35:31 DEBUG FORCEDSEARCHQUEUE-MANUAL-279577 :: Hydra :: Found result: Die.Anstalt.2019-11-05.GERMAN.720p.HDTV.x264-ACED{{2OwRbg7rS5}} 2019-12-20 19:35:31 DEBUG FORCEDSEARCHQUEUE-MANUAL-279577 :: Hydra :: Found result: Die.Anstalt.2019-11-05.GERMAN.HDTV.x264.REPACK-ACED{{23XXhYzqdB}} 2019-12-20 19:35:31 DEBUG FORCEDSEARCHQUEUE-MANUAL-279577 :: Hydra :: Found result: Die.Anstalt.2019-11-05.GERMAN.HDTV.x264.REPACK-ACED 2019-12-20 19:35:31 DEBUG FORCEDSEARCHQUEUE-MANUAL-279577 :: Hydra :: User-Agent: Medusa/0.3.9 (Linux; 4.19.0-0.bpo.6-amd64; 924e9818-2335-11ea-b365-0242993916a0) 2019-12-20 19:35:31 DEBUG FORCEDSEARCHQUEUE-MANUAL-279577 :: Hydra :: GET URL: **********api?t=search&limit=100&offset=0&cat=5000&maxage=3000&apikey=**********&q=Die+Anstalt+2019+11+05 [Status: 200] 2019-12-20 19:35:26 DEBUG FORCEDSEARCHQUEUE-MANUAL-279577 :: Hydra :: Search show using search string: Die Anstalt 2019 11 05 2019-12-20 19:35:26 DEBUG FORCEDSEARCHQUEUE-MANUAL-279577 :: Hydra :: Search mode: Episode 2019-12-20 19:35:26 DEBUG FORCEDSEARCHQUEUE-MANUAL-279577 :: Hydra :: Found result: Die.Anstalt.2019-05-28.GERMAN.720p.HDTV.x264-ACED 2019-12-20 19:35:26 DEBUG FORCEDSEARCHQUEUE-MANUAL-279577 :: Hydra :: Found result: Die.Anstalt.2019-05-28.GERMAN.HDTV.x264-ACED 2019-12-20 19:35:26 DEBUG FORCEDSEARCHQUEUE-MANUAL-279577 :: Hydra :: Found result: Die.Anstalt.2019-07-16.GERMAN.HDTV.x264-ACED 2019-12-20 19:35:26 DEBUG FORCEDSEARCHQUEUE-MANUAL-279577 :: Hydra :: Found result: Die.Anstalt.2019-07-16.GERMAN.720p.HDTV.x264-ACED 2019-12-20 19:35:26 DEBUG FORCEDSEARCHQUEUE-MANUAL-279577 :: Hydra :: Found result: 
Die.Anstalt.2019-11-05.GERMAN.720p.HDTV.x264-ACED{{2OwRbg7rS5}} 2019-12-20 19:35:26 DEBUG FORCEDSEARCHQUEUE-MANUAL-279577 :: Hydra :: Found result: Die.Anstalt.2019-11-05.GERMAN.HDTV.x264.REPACK-ACED{{23XXhYzqdB}} 2019-12-20 19:35:26 DEBUG FORCEDSEARCHQUEUE-MANUAL-279577 :: Hydra :: Found result: Die.Anstalt.2019-11-05.GERMAN.HDTV.x264.REPACK-ACED 2019-12-20 19:35:26 DEBUG FORCEDSEARCHQUEUE-MANUAL-279577 :: Hydra :: Found result: Die.Anstalt.2019-12-10.GERMAN.HDTV.x264-ACED 2019-12-20 19:35:26 DEBUG FORCEDSEARCHQUEUE-MANUAL-279577 :: Hydra :: Found result: Die.Anstalt.2019-12-10.GERMAN.720p.HDTV.x264-ACED 2019-12-20 19:35:26 DEBUG FORCEDSEARCHQUEUE-MANUAL-279577 :: Hydra :: User-Agent: Medusa/0.3.9 (Linux; 4.19.0-0.bpo.6-amd64; 924e9818-2335-11ea-b365-0242993916a0) 2019-12-20 19:35:26 DEBUG FORCEDSEARCHQUEUE-MANUAL-279577 :: Hydra :: GET URL: **********api?t=tvsearch&limit=100&offset=0&cat=5000&maxage=3000&apikey=**********&tvdbid=279577&season=2019&ep=11%2F05 [Status: 200] 2019-12-20 19:35:23 DEBUG FORCEDSEARCHQUEUE-MANUAL-279577 :: Hydra :: Search show using search string: indexer_id: {'tvdbid': 279577} 2019-12-20 19:35:23 DEBUG FORCEDSEARCHQUEUE-MANUAL-279577 :: Hydra :: Search mode: Episode 2019-12-20 19:35:23 DEBUG FORCEDSEARCHQUEUE-MANUAL-279577 :: Hydra :: Could not find any candidates in cache, searching provider. 2019-12-20 19:35:23 INFO FORCEDSEARCHQUEUE-MANUAL-279577 :: Hydra :: Performing episode search for Die Anstalt 2019-12-20 19:35:23 INFO FORCEDSEARCHQUEUE-MANUAL-279577 :: Using manual search providers ```
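The release names at fault end in a literal `{{password}}` suffix, which appears to be re-interpreted as an interpolation by the client-side templating on the snatch-selection page, leaving it blank. A small, hypothetical helper (not Medusa code) just to make that release-name shape concrete and show how the suffix could be split off before display:

```python
import re

# Matches "ReleaseName{{Password}}" as produced by the indexer in the report.
RELEASE_WITH_PASSWORD = re.compile(r"^(?P<name>.+?)\{\{(?P<password>[^}]*)\}\}$")


def split_release_name(release):
    """Return (name, password); password is None when no {{...}} suffix is present."""
    match = RELEASE_WITH_PASSWORD.match(release)
    if not match:
        return release, None
    return match.group("name"), match.group("password")


assert split_release_name(
    "Die.Anstalt.2019-11-05.GERMAN.HDTV.x264.REPACK-ACED{{23XXhYzqdB}}"
) == ("Die.Anstalt.2019-11-05.GERMAN.HDTV.x264.REPACK-ACED", "23XXhYzqdB")
assert split_release_name("Die.Anstalt.2019-11-05.GERMAN.HDTV.x264.REPACK-ACED") == (
    "Die.Anstalt.2019-11-05.GERMAN.HDTV.x264.REPACK-ACED",
    None,
)
```

The actual change in the record's diff keeps the release name intact and instead marks the rendered span with Vue's `v-pre` so the client-side templating skips it.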
[ { "content": "# coding=utf-8\n\"\"\"Use setup tools to install Medusa.\"\"\"\nimport io\nimport os\nimport re\nimport sys\n\nfrom setuptools import setup\nfrom setuptools.command.test import test as TestCommand\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\nclass PyTest(TestCommand):\n user_options = [('pytest-args=', 'a', 'Arguments to pass into py.test')]\n\n def initialize_options(self):\n TestCommand.initialize_options(self)\n self.pytest_args = []\n\n def finalize_options(self):\n TestCommand.finalize_options(self)\n self.test_args = []\n self.test_suite = True\n\n def run_tests(self):\n import pytest\n\n errno = pytest.main(self.pytest_args.split(' '))\n sys.exit(errno)\n\n\ndef get_app_version():\n \"\"\"Get the app version from the code.\"\"\"\n pattern = re.compile(r\"VERSION = '([0-9.]+)'\")\n filename = os.path.join(here, 'medusa', 'common.py')\n with io.open(filename, 'r', encoding='utf-8') as fh:\n for line in fh:\n match = pattern.match(line)\n if match:\n return match.group(1)\n\n raise ValueError('Failed to get the app version!')\n\n\nwith open(os.path.join(here, 'readme.md'), 'r') as r:\n long_description = r.read()\n\n\ndef install_requires():\n with open(os.path.join(here, 'requirements.txt'), 'r') as r:\n return r.read().splitlines(keepends=False)\n\n\ndef packages():\n result = []\n\n for folder in ('medusa', 'ext', 'lib', 'themes'):\n if os.path.isdir(os.path.join(here, folder)):\n result.append(folder)\n\n for folder in ('ext2', 'ext3', 'lib2', 'lib3'):\n if os.path.isdir(os.path.join(here, folder)) and sys.version_info.major == int(folder[-1]):\n result.append(folder)\n\n return result\n\n\n# These requirements probably won't be needed\n# when `install_requires` is populated with `requirements.txt`\ntests_runtime_require = ['tornado==5.1.1', 'six', 'profilehooks', 'contextlib2', ]\n\nsetup(\n name='pymedusa',\n description='Automatic Video Library Manager for TV Shows',\n version=get_app_version(),\n author='pymedusa team',\n author_email='',\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='https://github.com/pymedusa/Medusa',\n license='GPLv3',\n packages=packages(),\n include_package_data=True,\n # install_requires=install_requires(),\n extras_require={\n 'system-stats': ['psutil'],\n },\n entry_points={\n 'console_scripts': [\n 'medusa = medusa.__main__:main'\n ]\n },\n cmdclass={'test': PyTest},\n tests_require=tests_runtime_require + [\n 'flake8>=3.7.7',\n 'flake8-docstrings>=1.3.0',\n 'flake8-import-order>=0.18',\n 'flake8-quotes>=1.0.0',\n 'pep8-naming>=0.7.0',\n 'pycodestyle>=2.4.0',\n 'pytest<5.0.0 ; python_version < \"3.5\"',\n 'pytest>=5.0.0 ; python_version >= \"3.5\"',\n 'pytest-cov>=2.6.1',\n 'pytest-flake8>=1.0.2',\n 'pytest-tornado5>=2.0.0',\n 'PyYAML>=5.1',\n 'vcrpy>=2.0.1',\n 'mock>=2.0.0',\n ],\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: End Users/Desktop',\n 'Intended Audience :: Developers',\n 'Operating System :: MacOS',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: Unix',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Topic :: Internet',\n 'Topic :: Multimedia :: Video',\n ],\n)\n", "path": "setup.py" } ]
[ { "content": "# coding=utf-8\n\"\"\"Use setup tools to install Medusa.\"\"\"\nimport io\nimport os\nimport re\nimport sys\n\nfrom setuptools import setup\nfrom setuptools.command.test import test as TestCommand\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\nclass PyTest(TestCommand):\n user_options = [('pytest-args=', 'a', 'Arguments to pass into py.test')]\n\n def initialize_options(self):\n TestCommand.initialize_options(self)\n self.pytest_args = []\n\n def finalize_options(self):\n TestCommand.finalize_options(self)\n self.test_args = []\n self.test_suite = True\n\n def run_tests(self):\n import pytest\n\n errno = pytest.main(self.pytest_args.split(' '))\n sys.exit(errno)\n\n\ndef get_app_version():\n \"\"\"Get the app version from the code.\"\"\"\n pattern = re.compile(r\"VERSION = '([0-9.]+)'\")\n filename = os.path.join(here, 'medusa', 'common.py')\n with io.open(filename, 'r', encoding='utf-8') as fh:\n for line in fh:\n match = pattern.match(line)\n if match:\n return match.group(1)\n\n raise ValueError('Failed to get the app version!')\n\n\nwith open(os.path.join(here, 'readme.md'), 'r') as r:\n long_description = r.read()\n\n\ndef install_requires():\n with open(os.path.join(here, 'requirements.txt'), 'r') as r:\n return r.read().splitlines(keepends=False)\n\n\ndef packages():\n result = []\n\n for folder in ('medusa', 'ext', 'lib', 'themes'):\n if os.path.isdir(os.path.join(here, folder)):\n result.append(folder)\n\n for folder in ('ext2', 'ext3', 'lib2', 'lib3'):\n if os.path.isdir(os.path.join(here, folder)) and sys.version_info.major == int(folder[-1]):\n result.append(folder)\n\n return result\n\n\n# These requirements probably won't be needed\n# when `install_requires` is populated with `requirements.txt`\ntests_runtime_require = ['tornado==5.1.1', 'six', 'profilehooks', 'contextlib2', ]\n\nsetup(\n name='pymedusa',\n description='Automatic Video Library Manager for TV Shows',\n version=get_app_version(),\n author='pymedusa team',\n author_email='',\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='https://github.com/pymedusa/Medusa',\n license='GPLv3',\n packages=packages(),\n include_package_data=True,\n # install_requires=install_requires(),\n extras_require={\n 'system-stats': ['psutil'],\n },\n entry_points={\n 'console_scripts': [\n 'medusa = medusa.__main__:main'\n ]\n },\n cmdclass={'test': PyTest},\n tests_require=tests_runtime_require + [\n 'flake8>=3.7.7',\n 'flake8-docstrings>=1.3.0',\n 'flake8-import-order>=0.18',\n 'flake8-quotes>=1.0.0',\n 'pep8-naming>=0.7.0',\n 'pycodestyle>=2.4.0',\n 'pytest<5.0.0 ; python_version < \"3.5\"',\n 'pytest>=5.0.0 ; python_version >= \"3.5\"',\n 'pytest-cov>=2.6.1',\n 'pytest-flake8>=1.0.2',\n 'pytest-tornado5>=2.0.0',\n 'PyYAML>=5.1',\n 'vcrpy<4.0.0 ; python_version < \"3.5\"',\n 'vcrpy>=4.0.0 ; python_version >= \"3.5\"',\n 'mock>=2.0.0',\n ],\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: End Users/Desktop',\n 'Intended Audience :: Developers',\n 'Operating System :: MacOS',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: Unix',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Topic :: Internet',\n 'Topic :: Multimedia :: Video',\n ],\n)\n", "path": "setup.py" } ]
diff --git a/CHANGELOG.md b/CHANGELOG.md index b33d4c8993..22a926025c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,7 +12,8 @@ - Fix `torrents.verifyCert` config patch ignored warning ([#7501](https://github.com/pymedusa/Medusa/pull/7501)) - Fix dragging and saving Anime / Series list handles in Home - Poster layout ([#7502](https://github.com/pymedusa/Medusa/pull/7502)) - Fix adding Anime with white/black listed release groups ([#7507](https://github.com/pymedusa/Medusa/pull/7507)) -- Fix Schedule page and Forced Search on Schedule page ([#7512](https://github.com/pymedusa/Medusa/pull/#7512)) +- Fix Schedule page and Forced Search on Schedule page ([#7512](https://github.com/pymedusa/Medusa/pull/7512)) +- Fix manual search page release name bug ([#7517](https://github.com/pymedusa/Medusa/pull/7517)) ----- diff --git a/setup.py b/setup.py index 408b24cda7..6008685785 100644 --- a/setup.py +++ b/setup.py @@ -105,7 +105,8 @@ def packages(): 'pytest-flake8>=1.0.2', 'pytest-tornado5>=2.0.0', 'PyYAML>=5.1', - 'vcrpy>=2.0.1', + 'vcrpy<4.0.0 ; python_version < "3.5"', + 'vcrpy>=4.0.0 ; python_version >= "3.5"', 'mock>=2.0.0', ], classifiers=[ diff --git a/themes-default/slim/views/snatchSelection.mako b/themes-default/slim/views/snatchSelection.mako index 38e7704f31..284e1acfd1 100644 --- a/themes-default/slim/views/snatchSelection.mako +++ b/themes-default/slim/views/snatchSelection.mako @@ -119,7 +119,7 @@ % for hItem in provider_results['found_items']: <tr id="${hItem['name'] | h}" class="skipped season-${season} seasonstyle ${hItem['status_highlight']}" role="row"> <td class="release-name-ellipses triggerhighlight"> - <span data-qtip-my="top left" data-qtip-at="bottom left" :class="getReleaseNameClasses(`${hItem['name']}`)" title="${hItem['name'] | h}" class="break-word ${hItem['name_highlight']} addQTip">${hItem['name'] | h}</span> + <span v-pre data-qtip-my="top left" data-qtip-at="bottom left" :class="getReleaseNameClasses(`${hItem['name']}`)" title="${hItem['name'] | h}" class="break-word ${hItem['name_highlight']} addQTip">${hItem['name'] | h}</span> </td> <td class="col-group break-word triggerhighlight"> <span class="break-word ${hItem['rg_highlight']}">${hItem['release_group']}</span> diff --git a/themes/dark/templates/snatchSelection.mako b/themes/dark/templates/snatchSelection.mako index 38e7704f31..284e1acfd1 100644 --- a/themes/dark/templates/snatchSelection.mako +++ b/themes/dark/templates/snatchSelection.mako @@ -119,7 +119,7 @@ % for hItem in provider_results['found_items']: <tr id="${hItem['name'] | h}" class="skipped season-${season} seasonstyle ${hItem['status_highlight']}" role="row"> <td class="release-name-ellipses triggerhighlight"> - <span data-qtip-my="top left" data-qtip-at="bottom left" :class="getReleaseNameClasses(`${hItem['name']}`)" title="${hItem['name'] | h}" class="break-word ${hItem['name_highlight']} addQTip">${hItem['name'] | h}</span> + <span v-pre data-qtip-my="top left" data-qtip-at="bottom left" :class="getReleaseNameClasses(`${hItem['name']}`)" title="${hItem['name'] | h}" class="break-word ${hItem['name_highlight']} addQTip">${hItem['name'] | h}</span> </td> <td class="col-group break-word triggerhighlight"> <span class="break-word ${hItem['rg_highlight']}">${hItem['release_group']}</span> diff --git a/themes/light/templates/snatchSelection.mako b/themes/light/templates/snatchSelection.mako index 38e7704f31..284e1acfd1 100644 --- a/themes/light/templates/snatchSelection.mako +++ b/themes/light/templates/snatchSelection.mako @@ -119,7 +119,7 @@ % 
for hItem in provider_results['found_items']: <tr id="${hItem['name'] | h}" class="skipped season-${season} seasonstyle ${hItem['status_highlight']}" role="row"> <td class="release-name-ellipses triggerhighlight"> - <span data-qtip-my="top left" data-qtip-at="bottom left" :class="getReleaseNameClasses(`${hItem['name']}`)" title="${hItem['name'] | h}" class="break-word ${hItem['name_highlight']} addQTip">${hItem['name'] | h}</span> + <span v-pre data-qtip-my="top left" data-qtip-at="bottom left" :class="getReleaseNameClasses(`${hItem['name']}`)" title="${hItem['name'] | h}" class="break-word ${hItem['name_highlight']} addQTip">${hItem['name'] | h}</span> </td> <td class="col-group break-word triggerhighlight"> <span class="break-word ${hItem['rg_highlight']}">${hItem['release_group']}</span>
carpentries__amy-2028
Update colors of progress states The current color scheme for our progress states makes it hard to know when something needs further attention and when it does not. Previously we had three colors: Red-Failed, Yellow-Not evaluated yet, and Green-Passed. The new 'Asked to Repeat' progress state option is also yellow, so it conflicts with the 'Not Evaluated Yet' option. **Please update the color for 'Asked to Repeat' to blue. Any shade will do.** Progress state colors can be viewed on the [More>Trainees page](https://amy.carpentries.org/trainings/trainees/)
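To make the requested mapping concrete, a small sketch (names are illustrative, not AMY's code) of progress states mapped to Bootstrap badge classes, with 'asked to repeat' on the blue/cyan `info` variant so it no longer collides with the yellow `warning` used for 'not evaluated yet':

```python
# One badge class per progress state; "dark" overrides everything for discarded entries.
BADGE_BY_STATE = {
    "p": "badge badge-success",  # passed -> green
    "n": "badge badge-warning",  # not evaluated yet -> yellow
    "f": "badge badge-danger",   # failed -> red
    "a": "badge badge-info",     # asked to repeat -> blue/cyan
}


def badge_class(state, discarded=False):
    return "badge badge-dark" if discarded else BADGE_BY_STATE[state]


assert badge_class("a") == "badge badge-info"
assert badge_class("a", discarded=True) == "badge badge-dark"
```

This matches the direction of the change in the record's diff below, which switches the `"a"` state to `"info"`.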
[ { "content": "from django import template\nfrom django.template.defaultfilters import escape\nfrom django.utils.safestring import mark_safe\n\nfrom workshops.models import TrainingProgress\n\nregister = template.Library()\n\n\[email protected]_tag\ndef progress_label(progress):\n assert isinstance(progress, TrainingProgress)\n\n if progress.discarded:\n additional_label = \"dark\"\n\n else:\n switch = {\n \"n\": \"warning\",\n \"f\": \"danger\",\n \"a\": \"warning\",\n \"p\": \"success\",\n }\n additional_label = switch[progress.state]\n\n fmt = \"badge badge-{}\".format(additional_label)\n return mark_safe(fmt)\n\n\[email protected]_tag\ndef progress_description(progress):\n assert isinstance(progress, TrainingProgress)\n\n text = \"{discarded}{state} {type}<br />{evaluated_by}<br />on {day}.{notes}\".format(\n discarded=\"discarded \" if progress.discarded else \"\",\n state=progress.get_state_display(),\n type=progress.requirement,\n evaluated_by=(\n \"evaluated by {}\".format(progress.evaluated_by.full_name)\n if progress.evaluated_by is not None\n else \"submitted\"\n ),\n day=progress.created_at.strftime(\"%A %d %B %Y at %H:%M\"),\n notes=\"<br />Notes: {}\".format(escape(progress.notes))\n if progress.notes\n else \"\",\n )\n text = text[0].upper() + text[1:]\n return mark_safe(text)\n", "path": "amy/workshops/templatetags/training_progress.py" } ]
[ { "content": "from django import template\nfrom django.template.defaultfilters import escape\nfrom django.utils.safestring import mark_safe\n\nfrom workshops.models import TrainingProgress\n\nregister = template.Library()\n\n\[email protected]_tag\ndef progress_label(progress):\n assert isinstance(progress, TrainingProgress)\n\n if progress.discarded:\n additional_label = \"dark\"\n\n else:\n switch = {\n \"n\": \"warning\",\n \"f\": \"danger\",\n \"a\": \"info\",\n \"p\": \"success\",\n }\n additional_label = switch[progress.state]\n\n fmt = \"badge badge-{}\".format(additional_label)\n return mark_safe(fmt)\n\n\[email protected]_tag\ndef progress_description(progress):\n assert isinstance(progress, TrainingProgress)\n\n text = \"{discarded}{state} {type}<br />{evaluated_by}<br />on {day}.{notes}\".format(\n discarded=\"discarded \" if progress.discarded else \"\",\n state=progress.get_state_display(),\n type=progress.requirement,\n evaluated_by=(\n \"evaluated by {}\".format(progress.evaluated_by.full_name)\n if progress.evaluated_by is not None\n else \"submitted\"\n ),\n day=progress.created_at.strftime(\"%A %d %B %Y at %H:%M\"),\n notes=\"<br />Notes: {}\".format(escape(progress.notes))\n if progress.notes\n else \"\",\n )\n text = text[0].upper() + text[1:]\n return mark_safe(text)\n", "path": "amy/workshops/templatetags/training_progress.py" } ]
diff --git a/amy/templates/trainings/all_trainees.html b/amy/templates/trainings/all_trainees.html index c300f236b..ad161a776 100644 --- a/amy/templates/trainings/all_trainees.html +++ b/amy/templates/trainings/all_trainees.html @@ -30,7 +30,10 @@ <span class='badge badge-success'>Passed</span> <span class='badge badge-warning'>Not evaluated</span> <span class='badge badge-danger'>Failed</span> - <span class='badge badge-dark'><strike>Discarded</strike></span></p> + <span class='badge badge-info'>Asked to repeat</span> + <!-- 'Discarded' is also strokethrough, but for some reason it would not display in popover --> + <span class='badge badge-dark'>Discarded</span> + </p> <p>Click one of labels below to edit or delete it.</p> "></i> </th> diff --git a/amy/workshops/templatetags/training_progress.py b/amy/workshops/templatetags/training_progress.py index e10f01d9a..35f8f4662 100644 --- a/amy/workshops/templatetags/training_progress.py +++ b/amy/workshops/templatetags/training_progress.py @@ -18,7 +18,7 @@ def progress_label(progress): switch = { "n": "warning", "f": "danger", - "a": "warning", + "a": "info", "p": "success", } additional_label = switch[progress.state]
rucio__rucio-1372
Fix activity in BB8 Motivation ---------- BB8 uses the activity `Data Rebalancing`, but the activity defined in the ATLAS schema is `Data rebalancing`. We should use the same activity everywhere, and it should be consistent with the share defined in FTS.
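Activity names are compared as exact strings, so `Data Rebalancing` and `Data rebalancing` are effectively two different activities and will not fall under the same FTS share. A hypothetical validation sketch, not Rucio code; the allowed set here is a placeholder for whatever the schema actually defines:

```python
# Placeholder for the activities defined in the ATLAS schema.
ALLOWED_ACTIVITIES = {"Data rebalancing"}


def validate_activity(activity):
    if activity not in ALLOWED_ACTIVITIES:
        raise ValueError(
            "Unknown activity %r; allowed: %s" % (activity, sorted(ALLOWED_ACTIVITIES))
        )
    return activity


validate_activity("Data rebalancing")    # accepted
# validate_activity("Data Rebalancing")  # raises ValueError: the case differs from the schema
```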
[ { "content": "\n'''\nThis file is automatically generated; Do not edit it. :)\n'''\nVERSION_INFO = {\n 'final': True,\n 'version': '1.17.4',\n 'branch_nick': 'patch-0-1_17_4_client_release_prep',\n 'revision_id': 'ba996ce9bf8366cd7d8d1fb60a7f1daf8d4f517e',\n 'revno': 6827\n}\n", "path": "lib/rucio/vcsversion.py" } ]
[ { "content": "\n'''\nThis file is automatically generated; Do not edit it. :)\n'''\nVERSION_INFO = {\n 'final': True,\n 'version': '1.17.5',\n 'branch_nick': 'patch-0-1_17_5_preparation',\n 'revision_id': '537e1e47eb627741394b6bb9bc21d0f046296275',\n 'revno': 6837\n}\n", "path": "lib/rucio/vcsversion.py" } ]
diff --git a/doc/source/releasenotes/1.17.5.rst b/doc/source/releasenotes/1.17.5.rst new file mode 100644 index 0000000000..d1ffa3fb57 --- /dev/null +++ b/doc/source/releasenotes/1.17.5.rst @@ -0,0 +1,35 @@ +====== +1.17.5 +====== + +------- +General +------- + +************ +Enhancements +************ + +- Documentation: rucio documentation fixes `#1310 <https://github.com/rucio/rucio/issues/1310>`_ +- Infrastructure: cx_oracle version in containers should be fixed to 6.3.1 `#1318 <https://github.com/rucio/rucio/issues/1318>`_ +- Infrastructure: Docker image build of rucio demo fails `#1329 <https://github.com/rucio/rucio/issues/1329>`_ +- Infrastructure: Incorrect configuration of rucio demo? `#1334 <https://github.com/rucio/rucio/issues/1334>`_ + +**** +Bugs +**** + +- Rebalancing: Fix activity in BB8 `#1361 <https://github.com/rucio/rucio/issues/1361>`_ +- Testing: Reset db tables script fails silently `#1317 <https://github.com/rucio/rucio/issues/1317>` + +------- +Clients +------- + +**** +Bugs +**** + +- RSE Manager: Deleting left-overs of first attempt of upload by correct protocol `#1346 <https://github.com/rucio/rucio/issues/1346>`_ +- Release management: Downgrade to older setuptools version as 39.0.1 is not py2.6 compatible `#1355 <https://github.com/rucio/rucio/issues/1355>`_ + diff --git a/lib/rucio/vcsversion.py b/lib/rucio/vcsversion.py index a30d1d15f2..5b5e58ab30 100644 --- a/lib/rucio/vcsversion.py +++ b/lib/rucio/vcsversion.py @@ -4,8 +4,8 @@ ''' VERSION_INFO = { 'final': True, - 'version': '1.17.4', - 'branch_nick': 'patch-0-1_17_4_client_release_prep', - 'revision_id': 'ba996ce9bf8366cd7d8d1fb60a7f1daf8d4f517e', - 'revno': 6827 + 'version': '1.17.5', + 'branch_nick': 'patch-0-1_17_5_preparation', + 'revision_id': '537e1e47eb627741394b6bb9bc21d0f046296275', + 'revno': 6837 } diff --git a/lib/rucio/web/ui/static/webui_version b/lib/rucio/web/ui/static/webui_version index 250f359745..74921a9666 100644 --- a/lib/rucio/web/ui/static/webui_version +++ b/lib/rucio/web/ui/static/webui_version @@ -1 +1 @@ -1.17.4 \ No newline at end of file +1.17.5 \ No newline at end of file
streamlit__streamlit-2499
st.number_input doesn't accept reasonable int arguments # Summary Using `st.number_input` in a very reasonable way: ```python x = st.number_input("x", 0, 10) ``` causes an exception to be thrown ``` StreamlitAPIException: All arguments must be of the same type. value has float type. min_value has int type. max_value has int type. Traceback: x = st.number_input("x", 0, 10) ``` ## Expected behavior: This should "just work," in the sense that it should create a number input that accepts `int`s between 0 and 10, with an initial default value of 0. ## Actual behavior: You get the exception above. You can "trick" Streamlit into providing the right behavior by forcing the `value` parameter to have type `int` as follows: ```python x = st.number_input("x", 0, 10, 0) ``` But I think this should just work without that extra parameter. ## Is this a regression? ?? # Debug info - Streamlit version: `Streamlit, version 0.73.0` - Python version: `Python 3.8.5` - Python environment: `pipenv, version 2020.11.4` - OS version: `Ubuntu 20.04.1 LTS`
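The mismatch comes from the defaulting step: with no explicit `value`, the widget falls back to the float `0.0` even when the bounds are ints, because `if min_value:` is falsy for `0`, and the later same-type check then fails. A standalone sketch of defaulting that avoids the problem by inheriting the type of `min_value`; this is illustrative only, not the Streamlit source:

```python
class NoValue:
    """Sentinel meaning no default was supplied."""


def resolve_default(min_value=None, value=NoValue()):
    if isinstance(value, NoValue):
        if min_value is not None:   # "is not None", not truthiness: 0 is a valid minimum
            return min_value        # int bounds -> int default
        return 0.0                  # nothing to infer from -> float default
    return value


assert resolve_default(min_value=0) == 0
assert isinstance(resolve_default(min_value=0), int)
assert isinstance(resolve_default(), float)
```

With defaulting like this, `st.number_input("x", 0, 10)` would start from the int `0` and pass the same-type check.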
[ { "content": "# Copyright 2018-2020 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numbers\nfrom typing import cast\n\nimport streamlit\nfrom streamlit.errors import StreamlitAPIException\nfrom streamlit.js_number import JSNumber, JSNumberBoundsException\nfrom streamlit.proto.NumberInput_pb2 import NumberInput as NumberInputProto\nfrom .utils import register_widget, NoValue\n\n\nclass NumberInputMixin:\n def number_input(\n self,\n label,\n min_value=None,\n max_value=None,\n value=NoValue(),\n step=None,\n format=None,\n key=None,\n ):\n \"\"\"Display a numeric input widget.\n\n Parameters\n ----------\n label : str or None\n A short label explaining to the user what this input is for.\n min_value : int or float or None\n The minimum permitted value.\n If None, there will be no minimum.\n max_value : int or float or None\n The maximum permitted value.\n If None, there will be no maximum.\n value : int or float or None\n The value of this widget when it first renders.\n Defaults to min_value, or 0.0 if min_value is None\n step : int or float or None\n The stepping interval.\n Defaults to 1 if the value is an int, 0.01 otherwise.\n If the value is not specified, the format parameter will be used.\n format : str or None\n A printf-style format string controlling how the interface should\n display numbers. Output must be purely numeric. This does not impact\n the return value. Valid formatters: %d %e %f %g %i\n key : str\n An optional string to use as the unique key for the widget.\n If this is omitted, a key will be generated for the widget\n based on its content. Multiple widgets of the same type may\n not share the same key.\n\n Returns\n -------\n int or float\n The current value of the numeric input widget. 
The return type\n will match the data type of the value parameter.\n\n Example\n -------\n >>> number = st.number_input('Insert a number')\n >>> st.write('The current number is ', number)\n \"\"\"\n\n if isinstance(value, NoValue):\n if min_value:\n value = min_value\n else:\n value = 0.0 # We set a float as default\n\n int_value = isinstance(value, numbers.Integral)\n float_value = isinstance(value, float)\n\n if value is None:\n raise StreamlitAPIException(\n \"Default value for number_input should be an int or a float.\"\n )\n else:\n if format is None:\n format = \"%d\" if int_value else \"%0.2f\"\n\n if format in [\"%d\", \"%u\", \"%i\"] and float_value:\n # Warn user to check if displaying float as int was really intended.\n import streamlit as st\n\n st.warning(\n \"Warning: NumberInput value below is float, but format {} displays as integer.\".format(\n format\n )\n )\n\n if step is None:\n step = 1 if int_value else 0.01\n\n try:\n float(format % 2)\n except (TypeError, ValueError):\n raise StreamlitAPIException(\n \"Format string for st.number_input contains invalid characters: %s\"\n % format\n )\n\n # Ensure that all arguments are of the same type.\n args = [min_value, max_value, step]\n\n int_args = all(\n map(\n lambda a: (\n isinstance(a, numbers.Integral) or isinstance(a, type(None))\n ),\n args,\n )\n )\n float_args = all(\n map(lambda a: (isinstance(a, float) or isinstance(a, type(None))), args)\n )\n\n if not int_args and not float_args:\n raise StreamlitAPIException(\n \"All arguments must be of the same type.\"\n \"\\n`value` has %(value_type)s type.\"\n \"\\n`min_value` has %(min_type)s type.\"\n \"\\n`max_value` has %(max_type)s type.\"\n % {\n \"value_type\": type(value).__name__,\n \"min_type\": type(min_value).__name__,\n \"max_type\": type(max_value).__name__,\n }\n )\n\n # Ensure that the value matches arguments' types.\n all_ints = int_value and int_args\n all_floats = float_value and float_args\n\n if not all_ints and not all_floats:\n raise StreamlitAPIException(\n \"All numerical arguments must be of the same type.\"\n \"\\n`value` has %(value_type)s type.\"\n \"\\n`min_value` has %(min_type)s type.\"\n \"\\n`max_value` has %(max_type)s type.\"\n \"\\n`step` has %(step_type)s type.\"\n % {\n \"value_type\": type(value).__name__,\n \"min_type\": type(min_value).__name__,\n \"max_type\": type(max_value).__name__,\n \"step_type\": type(step).__name__,\n }\n )\n\n if (min_value and min_value > value) or (max_value and max_value < value):\n raise StreamlitAPIException(\n \"The default `value` of %(value)s \"\n \"must lie between the `min_value` of %(min)s \"\n \"and the `max_value` of %(max)s, inclusively.\"\n % {\"value\": value, \"min\": min_value, \"max\": max_value}\n )\n\n # Bounds checks. 
JSNumber produces human-readable exceptions that\n # we simply re-package as StreamlitAPIExceptions.\n try:\n if all_ints:\n if min_value is not None:\n JSNumber.validate_int_bounds(min_value, \"`min_value`\")\n if max_value is not None:\n JSNumber.validate_int_bounds(max_value, \"`max_value`\")\n if step is not None:\n JSNumber.validate_int_bounds(step, \"`step`\")\n JSNumber.validate_int_bounds(value, \"`value`\")\n else:\n if min_value is not None:\n JSNumber.validate_float_bounds(min_value, \"`min_value`\")\n if max_value is not None:\n JSNumber.validate_float_bounds(max_value, \"`max_value`\")\n if step is not None:\n JSNumber.validate_float_bounds(step, \"`step`\")\n JSNumber.validate_float_bounds(value, \"`value`\")\n except JSNumberBoundsException as e:\n raise StreamlitAPIException(str(e))\n\n number_input_proto = NumberInputProto()\n number_input_proto.data_type = (\n NumberInputProto.INT if all_ints else NumberInputProto.FLOAT\n )\n number_input_proto.label = label\n number_input_proto.default = value\n\n if min_value is not None:\n number_input_proto.min = min_value\n number_input_proto.has_min = True\n\n if max_value is not None:\n number_input_proto.max = max_value\n number_input_proto.has_max = True\n\n if step is not None:\n number_input_proto.step = step\n\n if format is not None:\n number_input_proto.format = format\n\n ui_value = register_widget(\"number_input\", number_input_proto, user_key=key)\n\n return_value = ui_value if ui_value is not None else value\n return self.dg._enqueue(\"number_input\", number_input_proto, return_value)\n\n @property\n def dg(self) -> \"streamlit.delta_generator.DeltaGenerator\":\n \"\"\"Get our DeltaGenerator.\"\"\"\n return cast(\"streamlit.delta_generator.DeltaGenerator\", self)\n", "path": "lib/streamlit/elements/number_input.py" } ]
[ { "content": "# Copyright 2018-2020 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numbers\nfrom typing import cast\n\nimport streamlit\nfrom streamlit.errors import StreamlitAPIException\nfrom streamlit.js_number import JSNumber, JSNumberBoundsException\nfrom streamlit.proto.NumberInput_pb2 import NumberInput as NumberInputProto\nfrom .utils import register_widget, NoValue\n\n\nclass NumberInputMixin:\n def number_input(\n self,\n label,\n min_value=None,\n max_value=None,\n value=NoValue(),\n step=None,\n format=None,\n key=None,\n ):\n \"\"\"Display a numeric input widget.\n\n Parameters\n ----------\n label : str or None\n A short label explaining to the user what this input is for.\n min_value : int or float or None\n The minimum permitted value.\n If None, there will be no minimum.\n max_value : int or float or None\n The maximum permitted value.\n If None, there will be no maximum.\n value : int or float or None\n The value of this widget when it first renders.\n Defaults to min_value, or 0.0 if min_value is None\n step : int or float or None\n The stepping interval.\n Defaults to 1 if the value is an int, 0.01 otherwise.\n If the value is not specified, the format parameter will be used.\n format : str or None\n A printf-style format string controlling how the interface should\n display numbers. Output must be purely numeric. This does not impact\n the return value. Valid formatters: %d %e %f %g %i\n key : str\n An optional string to use as the unique key for the widget.\n If this is omitted, a key will be generated for the widget\n based on its content. Multiple widgets of the same type may\n not share the same key.\n\n Returns\n -------\n int or float\n The current value of the numeric input widget. 
The return type\n will match the data type of the value parameter.\n\n Example\n -------\n >>> number = st.number_input('Insert a number')\n >>> st.write('The current number is ', number)\n \"\"\"\n\n if isinstance(value, NoValue):\n if min_value is not None:\n value = min_value\n else:\n value = 0.0 # We set a float as default\n\n int_value = isinstance(value, numbers.Integral)\n float_value = isinstance(value, float)\n\n if value is None:\n raise StreamlitAPIException(\n \"Default value for number_input should be an int or a float.\"\n )\n else:\n if format is None:\n format = \"%d\" if int_value else \"%0.2f\"\n\n if format in [\"%d\", \"%u\", \"%i\"] and float_value:\n # Warn user to check if displaying float as int was really intended.\n import streamlit as st\n\n st.warning(\n \"Warning: NumberInput value below is float, but format {} displays as integer.\".format(\n format\n )\n )\n\n if step is None:\n step = 1 if int_value else 0.01\n\n try:\n float(format % 2)\n except (TypeError, ValueError):\n raise StreamlitAPIException(\n \"Format string for st.number_input contains invalid characters: %s\"\n % format\n )\n\n # Ensure that all arguments are of the same type.\n args = [min_value, max_value, step]\n\n int_args = all(\n map(\n lambda a: (\n isinstance(a, numbers.Integral) or isinstance(a, type(None))\n ),\n args,\n )\n )\n float_args = all(\n map(lambda a: (isinstance(a, float) or isinstance(a, type(None))), args)\n )\n\n if not int_args and not float_args:\n raise StreamlitAPIException(\n \"All arguments must be of the same type.\"\n \"\\n`value` has %(value_type)s type.\"\n \"\\n`min_value` has %(min_type)s type.\"\n \"\\n`max_value` has %(max_type)s type.\"\n % {\n \"value_type\": type(value).__name__,\n \"min_type\": type(min_value).__name__,\n \"max_type\": type(max_value).__name__,\n }\n )\n\n # Ensure that the value matches arguments' types.\n all_ints = int_value and int_args\n all_floats = float_value and float_args\n\n if not all_ints and not all_floats:\n raise StreamlitAPIException(\n \"All numerical arguments must be of the same type.\"\n \"\\n`value` has %(value_type)s type.\"\n \"\\n`min_value` has %(min_type)s type.\"\n \"\\n`max_value` has %(max_type)s type.\"\n \"\\n`step` has %(step_type)s type.\"\n % {\n \"value_type\": type(value).__name__,\n \"min_type\": type(min_value).__name__,\n \"max_type\": type(max_value).__name__,\n \"step_type\": type(step).__name__,\n }\n )\n\n if (min_value and min_value > value) or (max_value and max_value < value):\n raise StreamlitAPIException(\n \"The default `value` of %(value)s \"\n \"must lie between the `min_value` of %(min)s \"\n \"and the `max_value` of %(max)s, inclusively.\"\n % {\"value\": value, \"min\": min_value, \"max\": max_value}\n )\n\n # Bounds checks. 
JSNumber produces human-readable exceptions that\n # we simply re-package as StreamlitAPIExceptions.\n try:\n if all_ints:\n if min_value is not None:\n JSNumber.validate_int_bounds(min_value, \"`min_value`\")\n if max_value is not None:\n JSNumber.validate_int_bounds(max_value, \"`max_value`\")\n if step is not None:\n JSNumber.validate_int_bounds(step, \"`step`\")\n JSNumber.validate_int_bounds(value, \"`value`\")\n else:\n if min_value is not None:\n JSNumber.validate_float_bounds(min_value, \"`min_value`\")\n if max_value is not None:\n JSNumber.validate_float_bounds(max_value, \"`max_value`\")\n if step is not None:\n JSNumber.validate_float_bounds(step, \"`step`\")\n JSNumber.validate_float_bounds(value, \"`value`\")\n except JSNumberBoundsException as e:\n raise StreamlitAPIException(str(e))\n\n number_input_proto = NumberInputProto()\n number_input_proto.data_type = (\n NumberInputProto.INT if all_ints else NumberInputProto.FLOAT\n )\n number_input_proto.label = label\n number_input_proto.default = value\n\n if min_value is not None:\n number_input_proto.min = min_value\n number_input_proto.has_min = True\n\n if max_value is not None:\n number_input_proto.max = max_value\n number_input_proto.has_max = True\n\n if step is not None:\n number_input_proto.step = step\n\n if format is not None:\n number_input_proto.format = format\n\n ui_value = register_widget(\"number_input\", number_input_proto, user_key=key)\n\n return_value = ui_value if ui_value is not None else value\n return self.dg._enqueue(\"number_input\", number_input_proto, return_value)\n\n @property\n def dg(self) -> \"streamlit.delta_generator.DeltaGenerator\":\n \"\"\"Get our DeltaGenerator.\"\"\"\n return cast(\"streamlit.delta_generator.DeltaGenerator\", self)\n", "path": "lib/streamlit/elements/number_input.py" } ]
diff --git a/lib/streamlit/elements/number_input.py b/lib/streamlit/elements/number_input.py index 5db6edec1998..455ccbd4db4c 100644 --- a/lib/streamlit/elements/number_input.py +++ b/lib/streamlit/elements/number_input.py @@ -75,7 +75,7 @@ def number_input( """ if isinstance(value, NoValue): - if min_value: + if min_value is not None: value = min_value else: value = 0.0 # We set a float as default diff --git a/lib/tests/streamlit/number_input_test.py b/lib/tests/streamlit/number_input_test.py index 1e1fa50121be..cb1378935850 100644 --- a/lib/tests/streamlit/number_input_test.py +++ b/lib/tests/streamlit/number_input_test.py @@ -35,6 +35,11 @@ def test_data_type(self): st.number_input("Label", value=0.5) c = self.get_delta_from_queue().new_element.number_input self.assertEqual(NumberInput.FLOAT, c.data_type) + + def test_min_value_zero_sets_default_value(self): + st.number_input("Label", 0, 10) + c = self.get_delta_from_queue().new_element.number_input + self.assertEqual(c.default, 0) # the 0 we provided, not 0.0! def test_just_label(self): """Test that it can be called with no value."""
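Note on the fix above: the one-line change from "if min_value:" to "if min_value is not None:" addresses a Python truthiness pitfall. 0 and 0.0 are falsy, so a call like st.number_input("Label", 0, 10) previously fell through to the 0.0 float default instead of using the integer minimum; the added test test_min_value_zero_sets_default_value covers exactly this case. A minimal standalone sketch of the distinction, using hypothetical helper names rather than Streamlit code:

    def pick_default(min_value):
        # Buggy variant: "if min_value:" skips 0 because 0 is falsy.
        return min_value if min_value else 0.0

    def pick_default_fixed(min_value):
        # Fixed variant: only fall back when min_value is actually None.
        return min_value if min_value is not None else 0.0

    print(pick_default(0), pick_default_fixed(0))        # 0.0 0   -> only the fixed variant keeps the int 0
    print(pick_default(None), pick_default_fixed(None))  # 0.0 0.0 -> both fall back to the float default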
shuup__shuup-1574
Admin: Main menu won't stay hidden. Two issues (at least): Desktop: if I close (minimize) the main menu and click any link, the menu appears again. Desktop to mobile: if I minimize the menu on a bigger desktop and then drag the window smaller, the menu appears again.
[ { "content": "# -*- coding: utf-8 -*-\n# This file is part of Shuup.\n#\n# Copyright (c) 2012-2018, Shuup Inc. All rights reserved.\n#\n# This source code is licensed under the OSL-3.0 license found in the\n# LICENSE file in the root directory of this source tree.\nfrom django.http import JsonResponse\nfrom django.views.generic import TemplateView, View\n\n\nclass MenuView(TemplateView):\n template_name = \"shuup/admin/base/_main_menu.jinja\"\n\n\nclass MenuToggleView(View):\n def post(self, request, *args, **kwargs):\n request.session[\"menu_open\"] = int(request.POST.get(\"menu_open\", 0))\n return JsonResponse({\"success\": True})\n", "path": "shuup/admin/views/menu.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\n# This file is part of Shuup.\n#\n# Copyright (c) 2012-2018, Shuup Inc. All rights reserved.\n#\n# This source code is licensed under the OSL-3.0 license found in the\n# LICENSE file in the root directory of this source tree.\nfrom django.http import JsonResponse\nfrom django.views.generic import TemplateView, View\n\n\nclass MenuView(TemplateView):\n template_name = \"shuup/admin/base/_main_menu.jinja\"\n\n\nclass MenuToggleView(View):\n def post(self, request, *args, **kwargs):\n request.session[\"menu_open\"] = not bool(request.session.get(\"menu_open\", True))\n return JsonResponse({\"success\": True})\n", "path": "shuup/admin/views/menu.py" } ]
diff --git a/README.rst b/README.rst index 3f5e38dc5c..efa8972321 100644 --- a/README.rst +++ b/README.rst @@ -95,7 +95,7 @@ Roadmap * Per object placeholders. Option to add content per contact group, category, product and CMS page. `#1220 <https://github.com/shuup/shuup/issues/1220>`__ :white_check_mark:. * Pricing cache. To improve the performance issues with complex catalog campaigns. `#1163 <https://github.com/shuup/shuup/issues/1163>`__ :white_check_mark:. * Option for 'centrally' or 'separately' managed products. `#1275 <https://github.com/shuup/shuup/issues/1275>`__. -* Improve shop product purchasable attribute. `#1281 <https://github.com/shuup/shuup/issues/1281>`__. +* Improve shop product purchasable attribute. `#1281 <https://github.com/shuup/shuup/issues/1281>`__ :white_check_mark:. * Improve product stock behavior. `#1249 <https://github.com/shuup/shuup/issues/1249>`__. * Improved unit tests for the multishop feature. `#1160 <https://github.com/shuup/shuup/issues/1160>`__. * Improve order status and order status history. `#1211 <https://github.com/shuup/shuup/issues/1211>`__. @@ -111,6 +111,8 @@ OS Addons * `Shuup Checkoutfi <https://github.com/shuup/shuup-checkoutfi>`__. Checkout.fi integration for Shuup. * `Shuup Yaml <https://github.com/shuup/shuup-yaml>`__. Import categories, manufacturers and products to Shuup. * `Shuup Mailchimp <https://github.com/shuup/shuup-mailchimp>`__. Mailchimp integration for Shuup. +* `Shuup Xtheme Layouts <https://github.com/shuup/shuup-xtheme-extra-layouts>`__. Xtheme layouts for Shuup. + The purpose of these addons, is to demonstrate how to build other simple addons to extend Shuup. To learn more, here are diff --git a/doc/api/shuup.addons.rst b/doc/api/shuup.addons.rst index e263b093f1..e3f878f174 100644 --- a/doc/api/shuup.addons.rst +++ b/doc/api/shuup.addons.rst @@ -35,14 +35,6 @@ shuup.addons.reloader module :undoc-members: :show-inheritance: -shuup.addons.verify module --------------------------- - -.. automodule:: shuup.addons.verify - :members: - :undoc-members: - :show-inheritance: - Module contents --------------- diff --git a/doc/api/shuup.admin.forms.rst b/doc/api/shuup.admin.forms.rst index fa8a5ddd7a..c7a90a8e2e 100644 --- a/doc/api/shuup.admin.forms.rst +++ b/doc/api/shuup.admin.forms.rst @@ -12,6 +12,14 @@ shuup.admin.forms.fields module :undoc-members: :show-inheritance: +shuup.admin.forms.quick\_select module +-------------------------------------- + +.. automodule:: shuup.admin.forms.quick_select + :members: + :undoc-members: + :show-inheritance: + shuup.admin.forms.widgets module -------------------------------- diff --git a/doc/api/shuup.admin.modules.categories.views.rst b/doc/api/shuup.admin.modules.categories.views.rst index 3950251a2e..9cd9034406 100644 --- a/doc/api/shuup.admin.modules.categories.views.rst +++ b/doc/api/shuup.admin.modules.categories.views.rst @@ -36,6 +36,14 @@ shuup.admin.modules.categories.views.list module :undoc-members: :show-inheritance: +shuup.admin.modules.categories.views.organize module +---------------------------------------------------- + +.. 
automodule:: shuup.admin.modules.categories.views.organize + :members: + :undoc-members: + :show-inheritance: + Module contents --------------- diff --git a/doc/api/shuup.admin.modules.contact_group_price_display.rst b/doc/api/shuup.admin.modules.contact_group_price_display.rst new file mode 100644 index 0000000000..fdb902015a --- /dev/null +++ b/doc/api/shuup.admin.modules.contact_group_price_display.rst @@ -0,0 +1,17 @@ +shuup.admin.modules.contact\_group\_price\_display package +========================================================== + +Subpackages +----------- + +.. toctree:: + + shuup.admin.modules.contact_group_price_display.views + +Module contents +--------------- + +.. automodule:: shuup.admin.modules.contact_group_price_display + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/api/shuup.admin.modules.contact_group_price_display.views.rst b/doc/api/shuup.admin.modules.contact_group_price_display.views.rst new file mode 100644 index 0000000000..2dacf527f3 --- /dev/null +++ b/doc/api/shuup.admin.modules.contact_group_price_display.views.rst @@ -0,0 +1,38 @@ +shuup.admin.modules.contact\_group\_price\_display.views package +================================================================ + +Submodules +---------- + +shuup.admin.modules.contact\_group\_price\_display.views.edit module +-------------------------------------------------------------------- + +.. automodule:: shuup.admin.modules.contact_group_price_display.views.edit + :members: + :undoc-members: + :show-inheritance: + +shuup.admin.modules.contact\_group\_price\_display.views.forms module +--------------------------------------------------------------------- + +.. automodule:: shuup.admin.modules.contact_group_price_display.views.forms + :members: + :undoc-members: + :show-inheritance: + +shuup.admin.modules.contact\_group\_price\_display.views.list module +-------------------------------------------------------------------- + +.. automodule:: shuup.admin.modules.contact_group_price_display.views.list + :members: + :undoc-members: + :show-inheritance: + + +Module contents +--------------- + +.. automodule:: shuup.admin.modules.contact_group_price_display.views + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/api/shuup.admin.modules.contacts.rst b/doc/api/shuup.admin.modules.contacts.rst index 9e9e0f986d..c7e76447ce 100644 --- a/doc/api/shuup.admin.modules.contacts.rst +++ b/doc/api/shuup.admin.modules.contacts.rst @@ -43,6 +43,14 @@ shuup.admin.modules.contacts.sections module :undoc-members: :show-inheritance: +shuup.admin.modules.contacts.utils module +----------------------------------------- + +.. automodule:: shuup.admin.modules.contacts.utils + :members: + :undoc-members: + :show-inheritance: + Module contents --------------- diff --git a/doc/api/shuup.admin.modules.rst b/doc/api/shuup.admin.modules.rst index 22db6cdd98..3064186139 100644 --- a/doc/api/shuup.admin.modules.rst +++ b/doc/api/shuup.admin.modules.rst @@ -8,6 +8,7 @@ Subpackages shuup.admin.modules.attributes shuup.admin.modules.categories + shuup.admin.modules.contact_group_price_display shuup.admin.modules.contact_groups shuup.admin.modules.contacts shuup.admin.modules.content diff --git a/doc/api/shuup.admin.utils.rst b/doc/api/shuup.admin.utils.rst index 6ee83c8b14..2dc7596f56 100644 --- a/doc/api/shuup.admin.utils.rst +++ b/doc/api/shuup.admin.utils.rst @@ -20,6 +20,14 @@ shuup.admin.utils.forms module :undoc-members: :show-inheritance: +shuup.admin.utils.menu module +----------------------------- + +.. 
automodule:: shuup.admin.utils.menu + :members: + :undoc-members: + :show-inheritance: + shuup.admin.utils.permissions module ------------------------------------ diff --git a/doc/api/shuup.admin.views.rst b/doc/api/shuup.admin.views.rst index 2803a1078c..80ca1a028c 100644 --- a/doc/api/shuup.admin.views.rst +++ b/doc/api/shuup.admin.views.rst @@ -12,6 +12,14 @@ shuup.admin.views.dashboard module :undoc-members: :show-inheritance: +shuup.admin.views.edit module +----------------------------- + +.. automodule:: shuup.admin.views.edit + :members: + :undoc-members: + :show-inheritance: + shuup.admin.views.home module ----------------------------- diff --git a/doc/api/shuup.campaigns.rst b/doc/api/shuup.campaigns.rst index c42f52eddc..7f97c398e2 100644 --- a/doc/api/shuup.campaigns.rst +++ b/doc/api/shuup.campaigns.rst @@ -32,6 +32,14 @@ shuup.campaigns.consts module :undoc-members: :show-inheritance: +shuup.campaigns.exceptions module +--------------------------------- + +.. automodule:: shuup.campaigns.exceptions + :members: + :undoc-members: + :show-inheritance: + shuup.campaigns.modules module ------------------------------ diff --git a/doc/api/shuup.core.rst b/doc/api/shuup.core.rst index 0bb92deb9c..7f924c283d 100644 --- a/doc/api/shuup.core.rst +++ b/doc/api/shuup.core.rst @@ -74,6 +74,14 @@ shuup.core.shop\_provider module :undoc-members: :show-inheritance: +shuup.core.signal\_handers module +--------------------------------- + +.. automodule:: shuup.core.signal_handers + :members: + :undoc-members: + :show-inheritance: + shuup.core.signals module ------------------------- diff --git a/doc/api/shuup.core.utils.rst b/doc/api/shuup.core.utils.rst index 2013f3b073..67080c10f4 100644 --- a/doc/api/shuup.core.utils.rst +++ b/doc/api/shuup.core.utils.rst @@ -84,6 +84,14 @@ shuup.core.utils.name\_mixin module :undoc-members: :show-inheritance: +shuup.core.utils.price\_cache module +------------------------------------ + +.. automodule:: shuup.core.utils.price_cache + :members: + :undoc-members: + :show-inheritance: + shuup.core.utils.price\_display module -------------------------------------- diff --git a/doc/api/shuup.customer_group_pricing.rst b/doc/api/shuup.customer_group_pricing.rst index 9d60c2509a..48c1e7f890 100644 --- a/doc/api/shuup.customer_group_pricing.rst +++ b/doc/api/shuup.customer_group_pricing.rst @@ -36,6 +36,14 @@ shuup.customer\_group\_pricing.module module :undoc-members: :show-inheritance: +shuup.customer\_group\_pricing.signal\_handers module +----------------------------------------------------- + +.. automodule:: shuup.customer_group_pricing.signal_handers + :members: + :undoc-members: + :show-inheritance: + shuup.customer\_group\_pricing.utils module ------------------------------------------- diff --git a/doc/api/shuup.discounts.admin.modules.rst b/doc/api/shuup.discounts.admin.modules.rst new file mode 100644 index 0000000000..8cb1abfa27 --- /dev/null +++ b/doc/api/shuup.discounts.admin.modules.rst @@ -0,0 +1,10 @@ +shuup.discounts.admin.modules package +===================================== + +Module contents +--------------- + +.. automodule:: shuup.discounts.admin.modules + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/api/shuup.discounts.admin.rst b/doc/api/shuup.discounts.admin.rst new file mode 100644 index 0000000000..041061f40f --- /dev/null +++ b/doc/api/shuup.discounts.admin.rst @@ -0,0 +1,38 @@ +shuup.discounts.admin package +============================= + +Subpackages +----------- + +.. 
toctree:: + + shuup.discounts.admin.modules + shuup.discounts.admin.views + +Submodules +---------- + +shuup.discounts.admin.mass\_actions module +------------------------------------------ + +.. automodule:: shuup.discounts.admin.mass_actions + :members: + :undoc-members: + :show-inheritance: + +shuup.discounts.admin.widgets module +------------------------------------ + +.. automodule:: shuup.discounts.admin.widgets + :members: + :undoc-members: + :show-inheritance: + + +Module contents +--------------- + +.. automodule:: shuup.discounts.admin + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/api/shuup.discounts.admin.views.rst b/doc/api/shuup.discounts.admin.views.rst new file mode 100644 index 0000000000..3092a631a7 --- /dev/null +++ b/doc/api/shuup.discounts.admin.views.rst @@ -0,0 +1,10 @@ +shuup.discounts.admin.views package +=================================== + +Module contents +--------------- + +.. automodule:: shuup.discounts.admin.views + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/api/shuup.discounts.management.commands.rst b/doc/api/shuup.discounts.management.commands.rst new file mode 100644 index 0000000000..2b35f1023d --- /dev/null +++ b/doc/api/shuup.discounts.management.commands.rst @@ -0,0 +1,22 @@ +shuup.discounts.management.commands package +=========================================== + +Submodules +---------- + +shuup.discounts.management.commands.import\_catalog\_campaigns module +--------------------------------------------------------------------- + +.. automodule:: shuup.discounts.management.commands.import_catalog_campaigns + :members: + :undoc-members: + :show-inheritance: + + +Module contents +--------------- + +.. automodule:: shuup.discounts.management.commands + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/api/shuup.discounts.management.rst b/doc/api/shuup.discounts.management.rst new file mode 100644 index 0000000000..9140746a36 --- /dev/null +++ b/doc/api/shuup.discounts.management.rst @@ -0,0 +1,17 @@ +shuup.discounts.management package +================================== + +Subpackages +----------- + +.. toctree:: + + shuup.discounts.management.commands + +Module contents +--------------- + +.. automodule:: shuup.discounts.management + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/api/shuup.discounts.models.rst b/doc/api/shuup.discounts.models.rst new file mode 100644 index 0000000000..6a69617e0a --- /dev/null +++ b/doc/api/shuup.discounts.models.rst @@ -0,0 +1,10 @@ +shuup.discounts.models package +============================== + +Module contents +--------------- + +.. automodule:: shuup.discounts.models + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/api/shuup.discounts.rst b/doc/api/shuup.discounts.rst new file mode 100644 index 0000000000..c84f09d061 --- /dev/null +++ b/doc/api/shuup.discounts.rst @@ -0,0 +1,71 @@ +shuup.discounts package +======================= + +Subpackages +----------- + +.. toctree:: + + shuup.discounts.admin + shuup.discounts.management + shuup.discounts.models + +Submodules +---------- + +shuup.discounts.apps module +--------------------------- + +.. automodule:: shuup.discounts.apps + :members: + :undoc-members: + :show-inheritance: + +shuup.discounts.exceptions module +--------------------------------- + +.. automodule:: shuup.discounts.exceptions + :members: + :undoc-members: + :show-inheritance: + +shuup.discounts.modules module +------------------------------ + +.. 
automodule:: shuup.discounts.modules + :members: + :undoc-members: + :show-inheritance: + +shuup.discounts.settings module +------------------------------- + +.. automodule:: shuup.discounts.settings + :members: + :undoc-members: + :show-inheritance: + +shuup.discounts.signal\_handers module +-------------------------------------- + +.. automodule:: shuup.discounts.signal_handers + :members: + :undoc-members: + :show-inheritance: + +shuup.discounts.utils module +---------------------------- + +.. automodule:: shuup.discounts.utils + :members: + :undoc-members: + :show-inheritance: + + +Module contents +--------------- + +.. automodule:: shuup.discounts + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/api/shuup.front.apps.simple_search.rst b/doc/api/shuup.front.apps.simple_search.rst index e95e104981..9918ca0c01 100644 --- a/doc/api/shuup.front.apps.simple_search.rst +++ b/doc/api/shuup.front.apps.simple_search.rst @@ -12,6 +12,14 @@ shuup.front.apps.simple\_search.forms module :undoc-members: :show-inheritance: +shuup.front.apps.simple\_search.settings module +----------------------------------------------- + +.. automodule:: shuup.front.apps.simple_search.settings + :members: + :undoc-members: + :show-inheritance: + shuup.front.apps.simple\_search.template\_helpers module -------------------------------------------------------- diff --git a/doc/api/shuup.front.rst b/doc/api/shuup.front.rst index 412847d82a..2f70000c3c 100644 --- a/doc/api/shuup.front.rst +++ b/doc/api/shuup.front.rst @@ -55,6 +55,14 @@ shuup.front.settings module :undoc-members: :show-inheritance: +shuup.front.signal\_handlers module +----------------------------------- + +.. automodule:: shuup.front.signal_handlers + :members: + :undoc-members: + :show-inheritance: + shuup.front.signals module -------------------------- diff --git a/doc/api/shuup.front.utils.rst b/doc/api/shuup.front.utils.rst index f4352e084d..a743001b09 100644 --- a/doc/api/shuup.front.utils.rst +++ b/doc/api/shuup.front.utils.rst @@ -4,6 +4,14 @@ shuup.front.utils package Submodules ---------- +shuup.front.utils.cache module +------------------------------ + +.. automodule:: shuup.front.utils.cache + :members: + :undoc-members: + :show-inheritance: + shuup.front.utils.companies module ---------------------------------- @@ -52,6 +60,14 @@ shuup.front.utils.translation module :undoc-members: :show-inheritance: +shuup.front.utils.urls module +----------------------------- + +.. automodule:: shuup.front.utils.urls + :members: + :undoc-members: + :show-inheritance: + shuup.front.utils.user module ----------------------------- diff --git a/doc/api/shuup.regions.rst b/doc/api/shuup.regions.rst index b642229271..9e55639771 100644 --- a/doc/api/shuup.regions.rst +++ b/doc/api/shuup.regions.rst @@ -12,14 +12,6 @@ shuup.regions.apps module :undoc-members: :show-inheritance: -shuup.regions.data module -------------------------- - -.. 
automodule:: shuup.regions.data - :members: - :undoc-members: - :show-inheritance: - shuup.regions.resources module ------------------------------ diff --git a/doc/api/shuup.rst b/doc/api/shuup.rst index 93065c4f70..000dbd1f55 100644 --- a/doc/api/shuup.rst +++ b/doc/api/shuup.rst @@ -16,6 +16,7 @@ Subpackages shuup.default_importer shuup.default_reports shuup.default_tax + shuup.discounts shuup.front shuup.gdpr shuup.guide diff --git a/doc/api/shuup.simple_cms.rst b/doc/api/shuup.simple_cms.rst index c3d95fa157..7cff0b6445 100644 --- a/doc/api/shuup.simple_cms.rst +++ b/doc/api/shuup.simple_cms.rst @@ -11,6 +11,14 @@ Subpackages Submodules ---------- +shuup.simple\_cms.layout module +------------------------------- + +.. automodule:: shuup.simple_cms.layout + :members: + :undoc-members: + :show-inheritance: + shuup.simple\_cms.models module ------------------------------- @@ -27,6 +35,14 @@ shuup.simple\_cms.plugins module :undoc-members: :show-inheritance: +shuup.simple\_cms.settings module +--------------------------------- + +.. automodule:: shuup.simple_cms.settings + :members: + :undoc-members: + :show-inheritance: + shuup.simple\_cms.template\_helpers module ------------------------------------------ @@ -35,6 +51,14 @@ shuup.simple\_cms.template\_helpers module :undoc-members: :show-inheritance: +shuup.simple\_cms.templates module +---------------------------------- + +.. automodule:: shuup.simple_cms.templates + :members: + :undoc-members: + :show-inheritance: + shuup.simple\_cms.urls module ----------------------------- diff --git a/doc/api/shuup.utils.rst b/doc/api/shuup.utils.rst index 02dbcac549..623d076b2c 100644 --- a/doc/api/shuup.utils.rst +++ b/doc/api/shuup.utils.rst @@ -124,6 +124,14 @@ shuup.utils.iterables module :undoc-members: :show-inheritance: +shuup.utils.migrations module +----------------------------- + +.. automodule:: shuup.utils.migrations + :members: + :undoc-members: + :show-inheritance: + shuup.utils.models module ------------------------- diff --git a/doc/api/shuup.xtheme.admin_module.rst b/doc/api/shuup.xtheme.admin_module.rst index d99784f1e5..8fa12fa271 100644 --- a/doc/api/shuup.xtheme.admin_module.rst +++ b/doc/api/shuup.xtheme.admin_module.rst @@ -1,13 +1,28 @@ shuup.xtheme.admin\_module package ================================== +Subpackages +----------- + +.. toctree:: + + shuup.xtheme.admin_module.views + Submodules ---------- -shuup.xtheme.admin\_module.views module +shuup.xtheme.admin\_module.utils module --------------------------------------- -.. automodule:: shuup.xtheme.admin_module.views +.. automodule:: shuup.xtheme.admin_module.utils + :members: + :undoc-members: + :show-inheritance: + +shuup.xtheme.admin\_module.widgets module +----------------------------------------- + +.. automodule:: shuup.xtheme.admin_module.widgets :members: :undoc-members: :show-inheritance: diff --git a/doc/api/shuup.xtheme.admin_module.views.rst b/doc/api/shuup.xtheme.admin_module.views.rst new file mode 100644 index 0000000000..b653e22301 --- /dev/null +++ b/doc/api/shuup.xtheme.admin_module.views.rst @@ -0,0 +1,10 @@ +shuup.xtheme.admin\_module.views package +======================================== + +Module contents +--------------- + +.. 
automodule:: shuup.xtheme.admin_module.views + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/api/shuup.xtheme.layout.rst b/doc/api/shuup.xtheme.layout.rst new file mode 100644 index 0000000000..f23a00f9bf --- /dev/null +++ b/doc/api/shuup.xtheme.layout.rst @@ -0,0 +1,22 @@ +shuup.xtheme.layout package +=========================== + +Submodules +---------- + +shuup.xtheme.layout.utils module +-------------------------------- + +.. automodule:: shuup.xtheme.layout.utils + :members: + :undoc-members: + :show-inheritance: + + +Module contents +--------------- + +.. automodule:: shuup.xtheme.layout + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/api/shuup.xtheme.rst b/doc/api/shuup.xtheme.rst index fea6e714f9..77f7939f24 100644 --- a/doc/api/shuup.xtheme.rst +++ b/doc/api/shuup.xtheme.rst @@ -7,6 +7,7 @@ Subpackages .. toctree:: shuup.xtheme.admin_module + shuup.xtheme.layout shuup.xtheme.plugins shuup.xtheme.templatetags shuup.xtheme.views @@ -46,14 +47,6 @@ shuup.xtheme.forms module :undoc-members: :show-inheritance: -shuup.xtheme.layout module --------------------------- - -.. automodule:: shuup.xtheme.layout - :members: - :undoc-members: - :show-inheritance: - shuup.xtheme.middleware module ------------------------------ @@ -94,6 +87,14 @@ shuup.xtheme.resources module :undoc-members: :show-inheritance: +shuup.xtheme.settings module +---------------------------- + +.. automodule:: shuup.xtheme.settings + :members: + :undoc-members: + :show-inheritance: + shuup.xtheme.template\_ns module -------------------------------- diff --git a/shuup/admin/npm-shrinkwrap.json b/shuup/admin/npm-shrinkwrap.json index e5a7dd9fd9..cdd49339e5 100644 --- a/shuup/admin/npm-shrinkwrap.json +++ b/shuup/admin/npm-shrinkwrap.json @@ -7,7 +7,7 @@ "@babel/code-frame": { "version": "7.0.0", "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.0.0.tgz", - "integrity": "sha512-OfC2uemaknXr87bdLUkWog7nYuliM9Ij5HUcajsVcMCpQrcLmtxRbVFTIqmcSkSeYRBFBRxs2FiUqFJDLdiebA==", + "integrity": "sha1-BuKrGb21NThVWaq7W6WXKUgoAPg=", "dev": true, "requires": { "@babel/highlight": "^7.0.0" @@ -16,7 +16,7 @@ "@babel/highlight": { "version": "7.0.0", "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.0.0.tgz", - "integrity": "sha512-UFMC4ZeFC48Tpvj7C8UgLvtkaUuovQX+5xNWrsIoMG8o2z+XFKjKaN9iVmS84dPwVN00W4wPmqvYoZF3EGAsfw==", + "integrity": "sha1-9xDDjI1Fjm3ZogGvtjf8t4HOmeQ=", "dev": true, "requires": { "chalk": "^2.0.0", @@ -27,7 +27,7 @@ "ansi-styles": { "version": "3.2.1", "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "integrity": "sha1-QfuyAkPlCxK+DwS43tvwdSDOhB0=", "dev": true, "requires": { "color-convert": "^1.9.0" @@ -36,7 +36,7 @@ "chalk": { "version": "2.4.1", "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.1.tgz", - "integrity": "sha512-ObN6h1v2fTJSmUXoS3nMQ92LbDK9be4TV+6G+omQlGJFdcUX5heKi1LZ1YnRMIgwTLEj3E24bT6tYni50rlCfQ==", + "integrity": "sha1-GMSasWoDe26wFSzIPjRxM4IVtm4=", "dev": true, "requires": { "ansi-styles": "^3.2.1", @@ -49,12 +49,12 @@ "@compone/class": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/@compone/class/-/class-1.1.1.tgz", - "integrity": "sha512-pbODcJi0TdyKQ/PTHSHLwO4h/r5EgMdkPQLdBSaZBUiBuWdGil+0PEhpfhAWDuFrwVPKiCHYQOfs8WyGe9ABWA==" + "integrity": "sha1-y+YiXKEjiUdpyMtltJQwvY3qRyo=" }, "@compone/define": { "version": "1.2.4", "resolved": 
"https://registry.npmjs.org/@compone/define/-/define-1.2.4.tgz", - "integrity": "sha512-w0ZDiYMIppvb1epoNY64pkEACwn9693cc7qM1ZSKWUVZczx5vlR4iZM7by129IYUdCq0SsbxQbbPZjnzj/0Qew==", + "integrity": "sha1-Wc00yV0ywR3nSW21yAHcd0jLhmY=", "requires": { "@compone/class": "^1.1.1", "@compone/event": "^1.1.2" @@ -63,7 +63,7 @@ "@compone/event": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/@compone/event/-/event-1.1.2.tgz", - "integrity": "sha512-baJDnAr8pWefqfltNS33HieD+s23YO+w2/RD6lPxIEzlOuM1R5RT5vpUUTcrzn0Er3oj62PlfMUyS0SwnVw67Q==", + "integrity": "sha1-4gymLJoaHAYqlhKHS3hFNzeny+s=", "requires": { "utilise": "^2.3.5" } @@ -71,8 +71,7 @@ "@mrmlnc/readdir-enhanced": { "version": "2.2.1", "resolved": "https://registry.npmjs.org/@mrmlnc/readdir-enhanced/-/readdir-enhanced-2.2.1.tgz", - "integrity": "sha512-bPHp6Ji8b41szTOcaP63VlnbbO5Ny6dwAATtY6JTjh5N2OLrb5Qk/Th5cRkRQhkWCt+EJsYrNB0MiL+Gpn6e3g==", - "dev": true, + "integrity": "sha1-UkryQNGjYFJ7cwR17PoTRKpUDd4=", "requires": { "call-me-maybe": "^1.0.1", "glob-to-regexp": "^0.3.0" @@ -81,14 +80,12 @@ "@nodelib/fs.stat": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-1.1.2.tgz", - "integrity": "sha512-yprFYuno9FtNsSHVlSWd+nRlmGoAbqbeCwOryP6sC/zoCjhpArcRMYp19EvpSUSizJAlsXEwJv+wcWS9XaXdMw==", - "dev": true + "integrity": "sha1-VMWpZEYr49TXivYxNjwY1vqRrCY=" }, "@types/commander": { "version": "2.12.2", "resolved": "https://registry.npmjs.org/@types/commander/-/commander-2.12.2.tgz", - "integrity": "sha512-0QEFiR8ljcHp9bAbWxecjVRuAMr16ivPiGOw6KFQBVrVd0RQIcM3xKdRisH2EDWgVWujiYtHwhSkSUoAAGzH7Q==", - "dev": true, + "integrity": "sha1-GDBBojhC1CgUePpdI8XKeOb9CK4=", "requires": { "commander": "*" } @@ -96,19 +93,18 @@ "@types/node": { "version": "8.10.29", "resolved": "https://registry.npmjs.org/@types/node/-/node-8.10.29.tgz", - "integrity": "sha512-zbteaWZ2mdduacm0byELwtRyhYE40aK+pAanQk415gr1eRuu67x7QGOLmn8jz5zI8LDK7d0WI/oT6r5Trz4rzQ==", + "integrity": "sha1-s6E7WN17BoK/G0ICK+9KWpcY9oc=", "optional": true }, "@types/semver": { "version": "5.5.0", "resolved": "https://registry.npmjs.org/@types/semver/-/semver-5.5.0.tgz", - "integrity": "sha512-41qEJgBH/TWgo5NFSvBCJ1qkoi3Q6ONSF2avrHq1LVEZfYpdHmj0y9SuTK+u9ZhG1sYQKBL1AWXKyLWP4RaUoQ==", - "dev": true + "integrity": "sha1-FGwqKe59O65L8vyydGNuJkyBPEU=" }, "JSONStream": { "version": "1.3.4", "resolved": "https://registry.npmjs.org/JSONStream/-/JSONStream-1.3.4.tgz", - "integrity": "sha512-Y7vfi3I5oMOYIr+WxV8NZxDSwcbNgzdKYsTNInmycOq9bUYwGg9ryu57Wg5NLmCjqdFPNUmpMBo3kSJN9tCbXg==", + "integrity": "sha1-YVuyrbDNNMj0xEe19lEvodjxai4=", "requires": { "jsonparse": "^1.2.0", "through": ">=2.2.7 <3" @@ -117,7 +113,7 @@ "abbrev": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/abbrev/-/abbrev-1.1.1.tgz", - "integrity": "sha512-nne9/IiQ/hzIhY6pdDnbBtz7DjPTKrY00P/zvPSm5pOFkl6xuGrGnXn/VtTNNfNtAfZ9/1RtehkszU9qcTii0Q==" + "integrity": "sha1-+PLIh60Qv2f2NPAFtph/7TF5qsg=" }, "accepts": { "version": "1.3.5", @@ -131,12 +127,12 @@ "acorn": { "version": "5.6.2", "resolved": "https://registry.npmjs.org/acorn/-/acorn-5.6.2.tgz", - "integrity": "sha512-zUzo1E5dI2Ey8+82egfnttyMlMZ2y0D8xOCO3PNPPlYXpl8NZvF6Qk9L9BEtJs+43FqEmfBViDqc5d1ckRDguw==" + "integrity": "sha1-sdode+KsG0oyf7nquFFwLFBFtOc=" }, "acorn-dynamic-import": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/acorn-dynamic-import/-/acorn-dynamic-import-3.0.0.tgz", - "integrity": "sha512-zVWV8Z8lislJoOKKqdNMOB+s6+XV5WERty8MnKBeFgwA+19XJjJHs2RP5dzM57FftIs+jQnRToLiWazKr6sSWg==", + "integrity": 
"sha1-kBzu5Mf6rvfgetKkfokGddpQong=", "requires": { "acorn": "^5.0.0" } @@ -159,7 +155,7 @@ "acorn-node": { "version": "1.5.2", "resolved": "https://registry.npmjs.org/acorn-node/-/acorn-node-1.5.2.tgz", - "integrity": "sha512-krFKvw/d1F17AN3XZbybIUzEY4YEPNiGo05AfP3dBlfVKrMHETKpgjpuZkSF8qDNt9UkQcqj7am8yJLseklCMg==", + "integrity": "sha1-LKcj3xnZl7BYJLafbH+wkfxCwyI=", "requires": { "acorn": "^5.7.1", "acorn-dynamic-import": "^3.0.0", @@ -169,7 +165,7 @@ "acorn": { "version": "5.7.3", "resolved": "https://registry.npmjs.org/acorn/-/acorn-5.7.3.tgz", - "integrity": "sha512-T/zvzYRfbVojPWahDsE5evJdHb3oJoQfFbsrKM7w5Zcs++Tr257tia3BmMP8XYVjp1S9RZXQMh7gao96BlqZOw==" + "integrity": "sha1-Z6ojG/iBKXS4UjWpZ3Hra9B+onk=" } } }, @@ -188,6 +184,14 @@ } } }, + "acorn5-object-spread": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/acorn5-object-spread/-/acorn5-object-spread-4.0.0.tgz", + "integrity": "sha1-1XWAge7ZcSGrC+R+Mcqu8qo5lpc=", + "requires": { + "acorn": "^5.1.2" + } + }, "ajv": { "version": "5.5.2", "resolved": "https://registry.npmjs.org/ajv/-/ajv-5.5.2.tgz", @@ -213,8 +217,7 @@ "alphanum-sort": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/alphanum-sort/-/alphanum-sort-1.0.2.tgz", - "integrity": "sha1-l6ERlkmyEa0zaR2fn0hqjsn74KM=", - "dev": true + "integrity": "sha1-l6ERlkmyEa0zaR2fn0hqjsn74KM=" }, "amdefine": { "version": "1.0.1", @@ -224,7 +227,7 @@ "ansi-escapes": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-3.1.0.tgz", - "integrity": "sha512-UgAb8H9D41AQnu/PbWlCofQVcnV4Gs2bBJi9eZPxfU/hgglFh3SMDMENRIqdr7H6XFnXdoknctFByVsCOotTVw==", + "integrity": "sha1-9zIHu4EgfXX9bIPxJa8m7qN4yjA=", "dev": true }, "ansi-regex": { @@ -240,8 +243,7 @@ "ansi-to-html": { "version": "0.6.6", "resolved": "https://registry.npmjs.org/ansi-to-html/-/ansi-to-html-0.6.6.tgz", - "integrity": "sha512-90M/2sZna3OsoOEbSyXK46poFnlClBC53Rx6etNKQK7iShsX5fI5E/M9Ld6FurtLaxAWLuAPi0Jp8p3y5oAkxg==", - "dev": true, + "integrity": "sha1-WKjQS4fsmoXjrSc8EqX7xxR7nEI=", "requires": { "entities": "^1.1.1" } @@ -249,7 +251,7 @@ "anymatch": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-2.0.0.tgz", - "integrity": "sha512-5teOsQWABXHHBFP9y3skS5P3d/WfWXpv3FUpy+LorMrNYaT9pI4oLMQX7jzQ2KklNpGpWHzdCXTDT2Y3XGlZBw==", + "integrity": "sha1-vLJLTzeTTZqnrBe0ra+J58du8us=", "requires": { "micromatch": "^3.1.4", "normalize-path": "^2.1.1" @@ -268,7 +270,7 @@ "braces": { "version": "2.3.2", "resolved": "https://registry.npmjs.org/braces/-/braces-2.3.2.tgz", - "integrity": "sha512-aNdbnj9P8PjdXU4ybaWLK2IF3jc/EoDYbC7AazW6to3TRsfXxscC9UXOB5iDiEQrkyIbWp2SLQda4+QAa7nc3w==", + "integrity": "sha1-WXn9PxTNUxVl5fot8av/8d+u5yk=", "requires": { "arr-flatten": "^1.1.0", "array-unique": "^0.3.2", @@ -361,7 +363,7 @@ "is-descriptor": { "version": "0.1.6", "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-0.1.6.tgz", - "integrity": "sha512-avDYr0SB3DwO9zsMov0gKCESFYqCnE4hq/4z3TdUlukEy5t9C0YRq7HLrsN52NAcqXKaepeCD0n+B0arnVG3Hg==", + "integrity": "sha1-Nm2CQN3kh8pRgjsaufB6EKeCUco=", "requires": { "is-accessor-descriptor": "^0.1.6", "is-data-descriptor": "^0.1.4", @@ -371,14 +373,14 @@ "kind-of": { "version": "5.1.0", "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-5.1.0.tgz", - "integrity": "sha512-NGEErnH6F2vUuXDh+OlbcKW7/wOcfdRHaZ7VWtqCztfHri/++YKmP51OdWeGPuqCOba6kk2OTe5d02VmTB80Pw==" + "integrity": "sha1-cpyR4thXt6QZofmqZWhcTDP1hF0=" } } }, "extglob": { "version": "2.0.4", "resolved": 
"https://registry.npmjs.org/extglob/-/extglob-2.0.4.tgz", - "integrity": "sha512-Nmb6QXkELsuBr24CJSkilo6UHHgbekK5UiZgfE6UHD3Eb27YC6oD+bhcT+tJ6cl8dmsgdQxnWlcry8ksBIBLpw==", + "integrity": "sha1-rQD+TcYSqSMuhxhxHcXLWrAoVUM=", "requires": { "array-unique": "^0.3.2", "define-property": "^1.0.0", @@ -432,7 +434,7 @@ "is-accessor-descriptor": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz", - "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==", + "integrity": "sha1-FpwvbT3x+ZJhgHI2XJsOofaHhlY=", "requires": { "kind-of": "^6.0.0" } @@ -440,7 +442,7 @@ "is-data-descriptor": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz", - "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==", + "integrity": "sha1-2Eh2Mh0Oet0DmQQGq7u9NrqSaMc=", "requires": { "kind-of": "^6.0.0" } @@ -448,7 +450,7 @@ "is-descriptor": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz", - "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==", + "integrity": "sha1-OxWXRqZmBLBPjIFSS6NlxfFNhuw=", "requires": { "is-accessor-descriptor": "^1.0.0", "is-data-descriptor": "^1.0.0", @@ -481,12 +483,12 @@ "kind-of": { "version": "6.0.2", "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.2.tgz", - "integrity": "sha512-s5kLOcnH0XqDO+FvuaLX8DDjZ18CGFk7VygH40QoKPUQhW4e2rvM0rwUq0t8IQDOwYSeLK01U90OjzBTme2QqA==" + "integrity": "sha1-ARRrNqYhjmTljzqNZt5df8b20FE=" }, "micromatch": { "version": "3.1.10", "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-3.1.10.tgz", - "integrity": "sha512-MWikgl9n9M3w+bpsY3He8L+w9eF9338xRl8IAO5viDizwSzziFEyUzo2xrrloB64ADbTf8uA8vRqqttDTOmccg==", + "integrity": "sha1-cIWbyVyYQJUvNZoGij/En57PrCM=", "requires": { "arr-diff": "^4.0.0", "array-unique": "^0.3.2", @@ -508,7 +510,7 @@ "aproba": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/aproba/-/aproba-1.2.0.tgz", - "integrity": "sha512-Y9J6ZjXtoYh8RnXVCMOU/ttDmk1aBjunq9vO0ta5x85WDQiQfUF9sIPBITdbiiIVcBo03Hi3jMxigBtsddlXRw==" + "integrity": "sha1-aALmJk79GMeQobDVF/DyYnvyyUo=" }, "archiver": { "version": "2.1.1", @@ -541,7 +543,7 @@ "are-we-there-yet": { "version": "1.1.5", "resolved": "https://registry.npmjs.org/are-we-there-yet/-/are-we-there-yet-1.1.5.tgz", - "integrity": "sha512-5hYdAkZlcG8tOLujVDTgCT+uPX0VnpAH28gWsLfzpXYm7wP6mp5Q/gYyR7YQ0cKVJcXJnl3j2kpBan13PtQf6w==", + "integrity": "sha1-SzXClE8GKov82mZBB2A1D+nd/CE=", "requires": { "delegates": "^1.0.0", "readable-stream": "^2.0.6" @@ -550,7 +552,7 @@ "argparse": { "version": "1.0.10", "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", - "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", + "integrity": "sha1-vNZ5HqWuCXJeF+WtmIE0zUCz2RE=", "requires": { "sprintf-js": "~1.0.2" } @@ -566,7 +568,7 @@ "arr-flatten": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/arr-flatten/-/arr-flatten-1.1.0.tgz", - "integrity": "sha512-L3hKV5R/p5o81R7O02IGnwpDmkp6E982XhtbuwSe3O4qOtMMMtodicASA1Cny2U+aCXcNpml+m4dPsvsJ3jatg==" + "integrity": "sha1-NgSLv/TntH4TZkQxbJlmnqWukfE=" }, "arr-union": { "version": "3.1.0", @@ -632,7 +634,7 @@ "asn1.js": { "version": "4.10.1", "resolved": "https://registry.npmjs.org/asn1.js/-/asn1.js-4.10.1.tgz", - 
"integrity": "sha512-p32cOF5q0Zqs9uBiONKYLm6BClCoBCM5O9JfeUSlnQLBTxYdTK+pW+nXflm8UkKd2UYlEbYz5qEi0JuZR9ckSw==", + "integrity": "sha1-ucK/WAXx5kqt7tbfOiv6+1pz9aA=", "requires": { "bn.js": "^4.0.0", "inherits": "^2.0.1", @@ -655,7 +657,7 @@ "assertion-error": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/assertion-error/-/assertion-error-1.1.0.tgz", - "integrity": "sha512-jgsaNduz+ndvGyFt3uSuWqvy4lCnIJiovtouQN5JZHOKCS2QuhEdbcQHFhVksz2N2U9hXJo8odG7ETyWlEeuDw==" + "integrity": "sha1-5gtrDo8wG9l+U3UhW9pAbIURjAs=" }, "assign-symbols": { "version": "1.0.0", @@ -683,8 +685,7 @@ "async-limiter": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/async-limiter/-/async-limiter-1.0.0.tgz", - "integrity": "sha512-jp/uFnooOiO+L211eZOoSyzpOITMXx1rBITauYykG3BRYPu8h0UcxsPNB04RR5vo4Tyz3+ay17tR6JVf9qzYWg==", - "dev": true + "integrity": "sha1-ePrtjD0HSrgfIrTphdeehzj3IPg=" }, "asynckit": { "version": "0.4.0", @@ -694,13 +695,12 @@ "atob": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/atob/-/atob-2.1.0.tgz", - "integrity": "sha512-SuiKH8vbsOyCALjA/+EINmt/Kdl+TQPrtFgW7XZZcwtryFu9e5kQoX3bjCW6mIvGH1fbeAZZuvwGR5IlBRznGw==" + "integrity": "sha1-qysVDlHXsSK578jXNAwGtsQQdrw=" }, "autoprefixer": { "version": "8.6.5", "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-8.6.5.tgz", - "integrity": "sha512-PLWJN3Xo/rycNkx+mp8iBDMTm3FeWe4VmYaZDSqL5QQB9sLsQkG5k8n+LNDFnhh9kdq2K+egL/icpctOmDHwig==", - "dev": true, + "integrity": "sha1-ND89GT7VaLMgjgARehuW62kdTuk=", "requires": { "browserslist": "^3.2.8", "caniuse-lite": "^1.0.30000864", @@ -713,8 +713,7 @@ "caniuse-lite": { "version": "1.0.30000885", "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30000885.tgz", - "integrity": "sha512-cXKbYwpxBLd7qHyej16JazPoUacqoVuDhvR61U7Fr5vSxMUiodzcYa1rQYRYfZ5GexV03vGZHd722vNPLjPJGQ==", - "dev": true + "integrity": "sha1-6Inp+OflDnafKkljTJMriu5iKYQ=" } } }, @@ -726,13 +725,12 @@ "aws4": { "version": "1.7.0", "resolved": "https://registry.npmjs.org/aws4/-/aws4-1.7.0.tgz", - "integrity": "sha512-32NDda82rhwD9/JBCCkB+MRYDp0oSvlo2IL6rQWA10PQi7tDUM3eqMSltXmY+Oyl/7N3P3qNtAlv7X0d9bI28w==" + "integrity": "sha1-1NDpudv8p3vwjusKikcVUP454ok=" }, "babel-code-frame": { "version": "6.26.0", "resolved": "https://registry.npmjs.org/babel-code-frame/-/babel-code-frame-6.26.0.tgz", "integrity": "sha1-Y/1D99weO7fONZR9uP42mj9Yx0s=", - "dev": true, "requires": { "chalk": "^1.1.3", "esutils": "^2.0.2", @@ -742,16 +740,14 @@ "js-tokens": { "version": "3.0.2", "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-3.0.2.tgz", - "integrity": "sha1-mGbfOVECEw449/mWvOtlRDIJwls=", - "dev": true + "integrity": "sha1-mGbfOVECEw449/mWvOtlRDIJwls=" } } }, "babel-core": { "version": "6.26.3", "resolved": "https://registry.npmjs.org/babel-core/-/babel-core-6.26.3.tgz", - "integrity": "sha512-6jyFLuDmeidKmUEb3NM+/yawG0M2bDZ9Z1qbZP59cyHLz8kYGKYwpJP0UwUKKUiTRNvxfLesJnTedqczP7cTDA==", - "dev": true, + "integrity": "sha1-suLwnjQtDwyI4vAuBneUEl51wgc=", "requires": { "babel-code-frame": "^6.26.0", "babel-generator": "^6.26.0", @@ -777,14 +773,12 @@ "lodash": { "version": "4.17.10", "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.10.tgz", - "integrity": "sha512-UejweD1pDoXu+AD825lWwp4ZGtSwgnpZxb3JDViD7StjQz+Nb/6l093lx4OQ0foGWNRoc19mWy7BzL+UAK2iVg==", - "dev": true + "integrity": "sha1-G3eTz3JZ6jj7NmHU04syYK+K5Oc=" }, "minimatch": { "version": "3.0.4", "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz", - "integrity": 
"sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==", - "dev": true, + "integrity": "sha1-UWbihkV/AzBgZL5Ul+jbsMPTIIM=", "requires": { "brace-expansion": "^1.1.7" } @@ -794,8 +788,7 @@ "babel-generator": { "version": "6.26.1", "resolved": "https://registry.npmjs.org/babel-generator/-/babel-generator-6.26.1.tgz", - "integrity": "sha512-HyfwY6ApZj7BYTcJURpM5tznulaBvyio7/0d4zFOeMPUmfxkCjHocCuoLa2SAGzBI8AREcH3eP3758F672DppA==", - "dev": true, + "integrity": "sha1-GERAjTuPDTWkBOp6wYDwh6YBvZA=", "requires": { "babel-messages": "^6.23.0", "babel-runtime": "^6.26.0", @@ -810,14 +803,12 @@ "jsesc": { "version": "1.3.0", "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-1.3.0.tgz", - "integrity": "sha1-RsP+yMGJKxKwgz25vHYiF226s0s=", - "dev": true + "integrity": "sha1-RsP+yMGJKxKwgz25vHYiF226s0s=" }, "lodash": { "version": "4.17.10", "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.10.tgz", - "integrity": "sha512-UejweD1pDoXu+AD825lWwp4ZGtSwgnpZxb3JDViD7StjQz+Nb/6l093lx4OQ0foGWNRoc19mWy7BzL+UAK2iVg==", - "dev": true + "integrity": "sha1-G3eTz3JZ6jj7NmHU04syYK+K5Oc=" } } }, @@ -825,7 +816,6 @@ "version": "6.24.1", "resolved": "https://registry.npmjs.org/babel-helper-builder-binary-assignment-operator-visitor/-/babel-helper-builder-binary-assignment-operator-visitor-6.24.1.tgz", "integrity": "sha1-zORReto1b0IgvK6KAsKzRvmlZmQ=", - "dev": true, "requires": { "babel-helper-explode-assignable-expression": "^6.24.1", "babel-runtime": "^6.22.0", @@ -836,7 +826,6 @@ "version": "6.26.0", "resolved": "https://registry.npmjs.org/babel-helper-builder-react-jsx/-/babel-helper-builder-react-jsx-6.26.0.tgz", "integrity": "sha1-Of+DE7dci2Xc7/HzHTg+D/KkCKA=", - "dev": true, "requires": { "babel-runtime": "^6.26.0", "babel-types": "^6.26.0", @@ -847,7 +836,6 @@ "version": "6.24.1", "resolved": "https://registry.npmjs.org/babel-helper-call-delegate/-/babel-helper-call-delegate-6.24.1.tgz", "integrity": "sha1-7Oaqzdx25Bw0YfiL/Fdb0Nqi340=", - "dev": true, "requires": { "babel-helper-hoist-variables": "^6.24.1", "babel-runtime": "^6.22.0", @@ -859,7 +847,6 @@ "version": "6.26.0", "resolved": "https://registry.npmjs.org/babel-helper-define-map/-/babel-helper-define-map-6.26.0.tgz", "integrity": "sha1-pfVtq0GiX5fstJjH66ypgZ+Vvl8=", - "dev": true, "requires": { "babel-helper-function-name": "^6.24.1", "babel-runtime": "^6.26.0", @@ -870,8 +857,7 @@ "lodash": { "version": "4.17.10", "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.10.tgz", - "integrity": "sha512-UejweD1pDoXu+AD825lWwp4ZGtSwgnpZxb3JDViD7StjQz+Nb/6l093lx4OQ0foGWNRoc19mWy7BzL+UAK2iVg==", - "dev": true + "integrity": "sha1-G3eTz3JZ6jj7NmHU04syYK+K5Oc=" } } }, @@ -879,7 +865,6 @@ "version": "6.24.1", "resolved": "https://registry.npmjs.org/babel-helper-explode-assignable-expression/-/babel-helper-explode-assignable-expression-6.24.1.tgz", "integrity": "sha1-8luCz33BBDPFX3BZLVdGQArCLKo=", - "dev": true, "requires": { "babel-runtime": "^6.22.0", "babel-traverse": "^6.24.1", @@ -890,7 +875,6 @@ "version": "6.24.1", "resolved": "https://registry.npmjs.org/babel-helper-function-name/-/babel-helper-function-name-6.24.1.tgz", "integrity": "sha1-00dbjAPtmCQqJbSDUasYOZ01gKk=", - "dev": true, "requires": { "babel-helper-get-function-arity": "^6.24.1", "babel-runtime": "^6.22.0", @@ -903,7 +887,6 @@ "version": "6.24.1", "resolved": "https://registry.npmjs.org/babel-helper-get-function-arity/-/babel-helper-get-function-arity-6.24.1.tgz", "integrity": "sha1-j3eCqpNAfEHTqlCQj4mwMbG2hT0=", - "dev": true, 
"requires": { "babel-runtime": "^6.22.0", "babel-types": "^6.24.1" @@ -913,7 +896,6 @@ "version": "6.24.1", "resolved": "https://registry.npmjs.org/babel-helper-hoist-variables/-/babel-helper-hoist-variables-6.24.1.tgz", "integrity": "sha1-HssnaJydJVE+rbyZFKc/VAi+enY=", - "dev": true, "requires": { "babel-runtime": "^6.22.0", "babel-types": "^6.24.1" @@ -923,7 +905,6 @@ "version": "6.24.1", "resolved": "https://registry.npmjs.org/babel-helper-optimise-call-expression/-/babel-helper-optimise-call-expression-6.24.1.tgz", "integrity": "sha1-96E0J7qfc/j0+pk8VKl4gtEkQlc=", - "dev": true, "requires": { "babel-runtime": "^6.22.0", "babel-types": "^6.24.1" @@ -933,7 +914,6 @@ "version": "6.26.0", "resolved": "https://registry.npmjs.org/babel-helper-regex/-/babel-helper-regex-6.26.0.tgz", "integrity": "sha1-MlxZ+QL4LyS3T6zu0DY5VPZJXnI=", - "dev": true, "requires": { "babel-runtime": "^6.26.0", "babel-types": "^6.26.0", @@ -943,8 +923,7 @@ "lodash": { "version": "4.17.10", "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.10.tgz", - "integrity": "sha512-UejweD1pDoXu+AD825lWwp4ZGtSwgnpZxb3JDViD7StjQz+Nb/6l093lx4OQ0foGWNRoc19mWy7BzL+UAK2iVg==", - "dev": true + "integrity": "sha1-G3eTz3JZ6jj7NmHU04syYK+K5Oc=" } } }, @@ -952,7 +931,6 @@ "version": "6.24.1", "resolved": "https://registry.npmjs.org/babel-helper-remap-async-to-generator/-/babel-helper-remap-async-to-generator-6.24.1.tgz", "integrity": "sha1-XsWBgnrXI/7N04HxySg5BnbkVRs=", - "dev": true, "requires": { "babel-helper-function-name": "^6.24.1", "babel-runtime": "^6.22.0", @@ -965,7 +943,6 @@ "version": "6.24.1", "resolved": "https://registry.npmjs.org/babel-helper-replace-supers/-/babel-helper-replace-supers-6.24.1.tgz", "integrity": "sha1-v22/5Dk40XNpohPKiov3S2qQqxo=", - "dev": true, "requires": { "babel-helper-optimise-call-expression": "^6.24.1", "babel-messages": "^6.23.0", @@ -979,7 +956,6 @@ "version": "6.24.1", "resolved": "https://registry.npmjs.org/babel-helpers/-/babel-helpers-6.24.1.tgz", "integrity": "sha1-NHHenK7DiOXIUOWX5Yom3fN2ArI=", - "dev": true, "requires": { "babel-runtime": "^6.22.0", "babel-template": "^6.24.1" @@ -989,7 +965,6 @@ "version": "6.23.0", "resolved": "https://registry.npmjs.org/babel-messages/-/babel-messages-6.23.0.tgz", "integrity": "sha1-8830cDhYA1sqKVHG7F7fbGLyYw4=", - "dev": true, "requires": { "babel-runtime": "^6.22.0" } @@ -998,7 +973,6 @@ "version": "6.22.0", "resolved": "https://registry.npmjs.org/babel-plugin-check-es2015-constants/-/babel-plugin-check-es2015-constants-6.22.0.tgz", "integrity": "sha1-NRV7EBQm/S/9PaP3XH0ekYNbv4o=", - "dev": true, "requires": { "babel-runtime": "^6.22.0" } @@ -1006,32 +980,27 @@ "babel-plugin-syntax-async-functions": { "version": "6.13.0", "resolved": "https://registry.npmjs.org/babel-plugin-syntax-async-functions/-/babel-plugin-syntax-async-functions-6.13.0.tgz", - "integrity": "sha1-ytnK0RkbWtY0vzCuCHI5HgZHvpU=", - "dev": true + "integrity": "sha1-ytnK0RkbWtY0vzCuCHI5HgZHvpU=" }, "babel-plugin-syntax-exponentiation-operator": { "version": "6.13.0", "resolved": "https://registry.npmjs.org/babel-plugin-syntax-exponentiation-operator/-/babel-plugin-syntax-exponentiation-operator-6.13.0.tgz", - "integrity": "sha1-nufoM3KQ2pUoggGmpX9BcDF4MN4=", - "dev": true + "integrity": "sha1-nufoM3KQ2pUoggGmpX9BcDF4MN4=" }, "babel-plugin-syntax-jsx": { "version": "6.18.0", "resolved": "http://registry.npmjs.org/babel-plugin-syntax-jsx/-/babel-plugin-syntax-jsx-6.18.0.tgz", - "integrity": "sha1-CvMqmm4Tyno/1QaeYtew9Y0NiUY=", - "dev": true + "integrity": 
"sha1-CvMqmm4Tyno/1QaeYtew9Y0NiUY=" }, "babel-plugin-syntax-trailing-function-commas": { "version": "6.22.0", "resolved": "https://registry.npmjs.org/babel-plugin-syntax-trailing-function-commas/-/babel-plugin-syntax-trailing-function-commas-6.22.0.tgz", - "integrity": "sha1-ugNgk3+NBuQBgKQ/4NVhb/9TLPM=", - "dev": true + "integrity": "sha1-ugNgk3+NBuQBgKQ/4NVhb/9TLPM=" }, "babel-plugin-transform-async-to-generator": { "version": "6.24.1", "resolved": "https://registry.npmjs.org/babel-plugin-transform-async-to-generator/-/babel-plugin-transform-async-to-generator-6.24.1.tgz", "integrity": "sha1-ZTbjeK/2yx1VF6wOQOs+n8jQh2E=", - "dev": true, "requires": { "babel-helper-remap-async-to-generator": "^6.24.1", "babel-plugin-syntax-async-functions": "^6.8.0", @@ -1042,7 +1011,6 @@ "version": "6.22.0", "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-arrow-functions/-/babel-plugin-transform-es2015-arrow-functions-6.22.0.tgz", "integrity": "sha1-RSaSy3EdX3ncf4XkQM5BufJE0iE=", - "dev": true, "requires": { "babel-runtime": "^6.22.0" } @@ -1051,7 +1019,6 @@ "version": "6.22.0", "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-block-scoped-functions/-/babel-plugin-transform-es2015-block-scoped-functions-6.22.0.tgz", "integrity": "sha1-u8UbSflk1wy42OC5ToICRs46YUE=", - "dev": true, "requires": { "babel-runtime": "^6.22.0" } @@ -1060,7 +1027,6 @@ "version": "6.26.0", "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-block-scoping/-/babel-plugin-transform-es2015-block-scoping-6.26.0.tgz", "integrity": "sha1-1w9SmcEwjQXBL0Y4E7CgnnOxiV8=", - "dev": true, "requires": { "babel-runtime": "^6.26.0", "babel-template": "^6.26.0", @@ -1072,8 +1038,7 @@ "lodash": { "version": "4.17.10", "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.10.tgz", - "integrity": "sha512-UejweD1pDoXu+AD825lWwp4ZGtSwgnpZxb3JDViD7StjQz+Nb/6l093lx4OQ0foGWNRoc19mWy7BzL+UAK2iVg==", - "dev": true + "integrity": "sha1-G3eTz3JZ6jj7NmHU04syYK+K5Oc=" } } }, @@ -1081,7 +1046,6 @@ "version": "6.24.1", "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-classes/-/babel-plugin-transform-es2015-classes-6.24.1.tgz", "integrity": "sha1-WkxYpQyclGHlZLSyo7+ryXolhNs=", - "dev": true, "requires": { "babel-helper-define-map": "^6.24.1", "babel-helper-function-name": "^6.24.1", @@ -1098,7 +1062,6 @@ "version": "6.24.1", "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-computed-properties/-/babel-plugin-transform-es2015-computed-properties-6.24.1.tgz", "integrity": "sha1-b+Ko0WiV1WNPTNmZttNICjCBWbM=", - "dev": true, "requires": { "babel-runtime": "^6.22.0", "babel-template": "^6.24.1" @@ -1108,7 +1071,6 @@ "version": "6.23.0", "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-destructuring/-/babel-plugin-transform-es2015-destructuring-6.23.0.tgz", "integrity": "sha1-mXux8auWf2gtKwh2/jWNYOdlxW0=", - "dev": true, "requires": { "babel-runtime": "^6.22.0" } @@ -1117,7 +1079,6 @@ "version": "6.24.1", "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-duplicate-keys/-/babel-plugin-transform-es2015-duplicate-keys-6.24.1.tgz", "integrity": "sha1-c+s9MQypaePvnskcU3QabxV2Qj4=", - "dev": true, "requires": { "babel-runtime": "^6.22.0", "babel-types": "^6.24.1" @@ -1127,7 +1088,6 @@ "version": "6.23.0", "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-for-of/-/babel-plugin-transform-es2015-for-of-6.23.0.tgz", "integrity": "sha1-9HyVsrYT3x0+zC/bdXNiPHUkhpE=", - "dev": true, "requires": { "babel-runtime": 
"^6.22.0" } @@ -1136,7 +1096,6 @@ "version": "6.24.1", "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-function-name/-/babel-plugin-transform-es2015-function-name-6.24.1.tgz", "integrity": "sha1-g0yJhTvDaxrw86TF26qU/Y6sqos=", - "dev": true, "requires": { "babel-helper-function-name": "^6.24.1", "babel-runtime": "^6.22.0", @@ -1147,7 +1106,6 @@ "version": "6.22.0", "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-literals/-/babel-plugin-transform-es2015-literals-6.22.0.tgz", "integrity": "sha1-T1SgLWzWbPkVKAAZox0xklN3yi4=", - "dev": true, "requires": { "babel-runtime": "^6.22.0" } @@ -1156,7 +1114,6 @@ "version": "6.24.1", "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-modules-amd/-/babel-plugin-transform-es2015-modules-amd-6.24.1.tgz", "integrity": "sha1-Oz5UAXI5hC1tGcMBHEvS8AoA0VQ=", - "dev": true, "requires": { "babel-plugin-transform-es2015-modules-commonjs": "^6.24.1", "babel-runtime": "^6.22.0", @@ -1166,8 +1123,7 @@ "babel-plugin-transform-es2015-modules-commonjs": { "version": "6.26.2", "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-modules-commonjs/-/babel-plugin-transform-es2015-modules-commonjs-6.26.2.tgz", - "integrity": "sha512-CV9ROOHEdrjcwhIaJNBGMBCodN+1cfkwtM1SbUHmvyy35KGT7fohbpOxkE2uLz1o6odKK2Ck/tz47z+VqQfi9Q==", - "dev": true, + "integrity": "sha1-WKeThjqefKhwvcWogRF/+sJ9tvM=", "requires": { "babel-plugin-transform-strict-mode": "^6.24.1", "babel-runtime": "^6.26.0", @@ -1179,7 +1135,6 @@ "version": "6.24.1", "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-modules-systemjs/-/babel-plugin-transform-es2015-modules-systemjs-6.24.1.tgz", "integrity": "sha1-/4mhQrkRmpBhlfXxBuzzBdlAfSM=", - "dev": true, "requires": { "babel-helper-hoist-variables": "^6.24.1", "babel-runtime": "^6.22.0", @@ -1190,7 +1145,6 @@ "version": "6.24.1", "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-modules-umd/-/babel-plugin-transform-es2015-modules-umd-6.24.1.tgz", "integrity": "sha1-rJl+YoXNGO1hdq22B9YCNErThGg=", - "dev": true, "requires": { "babel-plugin-transform-es2015-modules-amd": "^6.24.1", "babel-runtime": "^6.22.0", @@ -1201,7 +1155,6 @@ "version": "6.24.1", "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-object-super/-/babel-plugin-transform-es2015-object-super-6.24.1.tgz", "integrity": "sha1-JM72muIcuDp/hgPa0CH1cusnj40=", - "dev": true, "requires": { "babel-helper-replace-supers": "^6.24.1", "babel-runtime": "^6.22.0" @@ -1211,7 +1164,6 @@ "version": "6.24.1", "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-parameters/-/babel-plugin-transform-es2015-parameters-6.24.1.tgz", "integrity": "sha1-V6w1GrScrxSpfNE7CfZv3wpiXys=", - "dev": true, "requires": { "babel-helper-call-delegate": "^6.24.1", "babel-helper-get-function-arity": "^6.24.1", @@ -1225,7 +1177,6 @@ "version": "6.24.1", "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-shorthand-properties/-/babel-plugin-transform-es2015-shorthand-properties-6.24.1.tgz", "integrity": "sha1-JPh11nIch2YbvZmkYi5R8U3jiqA=", - "dev": true, "requires": { "babel-runtime": "^6.22.0", "babel-types": "^6.24.1" @@ -1235,7 +1186,6 @@ "version": "6.22.0", "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-spread/-/babel-plugin-transform-es2015-spread-6.22.0.tgz", "integrity": "sha1-1taKmfia7cRTbIGlQujdnxdG+NE=", - "dev": true, "requires": { "babel-runtime": "^6.22.0" } @@ -1244,7 +1194,6 @@ "version": "6.24.1", "resolved": 
"https://registry.npmjs.org/babel-plugin-transform-es2015-sticky-regex/-/babel-plugin-transform-es2015-sticky-regex-6.24.1.tgz", "integrity": "sha1-AMHNsaynERLN8M9hJsLta0V8zbw=", - "dev": true, "requires": { "babel-helper-regex": "^6.24.1", "babel-runtime": "^6.22.0", @@ -1255,7 +1204,6 @@ "version": "6.22.0", "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-template-literals/-/babel-plugin-transform-es2015-template-literals-6.22.0.tgz", "integrity": "sha1-qEs0UPfp+PH2g51taH2oS7EjbY0=", - "dev": true, "requires": { "babel-runtime": "^6.22.0" } @@ -1264,7 +1212,6 @@ "version": "6.23.0", "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-typeof-symbol/-/babel-plugin-transform-es2015-typeof-symbol-6.23.0.tgz", "integrity": "sha1-3sCfHN3/lLUqxz1QXITfWdzOs3I=", - "dev": true, "requires": { "babel-runtime": "^6.22.0" } @@ -1273,7 +1220,6 @@ "version": "6.24.1", "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-unicode-regex/-/babel-plugin-transform-es2015-unicode-regex-6.24.1.tgz", "integrity": "sha1-04sS9C6nMj9yk4fxinxa4frrNek=", - "dev": true, "requires": { "babel-helper-regex": "^6.24.1", "babel-runtime": "^6.22.0", @@ -1284,7 +1230,6 @@ "version": "2.0.0", "resolved": "https://registry.npmjs.org/regexpu-core/-/regexpu-core-2.0.0.tgz", "integrity": "sha1-SdA4g3uNz4v6W5pCE5k45uoq4kA=", - "dev": true, "requires": { "regenerate": "^1.2.1", "regjsgen": "^0.2.0", @@ -1297,7 +1242,6 @@ "version": "6.24.1", "resolved": "https://registry.npmjs.org/babel-plugin-transform-exponentiation-operator/-/babel-plugin-transform-exponentiation-operator-6.24.1.tgz", "integrity": "sha1-KrDJx/MJj6SJB3cruBP+QejeOg4=", - "dev": true, "requires": { "babel-helper-builder-binary-assignment-operator-visitor": "^6.24.1", "babel-plugin-syntax-exponentiation-operator": "^6.8.0", @@ -1308,7 +1252,6 @@ "version": "6.24.1", "resolved": "https://registry.npmjs.org/babel-plugin-transform-react-jsx/-/babel-plugin-transform-react-jsx-6.24.1.tgz", "integrity": "sha1-hAoCjn30YN/DotKfDA2R9jduZqM=", - "dev": true, "requires": { "babel-helper-builder-react-jsx": "^6.24.1", "babel-plugin-syntax-jsx": "^6.8.0", @@ -1319,7 +1262,6 @@ "version": "6.26.0", "resolved": "https://registry.npmjs.org/babel-plugin-transform-regenerator/-/babel-plugin-transform-regenerator-6.26.0.tgz", "integrity": "sha1-4HA2lvveJ/Cj78rPi03KL3s6jy8=", - "dev": true, "requires": { "regenerator-transform": "^0.10.0" } @@ -1328,7 +1270,6 @@ "version": "6.24.1", "resolved": "https://registry.npmjs.org/babel-plugin-transform-strict-mode/-/babel-plugin-transform-strict-mode-6.24.1.tgz", "integrity": "sha1-1fr3qleKZbvlkc9e2uBKDGcCB1g=", - "dev": true, "requires": { "babel-runtime": "^6.22.0", "babel-types": "^6.24.1" @@ -1337,8 +1278,7 @@ "babel-plugin-wildcard": { "version": "5.0.0", "resolved": "https://registry.npmjs.org/babel-plugin-wildcard/-/babel-plugin-wildcard-5.0.0.tgz", - "integrity": "sha512-RhbEQV7HbqfreBW41R2ExJV4SdeG3pnno2gYqbucrrzhBub3/72524yNeQbPa0dyVJQwGS/DZ2X6MeiWSdErlQ==", - "dev": true, + "integrity": "sha1-cagoTYH3UwviBV+0fg04jLlqSOw=", "requires": { "rimraf": "^2.6.2" } @@ -1346,8 +1286,7 @@ "babel-preset-env": { "version": "1.7.0", "resolved": "https://registry.npmjs.org/babel-preset-env/-/babel-preset-env-1.7.0.tgz", - "integrity": "sha512-9OR2afuKDneX2/q2EurSftUYM0xGu4O2D9adAhVfADDhrYDaxXV0rBbevVYoY9n6nyX1PmQW/0jtpJvUNr9CHg==", - "dev": true, + "integrity": "sha1-3qefpOvriDzTXasH4mDBycBN93o=", "requires": { "babel-plugin-check-es2015-constants": "^6.22.0", 
"babel-plugin-syntax-trailing-function-commas": "^6.22.0", @@ -1384,8 +1323,7 @@ "browserslist": { "version": "3.2.8", "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-3.2.8.tgz", - "integrity": "sha512-WHVocJYavUwVgVViC0ORikPHQquXwVh939TaelZ4WDqpWgTX/FsGhl/+P4qBUAGcRvtOgDgC+xftNWWp2RUTAQ==", - "dev": true, + "integrity": "sha1-sABTYdZHHw9ZUnl6dvyYXx+Xj8Y=", "requires": { "caniuse-lite": "^1.0.30000844", "electron-to-chromium": "^1.3.47" @@ -1394,14 +1332,12 @@ "electron-to-chromium": { "version": "1.3.48", "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.3.48.tgz", - "integrity": "sha1-07DYWTgUBE4JLs4hCPw6ya6kuQA=", - "dev": true + "integrity": "sha1-07DYWTgUBE4JLs4hCPw6ya6kuQA=" }, "semver": { "version": "5.5.0", "resolved": "https://registry.npmjs.org/semver/-/semver-5.5.0.tgz", - "integrity": "sha512-4SJ3dm0WAwWy/NVeioZh5AntkdJoWKxHxcmyP622fOkgHa4z3R0TdBJICINyaSDE6uNwVc8gZr+ZinwZAH4xIA==", - "dev": true + "integrity": "sha1-3Eu8emyp2Rbe5dQ1FvAJK1j3uKs=" } } }, @@ -1409,7 +1345,6 @@ "version": "6.26.0", "resolved": "https://registry.npmjs.org/babel-register/-/babel-register-6.26.0.tgz", "integrity": "sha1-btAhFz4vy0htestFxgCahW9kcHE=", - "dev": true, "requires": { "babel-core": "^6.26.0", "babel-runtime": "^6.26.0", @@ -1423,8 +1358,7 @@ "lodash": { "version": "4.17.10", "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.10.tgz", - "integrity": "sha512-UejweD1pDoXu+AD825lWwp4ZGtSwgnpZxb3JDViD7StjQz+Nb/6l093lx4OQ0foGWNRoc19mWy7BzL+UAK2iVg==", - "dev": true + "integrity": "sha1-G3eTz3JZ6jj7NmHU04syYK+K5Oc=" } } }, @@ -1432,7 +1366,6 @@ "version": "6.26.0", "resolved": "https://registry.npmjs.org/babel-runtime/-/babel-runtime-6.26.0.tgz", "integrity": "sha1-llxwWGaOgrVde/4E/yM3vItWR/4=", - "dev": true, "requires": { "core-js": "^2.4.0", "regenerator-runtime": "^0.11.0" @@ -1441,8 +1374,7 @@ "core-js": { "version": "2.5.7", "resolved": "https://registry.npmjs.org/core-js/-/core-js-2.5.7.tgz", - "integrity": "sha512-RszJCAxg/PP6uzXVXL6BsxSXx/B05oJAQ2vkJRjyjrEcNVycaqOmNb5OTxZPE3xa5gwZduqza6L9JOCenh/Ecw==", - "dev": true + "integrity": "sha1-+XJgj/DOrWi4QaFqky0LGDeRgU4=" } } }, @@ -1450,7 +1382,6 @@ "version": "6.26.0", "resolved": "https://registry.npmjs.org/babel-template/-/babel-template-6.26.0.tgz", "integrity": "sha1-3gPi0WOWsGn0bdn/+FIfsaDjXgI=", - "dev": true, "requires": { "babel-runtime": "^6.26.0", "babel-traverse": "^6.26.0", @@ -1462,14 +1393,12 @@ "babylon": { "version": "6.18.0", "resolved": "https://registry.npmjs.org/babylon/-/babylon-6.18.0.tgz", - "integrity": "sha512-q/UEjfGJ2Cm3oKV71DJz9d25TPnq5rhBVL2Q4fA5wcC3jcrdn7+SssEybFIxwAvvP+YCsCYNKughoF33GxgycQ==", - "dev": true + "integrity": "sha1-ry87iPpvXB5MY00aD46sT1WzleM=" }, "lodash": { "version": "4.17.10", "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.10.tgz", - "integrity": "sha512-UejweD1pDoXu+AD825lWwp4ZGtSwgnpZxb3JDViD7StjQz+Nb/6l093lx4OQ0foGWNRoc19mWy7BzL+UAK2iVg==", - "dev": true + "integrity": "sha1-G3eTz3JZ6jj7NmHU04syYK+K5Oc=" } } }, @@ -1477,7 +1406,6 @@ "version": "6.26.0", "resolved": "https://registry.npmjs.org/babel-traverse/-/babel-traverse-6.26.0.tgz", "integrity": "sha1-RqnL1+3MYsjlwGTi0tjQ9ANXZu4=", - "dev": true, "requires": { "babel-code-frame": "^6.26.0", "babel-messages": "^6.23.0", @@ -1493,20 +1421,17 @@ "babylon": { "version": "6.18.0", "resolved": "https://registry.npmjs.org/babylon/-/babylon-6.18.0.tgz", - "integrity": "sha512-q/UEjfGJ2Cm3oKV71DJz9d25TPnq5rhBVL2Q4fA5wcC3jcrdn7+SssEybFIxwAvvP+YCsCYNKughoF33GxgycQ==", - 
"dev": true + "integrity": "sha1-ry87iPpvXB5MY00aD46sT1WzleM=" }, "globals": { "version": "9.18.0", "resolved": "https://registry.npmjs.org/globals/-/globals-9.18.0.tgz", - "integrity": "sha512-S0nG3CLEQiY/ILxqtztTWH/3iRRdyBLw6KMDxnKMchrtbj2OFmehVh0WUCfW3DUrIgx/qFrJPICrq4Z4sTR9UQ==", - "dev": true + "integrity": "sha1-qjiWs+abSH8X4x7SFD1pqOMMLYo=" }, "lodash": { "version": "4.17.10", "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.10.tgz", - "integrity": "sha512-UejweD1pDoXu+AD825lWwp4ZGtSwgnpZxb3JDViD7StjQz+Nb/6l093lx4OQ0foGWNRoc19mWy7BzL+UAK2iVg==", - "dev": true + "integrity": "sha1-G3eTz3JZ6jj7NmHU04syYK+K5Oc=" } } }, @@ -1514,7 +1439,6 @@ "version": "6.26.0", "resolved": "https://registry.npmjs.org/babel-types/-/babel-types-6.26.0.tgz", "integrity": "sha1-o7Bz+Uq0nrb6Vc1lInozQ4BjJJc=", - "dev": true, "requires": { "babel-runtime": "^6.26.0", "esutils": "^2.0.2", @@ -1525,22 +1449,19 @@ "lodash": { "version": "4.17.10", "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.10.tgz", - "integrity": "sha512-UejweD1pDoXu+AD825lWwp4ZGtSwgnpZxb3JDViD7StjQz+Nb/6l093lx4OQ0foGWNRoc19mWy7BzL+UAK2iVg==", - "dev": true + "integrity": "sha1-G3eTz3JZ6jj7NmHU04syYK+K5Oc=" } } }, "babylon": { "version": "6.18.0", "resolved": "https://registry.npmjs.org/babylon/-/babylon-6.18.0.tgz", - "integrity": "sha512-q/UEjfGJ2Cm3oKV71DJz9d25TPnq5rhBVL2Q4fA5wcC3jcrdn7+SssEybFIxwAvvP+YCsCYNKughoF33GxgycQ==", - "dev": true + "integrity": "sha1-ry87iPpvXB5MY00aD46sT1WzleM=" }, "babylon-walk": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/babylon-walk/-/babylon-walk-1.0.2.tgz", "integrity": "sha1-OxWl3btIKni0zpwByLoYFwLZ1s4=", - "dev": true, "requires": { "babel-runtime": "^6.11.6", "babel-types": "^6.15.0", @@ -1555,7 +1476,7 @@ "base": { "version": "0.11.2", "resolved": "https://registry.npmjs.org/base/-/base-0.11.2.tgz", - "integrity": "sha512-5T6P4xPgpp0YDFvSWwEZ4NoE3aM4QBQXDzmVbraCkFj8zHM+mba8SyqB5DbZWyR7mYHo6Y7BdQo3MoA4m0TeQg==", + "integrity": "sha1-e95c7RRbbVUakNuH+DxVi060io8=", "requires": { "cache-base": "^1.0.1", "class-utils": "^0.3.5", @@ -1577,7 +1498,7 @@ "is-accessor-descriptor": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz", - "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==", + "integrity": "sha1-FpwvbT3x+ZJhgHI2XJsOofaHhlY=", "requires": { "kind-of": "^6.0.0" } @@ -1585,7 +1506,7 @@ "is-data-descriptor": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz", - "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==", + "integrity": "sha1-2Eh2Mh0Oet0DmQQGq7u9NrqSaMc=", "requires": { "kind-of": "^6.0.0" } @@ -1593,7 +1514,7 @@ "is-descriptor": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz", - "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==", + "integrity": "sha1-OxWXRqZmBLBPjIFSS6NlxfFNhuw=", "requires": { "is-accessor-descriptor": "^1.0.0", "is-data-descriptor": "^1.0.0", @@ -1608,14 +1529,14 @@ "kind-of": { "version": "6.0.2", "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.2.tgz", - "integrity": "sha512-s5kLOcnH0XqDO+FvuaLX8DDjZ18CGFk7VygH40QoKPUQhW4e2rvM0rwUq0t8IQDOwYSeLK01U90OjzBTme2QqA==" + "integrity": "sha1-ARRrNqYhjmTljzqNZt5df8b20FE=" } } }, "base64-js": { "version": "1.3.0", 
"resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.3.0.tgz", - "integrity": "sha512-ccav/yGvoa80BQDljCxsmmQ3Xvx60/UpBIij5QN21W3wBi/hhIC9OoO+KLpu9IJTS9j4DRVJ3aDDF9cMSoa2lw==" + "integrity": "sha1-yrHmEY8FEJXli1KBrqjBzSK/wOM=" }, "bcrypt-pbkdf": { "version": "1.0.1", @@ -1629,8 +1550,7 @@ "big.js": { "version": "3.2.0", "resolved": "https://registry.npmjs.org/big.js/-/big.js-3.2.0.tgz", - "integrity": "sha512-+hN/Zh2D08Mx65pZ/4g5bsmNiZUuChDiQfTUQ7qJr4/kuopCr88xZsAXv6mBoZEsUI4OuGHlX59qE94K2mMW8Q==", - "dev": true + "integrity": "sha1-pfwpi4G54Nyi5FiCR4S2XFK6WI4=" }, "binary": { "version": "0.3.0", @@ -1650,13 +1570,12 @@ "bindings": { "version": "1.2.1", "resolved": "https://registry.npmjs.org/bindings/-/bindings-1.2.1.tgz", - "integrity": "sha1-FK1hE4EtLTfXLme0ystLtyZQXxE=", - "dev": true + "integrity": "sha1-FK1hE4EtLTfXLme0ystLtyZQXxE=" }, "bl": { "version": "1.2.2", "resolved": "https://registry.npmjs.org/bl/-/bl-1.2.2.tgz", - "integrity": "sha512-e8tQYnZodmebYDWGH7KMRvtzKXaJHx3BbilrgZCfvyLUYdKpK1t5PSPmpkny/SgiTSCnjfLW7v5rlONXVFkQEA==", + "integrity": "sha1-oWCRFxcQPAdBDO9j71Gzl8Alr5w=", "requires": { "readable-stream": "^2.3.5", "safe-buffer": "^5.1.1" @@ -1673,7 +1592,7 @@ "bn.js": { "version": "4.11.8", "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.11.8.tgz", - "integrity": "sha512-ItfYfPLkWHUjckQCk8xC+LwxgK8NYcXywGigJgSwOP8Y2iyWT4f2vsZnoOXTTbo+o5yXmIUJ4gn5538SO5S3gA==" + "integrity": "sha1-LN4J617jQfSEdGuwMJsyU7GxRC8=" }, "body-parser": { "version": "1.18.2", @@ -1695,15 +1614,14 @@ "qs": { "version": "6.5.1", "resolved": "https://registry.npmjs.org/qs/-/qs-6.5.1.tgz", - "integrity": "sha512-eRzhrN1WSINYCDCbrz796z37LOe3m5tmW7RQf6oBntukAG1nmovJvhnwHHRMAfeoItc1m2Hk02WER2aQ/iqs+A==" + "integrity": "sha1-NJzfbu+J7EXBLX1es/wMhwNDptg=" } } }, "boolbase": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/boolbase/-/boolbase-1.0.0.tgz", - "integrity": "sha1-aN/1++YMUes3cl6p4+0xDcwed24=", - "dev": true + "integrity": "sha1-aN/1++YMUes3cl6p4+0xDcwed24=" }, "boom": { "version": "4.3.1", @@ -1716,7 +1634,7 @@ "bootstrap": { "version": "4.1.3", "resolved": "https://registry.npmjs.org/bootstrap/-/bootstrap-4.1.3.tgz", - "integrity": "sha512-rDFIzgXcof0jDyjNosjv4Sno77X4KuPeFxG2XZZv1/Kc8DRVGVADdoQyyOVDwPqL36DDmtCQbrpMCqvpPLJQ0w==" + "integrity": "sha1-DrNxryyESOjCEEEdDLgkpkCaEr4=" }, "bootstrap-datetime-picker": { "version": "2.4.4", @@ -1726,7 +1644,7 @@ "brace-expansion": { "version": "1.1.11", "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", - "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "integrity": "sha1-PH/L9SnYcibz0vUrlm/1Jx60Qd0=", "requires": { "balanced-match": "^1.0.0", "concat-map": "0.0.1" @@ -1745,8 +1663,7 @@ "brfs": { "version": "1.6.1", "resolved": "https://registry.npmjs.org/brfs/-/brfs-1.6.1.tgz", - "integrity": "sha512-OfZpABRQQf+Xsmju8XE9bDjs+uU4vLREGolP7bDgcpsI17QREyZ4Bl+2KLxxx1kCgA0fAIhKQBaBYh+PEcCqYQ==", - "dev": true, + "integrity": "sha1-t4ziM22BjiXuoEoJR8um1PuIScM=", "requires": { "quote-stream": "^1.0.1", "resolve": "^1.1.5", @@ -1779,7 +1696,7 @@ "browser-pack": { "version": "6.1.0", "resolved": "https://registry.npmjs.org/browser-pack/-/browser-pack-6.1.0.tgz", - "integrity": "sha512-erYug8XoqzU3IfcU8fUgyHqyOXqIE4tUTTQ+7mqUjQlvnXkOO6OlT9c/ZoJVHYoAaqGxr09CN53G7XIsO4KtWA==", + "integrity": "sha1-w0uhDQuc4WK1ryJ8cTHJLC7NV3Q=", "requires": { "JSONStream": "^1.0.3", "combine-source-map": "~0.8.0", @@ -1792,7 +1709,7 @@ 
"browser-resolve": { "version": "1.11.3", "resolved": "https://registry.npmjs.org/browser-resolve/-/browser-resolve-1.11.3.tgz", - "integrity": "sha512-exDi1BYWB/6raKHmDTCicQfTkqwN5fioMFV4j8BsfMU4R2DK/QfZfK7kOVkmWCNANf0snkBzqGqAJBao9gZMdQ==", + "integrity": "sha1-m3y7PQ9RDky4a9vXlhJNKLWJCvY=", "requires": { "resolve": "1.1.7" }, @@ -1807,7 +1724,7 @@ "browser-stdout": { "version": "1.3.1", "resolved": "https://registry.npmjs.org/browser-stdout/-/browser-stdout-1.3.1.tgz", - "integrity": "sha512-qhAVI1+Av2X7qelOfAIYwXONood6XlZE/fXaBSmW/T5SzLAmCgzi+eiWE7fUvbHaeNBQH13UftjpXxsfLkMpgw==" + "integrity": "sha1-uqVZ7hTO1zRSIputcyZGfGH6vWA=" }, "browserify": { "version": "12.0.2", @@ -1986,7 +1903,7 @@ "browserify-cipher": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/browserify-cipher/-/browserify-cipher-1.0.1.tgz", - "integrity": "sha512-sPhkz0ARKbf4rRQt2hTpAHqn47X3llLkUGn+xEJzLjwY8LRs2p0v7ljvI5EyoRO/mexrNunNECisZs+gw2zz1w==", + "integrity": "sha1-jWR0wbhwv9q807z8wZNKEOlPFfA=", "requires": { "browserify-aes": "^1.0.4", "browserify-des": "^1.0.0", @@ -1996,7 +1913,7 @@ "browserify-aes": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/browserify-aes/-/browserify-aes-1.2.0.tgz", - "integrity": "sha512-+7CHXqGuspUn/Sl5aO7Ea0xWGAtETPXNSAjHo48JfLdPWcMng33Xe4znFvQweqc/uzk5zSOI3H52CYnjCfb5hA==", + "integrity": "sha1-Mmc0ZC9APavDADIJhTu3CtQo70g=", "requires": { "buffer-xor": "^1.0.3", "cipher-base": "^1.0.0", @@ -2011,7 +1928,7 @@ "browserify-des": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/browserify-des/-/browserify-des-1.0.1.tgz", - "integrity": "sha512-zy0Cobe3hhgpiOM32Tj7KQ3Vl91m0njwsjzZQK1L+JDf11dzP9qIvjreVinsvXrgfjhStXwUWAEpB9D7Gwmayw==", + "integrity": "sha1-M0MSTbbXrVPiaogmMYcSvchFD5w=", "requires": { "cipher-base": "^1.0.1", "des.js": "^1.0.0", @@ -2052,8 +1969,7 @@ "browserslist": { "version": "3.2.8", "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-3.2.8.tgz", - "integrity": "sha512-WHVocJYavUwVgVViC0ORikPHQquXwVh939TaelZ4WDqpWgTX/FsGhl/+P4qBUAGcRvtOgDgC+xftNWWp2RUTAQ==", - "dev": true, + "integrity": "sha1-sABTYdZHHw9ZUnl6dvyYXx+Xj8Y=", "requires": { "caniuse-lite": "^1.0.30000844", "electron-to-chromium": "^1.3.47" @@ -2062,7 +1978,7 @@ "buble": { "version": "0.16.0", "resolved": "https://registry.npmjs.org/buble/-/buble-0.16.0.tgz", - "integrity": "sha512-Eb5vt1+IvXXPyYD1IIQIuaBwIuJOSWQ2kXzULlg5I83aLGF2qzcjRU2joYusnWFgAenvJ9xTOMvZvT0bb8BLbg==", + "integrity": "sha1-F3PntaOD9ccir2sbFrK6ScuGapg=", "requires": { "acorn": "^3.3.0", "acorn-jsx": "^3.0.1", @@ -2085,7 +2001,6 @@ "version": "4.9.1", "resolved": "https://registry.npmjs.org/buffer/-/buffer-4.9.1.tgz", "integrity": "sha1-bRu2AbB6TvztlwlBMgkwJ8lbwpg=", - "dev": true, "requires": { "base64-js": "^1.0.2", "ieee754": "^1.1.4", @@ -2095,7 +2010,7 @@ "buffer-alloc": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/buffer-alloc/-/buffer-alloc-1.2.0.tgz", - "integrity": "sha512-CFsHQgjtW1UChdXgbyJGtnm+O/uLQeZdtbDo8mfUgYXCHSM1wgrVxXm6bSyrUuErEb+4sYVGCzASBRot7zyrow==", + "integrity": "sha1-iQ3ZDZI6hz4I4Q5f1RpX5bfM4Ow=", "requires": { "buffer-alloc-unsafe": "^1.1.0", "buffer-fill": "^1.0.0" @@ -2104,7 +2019,7 @@ "buffer-alloc-unsafe": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/buffer-alloc-unsafe/-/buffer-alloc-unsafe-1.1.0.tgz", - "integrity": "sha512-TEM2iMIEQdJ2yjPJoSIsldnleVaAk1oW3DBVUykyOLsEsFmEc9kn+SFFPz+gl54KQNxlDnAwCXosOS9Okx2xAg==" + "integrity": "sha1-vX3CauKXLQ7aJTvgYdupkjScGfA=" }, "buffer-crc32": { "version": "0.2.13", @@ -2119,7 
+2034,7 @@ "buffer-from": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.0.0.tgz", - "integrity": "sha512-83apNb8KK0Se60UE1+4Ukbe3HbfELJ6UlI4ldtOGs7So4KD26orJM8hIY9lxdzP+UpItH1Yh/Y8GUvNFWFFRxA==" + "integrity": "sha1-TLiDLSNhJYmwQG6eKVbBfwb99TE=" }, "buffer-xor": { "version": "1.0.3", @@ -2150,7 +2065,7 @@ "cache-base": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/cache-base/-/cache-base-1.0.1.tgz", - "integrity": "sha512-AKcdTnFSWATd5/GCPRxr2ChwIJ85CeyrEyjRHlKxQ56d4XJMGym0uAiKn0xbLOGOl3+yRpOTi484dVCEc5AUzQ==", + "integrity": "sha1-Cn9GQWgxyLZi7jb+TnxZ129marI=", "requires": { "collection-visit": "^1.0.0", "component-emitter": "^1.2.1", @@ -2178,8 +2093,7 @@ "call-me-maybe": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/call-me-maybe/-/call-me-maybe-1.0.1.tgz", - "integrity": "sha1-JtII6onje1y95gJQoV8DHBak1ms=", - "dev": true + "integrity": "sha1-JtII6onje1y95gJQoV8DHBak1ms=" }, "caller-path": { "version": "0.1.0", @@ -2216,7 +2130,6 @@ "version": "1.6.1", "resolved": "https://registry.npmjs.org/caniuse-api/-/caniuse-api-1.6.1.tgz", "integrity": "sha1-tTTnxzTE+B7F++isoq0kNUuWLGw=", - "dev": true, "requires": { "browserslist": "^1.3.6", "caniuse-db": "^1.0.30000529", @@ -2228,7 +2141,6 @@ "version": "1.7.7", "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-1.7.7.tgz", "integrity": "sha1-C9dnBCWL6CmyOYu1Dkti0aFmsLk=", - "dev": true, "requires": { "caniuse-db": "^1.0.30000639", "electron-to-chromium": "^1.2.7" @@ -2239,14 +2151,12 @@ "caniuse-db": { "version": "1.0.30000885", "resolved": "https://registry.npmjs.org/caniuse-db/-/caniuse-db-1.0.30000885.tgz", - "integrity": "sha512-Hy1a+UIXooG+tRlt3WnT9avMf+l999bR9J1MqlQdYKgbsYjKxV4a4rgcmiyMmdCLPBFsiRoDxdl9tnNyaq2RXw==", - "dev": true + "integrity": "sha1-zcmN0WjtWWeGUAcff2pwkQ4nW8g=" }, "caniuse-lite": { "version": "1.0.30000850", "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30000850.tgz", - "integrity": "sha512-iHK48UR/InydhpPAzgSmsJXRAR925T0kwJhZ1wk0xRatpGMvi2f06LABg6HXfV4WW4P2wChzlcFa/TEmbTyXQA==", - "dev": true + "integrity": "sha1-5oqI206lmLTDO4QZ9zhUc+SAJJU=" }, "caseless": { "version": "0.12.0", @@ -2297,13 +2207,13 @@ "chardet": { "version": "0.7.0", "resolved": "https://registry.npmjs.org/chardet/-/chardet-0.7.0.tgz", - "integrity": "sha512-mT8iDcrh03qDGRRmoA2hmBJnxpllMR+0/0qlzjqZES6NdiWDcZkCNAk4rPFZ9Q85r27unkiNNg8ZOiwZXBHwcA==", + "integrity": "sha1-kAlISfCTfy7twkJdDSip5fDLrZ4=", "dev": true }, "chart.js": { "version": "2.7.2", "resolved": "https://registry.npmjs.org/chart.js/-/chart.js-2.7.2.tgz", - "integrity": "sha512-90wl3V9xRZ8tnMvMlpcW+0Yg13BelsGS9P9t0ClaDxv/hdypHDr/YAGf+728m11P5ljwyB0ZHfPKCapZFqSqYA==", + "integrity": "sha1-PJ/eTcW5VgghG97+2n5dM9/6VxQ=", "requires": { "chartjs-color": "^2.1.0", "moment": "^2.10.2" @@ -2328,7 +2238,7 @@ "chartjs-color-string": { "version": "0.5.0", "resolved": "https://registry.npmjs.org/chartjs-color-string/-/chartjs-color-string-0.5.0.tgz", - "integrity": "sha512-amWNvCOXlOUYxZVDSa0YOab5K/lmEhbFNKI55PWc4mlv28BDzA7zaoQTGxSBgJMHIW+hGX8YUrvw/FH4LyhwSQ==", + "integrity": "sha1-jTdS2Fgdhmh8Nb/iy4CsUhPOuME=", "requires": { "color-name": "^1.0.0" } @@ -2341,7 +2251,7 @@ "chokidar": { "version": "2.0.3", "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-2.0.3.tgz", - "integrity": "sha512-zW8iXYZtXMx4kux/nuZVXjkLP+CyIK5Al5FHnj1OgTKGZfp4Oy6/ymtMSKFv3GD8DviEmUPmJg9eFdJ/JzudMg==", + "integrity": "sha1-3L1PbLsqVbR5m6ioQKxSfl9LEXY=", "requires": { "anymatch": "^2.0.0", 
"async-each": "^1.0.0", @@ -2365,7 +2275,7 @@ "braces": { "version": "2.3.2", "resolved": "https://registry.npmjs.org/braces/-/braces-2.3.2.tgz", - "integrity": "sha512-aNdbnj9P8PjdXU4ybaWLK2IF3jc/EoDYbC7AazW6to3TRsfXxscC9UXOB5iDiEQrkyIbWp2SLQda4+QAa7nc3w==", + "integrity": "sha1-WXn9PxTNUxVl5fot8av/8d+u5yk=", "requires": { "arr-flatten": "^1.1.0", "array-unique": "^0.3.2", @@ -2448,7 +2358,7 @@ "cipher-base": { "version": "1.0.4", "resolved": "https://registry.npmjs.org/cipher-base/-/cipher-base-1.0.4.tgz", - "integrity": "sha512-Kkht5ye6ZGmwv40uUDZztayT2ThLQGfnj/T71N/XzeZeo3nf8foyW7zGTsPYkEya3m5f3cAypH+qe7YOrM1U2Q==", + "integrity": "sha1-h2Dk7MJy9MNjUy+SbYdKriwTl94=", "requires": { "inherits": "^2.0.1", "safe-buffer": "^5.0.1" @@ -2457,14 +2367,13 @@ "circular-json": { "version": "0.3.3", "resolved": "https://registry.npmjs.org/circular-json/-/circular-json-0.3.3.tgz", - "integrity": "sha512-UZK3NBx2Mca+b5LsG7bY183pHWt5Y1xts4P3Pz7ENTwGVnJOUWbRb3ocjvX7hx9tq/yTAdclXm9sZ38gNuem4A==", + "integrity": "sha1-gVyZ6oT2gJUp0vRXkb34JxE1LWY=", "dev": true }, "clap": { "version": "1.2.3", "resolved": "https://registry.npmjs.org/clap/-/clap-1.2.3.tgz", - "integrity": "sha512-4CoL/A3hf90V3VIEjeuhSvlGFEHKzOz+Wfc2IVZc+FaUgU0ZQafJTP49fvnULipOPcAfqhyI2duwQyns6xqjYA==", - "dev": true, + "integrity": "sha1-TzZ0WzIAhJJVf0ZBLWbVDLmbzlE=", "requires": { "chalk": "^1.1.3" } @@ -2472,7 +2381,7 @@ "class-utils": { "version": "0.3.6", "resolved": "https://registry.npmjs.org/class-utils/-/class-utils-0.3.6.tgz", - "integrity": "sha512-qOhPa/Fj7s6TY8H8esGu5QNpMMQxz79h+urzrNYN6mn+9BnxlDGf5QZ+XeCDsxSjPqsSR56XOZOJmpeurnLMeg==", + "integrity": "sha1-+TNprouafOAv1B+q0MqDAzGQxGM=", "requires": { "arr-union": "^3.1.0", "define-property": "^0.2.5", @@ -2499,7 +2408,6 @@ "version": "2.1.0", "resolved": "https://registry.npmjs.org/cli-cursor/-/cli-cursor-2.1.0.tgz", "integrity": "sha1-s12sN2R5+sw+lHR9QdDQ9SOP/LU=", - "dev": true, "requires": { "restore-cursor": "^2.0.0" } @@ -2507,8 +2415,7 @@ "cli-spinners": { "version": "1.3.1", "resolved": "https://registry.npmjs.org/cli-spinners/-/cli-spinners-1.3.1.tgz", - "integrity": "sha512-1QL4544moEsDVH9T/l6Cemov/37iv1RtoKf7NJ04A60+4MREXNfx/QvavbH6QoGdsD4N4Mwy49cmaINR/o2mdg==", - "dev": true + "integrity": "sha1-ACwZkJEtDVlYDJO9NsBW3pnkJZo=" }, "cli-width": { "version": "2.2.0", @@ -2519,8 +2426,7 @@ "clone": { "version": "1.0.4", "resolved": "https://registry.npmjs.org/clone/-/clone-1.0.4.tgz", - "integrity": "sha1-2jCcwmPfFZlMaIypAheco8fNfH4=", - "dev": true + "integrity": "sha1-2jCcwmPfFZlMaIypAheco8fNfH4=" }, "clone-buffer": { "version": "1.0.0", @@ -2530,7 +2436,7 @@ "cloneable-readable": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/cloneable-readable/-/cloneable-readable-1.1.2.tgz", - "integrity": "sha512-Bq6+4t+lbM8vhTs/Bef5c5AdEMtapp/iFb6+s4/Hh9MVTt8OLKH7ZOOZSCT+Ys7hsHvqv0GuMPJ1lnQJVHvxpg==", + "integrity": "sha1-1ZHe5Kj4vBXaQ86X3O66E9Q+KmU=", "requires": { "inherits": "^2.0.1", "process-nextick-args": "^2.0.0", @@ -2540,8 +2446,7 @@ "clones": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/clones/-/clones-1.1.0.tgz", - "integrity": "sha1-h+kEEy1hQMXAtyAGwIwNBb17Y7M=", - "dev": true + "integrity": "sha1-h+kEEy1hQMXAtyAGwIwNBb17Y7M=" }, "co": { "version": "4.6.0", @@ -2552,7 +2457,6 @@ "version": "1.0.4", "resolved": "https://registry.npmjs.org/coa/-/coa-1.0.4.tgz", "integrity": "sha1-qe8VNmDWqGqL3sAomlxoTSF0Mv0=", - "dev": true, "requires": { "q": "^1.1.2" } @@ -2575,7 +2479,6 @@ "version": "0.11.4", "resolved": 
"https://registry.npmjs.org/color/-/color-0.11.4.tgz", "integrity": "sha1-bXtcdPtl6EHNSHkq0e1eB7kE12Q=", - "dev": true, "requires": { "clone": "^1.0.2", "color-convert": "^1.3.0", @@ -2585,7 +2488,7 @@ "color-convert": { "version": "1.9.1", "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.1.tgz", - "integrity": "sha512-mjGanIiwQJskCC18rPR6OmrZ6fm2Lc7PeGFYwCmy5J34wC6F1PzdGL6xeMfmgicfYcNLGuVFA3WzXtIDCQSZxQ==", + "integrity": "sha1-wSYRB66y8pTr/+ye2eytUppgl+0=", "requires": { "color-name": "^1.1.1" } @@ -2599,7 +2502,6 @@ "version": "0.3.0", "resolved": "https://registry.npmjs.org/color-string/-/color-string-0.3.0.tgz", "integrity": "sha1-J9RvtnAlxcL6JZk7+/V55HhBuZE=", - "dev": true, "requires": { "color-name": "^1.0.0" } @@ -2608,7 +2510,6 @@ "version": "1.1.2", "resolved": "https://registry.npmjs.org/colormin/-/colormin-1.1.2.tgz", "integrity": "sha1-6i90IKcrlogaOKrlnsEkpvcpgTM=", - "dev": true, "requires": { "color": "^0.11.0", "css-color-names": "0.0.4", @@ -2654,13 +2555,12 @@ "command-exists": { "version": "1.2.7", "resolved": "https://registry.npmjs.org/command-exists/-/command-exists-1.2.7.tgz", - "integrity": "sha512-doWDvhXCcW5LK0cIUWrOQ8oMFXJv3lEQCkJpGVjM8v9SV0uhqYXB943538tEA2CiaWqSyuYUGAm5ezDwEx9xlw==", - "dev": true + "integrity": "sha1-FoKPDD/ysMWIBYYe8hG2T8FWkqg=" }, "commander": { "version": "2.15.1", "resolved": "https://registry.npmjs.org/commander/-/commander-2.15.1.tgz", - "integrity": "sha512-VlfT9F3V0v+jr4yxPc5gg9s62/fIVWsd2Bk2iD435um1NlGMYdVCq+MjcXnhYq2icNOizHr1kK+5TI6H0Hy0ag==" + "integrity": "sha1-30boZ9D8Kuxmo0ZitAapzK//Ww8=" }, "component-emitter": { "version": "1.2.1", @@ -2689,14 +2589,14 @@ "mime-db": { "version": "1.36.0", "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.36.0.tgz", - "integrity": "sha512-L+xvyD9MkoYMXb1jAmzI/lWYAxAMCPvIBSWur0PZ5nOf5euahRLVqH//FKW9mWp2lkqUgYiXPgkzfMUFi4zVDw==" + "integrity": "sha1-UCBHjbPH/pOq17vMTc+GnEM2M5c=" } } }, "compression": { "version": "1.7.3", "resolved": "https://registry.npmjs.org/compression/-/compression-1.7.3.tgz", - "integrity": "sha512-HSjyBG5N1Nnz7tF2+O7A9XUhyjru71/fwgNb7oIsEVHR0WShfs2tIS/EySLgiTe98aOK18YDlMXpzjCXY/n9mg==", + "integrity": "sha1-J+DhdqryYPfywoE8PkQK258Zk9s=", "requires": { "accepts": "~1.3.5", "bytes": "3.0.0", @@ -2710,7 +2610,7 @@ "safe-buffer": { "version": "5.1.2", "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", - "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" + "integrity": "sha1-mR7GnSluAxN0fVm9/St0XDX4go0=" } } }, @@ -2722,7 +2622,7 @@ "concat-stream": { "version": "1.6.2", "resolved": "https://registry.npmjs.org/concat-stream/-/concat-stream-1.6.2.tgz", - "integrity": "sha512-27HBghJxjiZtIk3Ycvn/4kbJk/1uZuJFfuPEns6LaEvpvG1f0hTea8lilrouyo9mVc2GWdcEZ8OLoGmSADlrCw==", + "integrity": "sha1-kEvfGUzTEi/Gdcd/xKw9T/D9GjQ=", "requires": { "buffer-from": "^1.0.0", "inherits": "^2.0.3", @@ -2733,8 +2633,7 @@ "config-chain": { "version": "1.1.12", "resolved": "https://registry.npmjs.org/config-chain/-/config-chain-1.1.12.tgz", - "integrity": "sha512-a1eOIcu8+7lUInge4Rpf/n4Krkf3Dd9lqhljRzII1/Zno/kRtUWnznPO3jOKBmTEktkt3fkxisUcivoj0ebzoA==", - "dev": true, + "integrity": "sha1-D96NCRIA616AjK8l/mGMAvSOTvo=", "requires": { "ini": "^1.3.4", "proto-list": "~1.2.1" @@ -2766,13 +2665,12 @@ "content-type": { "version": "1.0.4", "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.4.tgz", - "integrity": 
"sha512-hIP3EEPs8tB9AT1L+NUqtwOAps4mk2Zob89MWXMHjHWg9milF/j4osnnQLXBCBFBk/tvIG/tUc9mOUJiPBhPXA==" + "integrity": "sha1-4TjMdeBAxyexlm/l5fjJruJW/js=" }, "convert-source-map": { "version": "1.5.1", "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-1.5.1.tgz", - "integrity": "sha1-uCeAl7m8IpNl3lxiz1/K7YtVmeU=", - "dev": true + "integrity": "sha1-uCeAl7m8IpNl3lxiz1/K7YtVmeU=" }, "cookie": { "version": "0.3.1", @@ -2801,8 +2699,7 @@ "core-js": { "version": "2.5.7", "resolved": "https://registry.npmjs.org/core-js/-/core-js-2.5.7.tgz", - "integrity": "sha512-RszJCAxg/PP6uzXVXL6BsxSXx/B05oJAQ2vkJRjyjrEcNVycaqOmNb5OTxZPE3xa5gwZduqza6L9JOCenh/Ecw==", - "dev": true + "integrity": "sha1-+XJgj/DOrWi4QaFqky0LGDeRgU4=" }, "core-util-is": { "version": "1.0.2", @@ -2812,8 +2709,7 @@ "cosmiconfig": { "version": "5.0.6", "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-5.0.6.tgz", - "integrity": "sha512-6DWfizHriCrFWURP1/qyhsiFvYdlJzbCzmtFWh744+KyWsJo5+kPzUZZaMRSSItoYc0pxFX7gEO7ZC1/gN/7AQ==", - "dev": true, + "integrity": "sha1-3KbPaAoL0DWJr/aEcAhYyBq+6zk=", "requires": { "is-directory": "^0.3.1", "js-yaml": "^3.9.0", @@ -2823,8 +2719,7 @@ "js-yaml": { "version": "3.12.0", "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.12.0.tgz", - "integrity": "sha512-PIt2cnwmPfL4hKNwqeiuz4bKfnzHTBv6HyVgjahA6mPLwPDzjDWrplJBMjHUFxku/N3FlmrbyPclad+I+4mJ3A==", - "dev": true, + "integrity": "sha1-6u1lbsg0TxD1J8a/obbiJE3hZ9E=", "requires": { "argparse": "^1.0.7", "esprima": "^4.0.0" @@ -2834,7 +2729,6 @@ "version": "4.0.0", "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-4.0.0.tgz", "integrity": "sha1-vjX1Qlvh9/bHRxhPmKeIy5lHfuA=", - "dev": true, "requires": { "error-ex": "^1.3.1", "json-parse-better-errors": "^1.0.1" @@ -2859,7 +2753,7 @@ "create-ecdh": { "version": "4.0.3", "resolved": "https://registry.npmjs.org/create-ecdh/-/create-ecdh-4.0.3.tgz", - "integrity": "sha512-GbEHQPMOswGpKXM9kCWVrremUcBmjteUaQ01T9rkKCPDXfUHX0IoP9LpHYo2NPFampa4e+/pFDc3jQdxrxQLaw==", + "integrity": "sha1-yREbbzMEXEaX8UR4f5JUzcd8Rf8=", "requires": { "bn.js": "^4.1.0", "elliptic": "^6.0.0" @@ -2868,7 +2762,7 @@ "create-hash": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/create-hash/-/create-hash-1.2.0.tgz", - "integrity": "sha512-z00bCGNHDG8mHAkP7CtT1qVu+bFQUPjYq/4Iv3C3kWjTFV10zIjfSoeqXo9Asws8gwSHDGj/hl2u4OGIjapeCg==", + "integrity": "sha1-iJB4rxGmN1a8+1m9IhmWvjqe8ZY=", "requires": { "cipher-base": "^1.0.1", "inherits": "^2.0.1", @@ -2880,7 +2774,7 @@ "ripemd160": { "version": "2.0.2", "resolved": "https://registry.npmjs.org/ripemd160/-/ripemd160-2.0.2.tgz", - "integrity": "sha512-ii4iagi25WusVoiC4B4lq7pbXfAp3D9v5CwfkY33vffw2+pkDjY1D8GaN7spsxvCSx8dkPqOZCEZyfxcmJG2IA==", + "integrity": "sha1-ocGm9iR1FXe6XQeRTLyShQWFiQw=", "requires": { "hash-base": "^3.0.0", "inherits": "^2.0.1" @@ -2889,7 +2783,7 @@ "sha.js": { "version": "2.4.11", "resolved": "https://registry.npmjs.org/sha.js/-/sha.js-2.4.11.tgz", - "integrity": "sha512-QMEp5B7cftE7APOjk5Y6xgrbWu+WkLVQwk8JNjZ8nKRciZaByEW6MubieAiToS7+dwvrjGhH8jRXz3MVd0AYqQ==", + "integrity": "sha1-N6XPC4HsvGlD3hCbopYNGyZYSuc=", "requires": { "inherits": "^2.0.1", "safe-buffer": "^5.0.1" @@ -2900,7 +2794,7 @@ "create-hmac": { "version": "1.1.7", "resolved": "https://registry.npmjs.org/create-hmac/-/create-hmac-1.1.7.tgz", - "integrity": "sha512-MJG9liiZ+ogc4TzUwuvbER1JRdgvUFSB5+VR/g5h82fGaIRWMWddtKBHi7/sVhfjQZ6SehlyhvQYrcYkaUIpLg==", + "integrity": "sha1-aRcMeLOrlXFHsriwRXLkfq0iQ/8=", "requires": { "cipher-base": "^1.0.3", 
"create-hash": "^1.1.0", @@ -2913,7 +2807,7 @@ "ripemd160": { "version": "2.0.2", "resolved": "https://registry.npmjs.org/ripemd160/-/ripemd160-2.0.2.tgz", - "integrity": "sha512-ii4iagi25WusVoiC4B4lq7pbXfAp3D9v5CwfkY33vffw2+pkDjY1D8GaN7spsxvCSx8dkPqOZCEZyfxcmJG2IA==", + "integrity": "sha1-ocGm9iR1FXe6XQeRTLyShQWFiQw=", "requires": { "hash-base": "^3.0.0", "inherits": "^2.0.1" @@ -2922,7 +2816,7 @@ "sha.js": { "version": "2.4.11", "resolved": "https://registry.npmjs.org/sha.js/-/sha.js-2.4.11.tgz", - "integrity": "sha512-QMEp5B7cftE7APOjk5Y6xgrbWu+WkLVQwk8JNjZ8nKRciZaByEW6MubieAiToS7+dwvrjGhH8jRXz3MVd0AYqQ==", + "integrity": "sha1-N6XPC4HsvGlD3hCbopYNGyZYSuc=", "requires": { "inherits": "^2.0.1", "safe-buffer": "^5.0.1" @@ -2942,7 +2836,7 @@ "cryonic": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/cryonic/-/cryonic-1.0.0.tgz", - "integrity": "sha512-8wqWtdI+7IQVYCDS40H/H267zb2Lwn08Q7HT0hIqHNMkRPQdV355dPRu/hV02k2sBtZJ+KEnRVtaZWzT3hPVmQ==" + "integrity": "sha1-VzIOdX18OrxykUsDfgSSlea9BPQ=" }, "cryptiles": { "version": "3.1.2", @@ -2955,7 +2849,7 @@ "boom": { "version": "5.2.0", "resolved": "https://registry.npmjs.org/boom/-/boom-5.2.0.tgz", - "integrity": "sha512-Z5BTk6ZRe4tXXQlkqftmsAUANpXmuwlsF5Oov8ThoMbQRzdGTA1ngYRW160GexgOgjsFOKJz0LYhoNi+2AMBUw==", + "integrity": "sha1-XdnabuOl8wIHdDYpDLcX0/SlTgI=", "requires": { "hoek": "4.x.x" } @@ -2965,7 +2859,7 @@ "crypto-browserify": { "version": "3.12.0", "resolved": "https://registry.npmjs.org/crypto-browserify/-/crypto-browserify-3.12.0.tgz", - "integrity": "sha512-fz4spIh+znjO2VjL+IdhEpRJ3YN6sMzITSBijk6FK2UvTqruSQW+/cCZTSNsMiZNvUeq0CqurF+dAbyiGOY6Wg==", + "integrity": "sha1-OWz58xN/A+S45TLFj2mCVOAPgOw=", "requires": { "browserify-cipher": "^1.0.0", "browserify-sign": "^4.0.0", @@ -2983,14 +2877,12 @@ "css-color-names": { "version": "0.0.4", "resolved": "https://registry.npmjs.org/css-color-names/-/css-color-names-0.0.4.tgz", - "integrity": "sha1-gIrcLnnPhHOAabZGyyDsJ762KeA=", - "dev": true + "integrity": "sha1-gIrcLnnPhHOAabZGyyDsJ762KeA=" }, "css-declaration-sorter": { "version": "3.0.1", "resolved": "https://registry.npmjs.org/css-declaration-sorter/-/css-declaration-sorter-3.0.1.tgz", - "integrity": "sha512-jH4024SHZ3e0M7ann9VxpFpH3moplRXNz9ZBqvFMZqi09Yo5ARbs2wdPH8GqN9iRTlQynrbGbraNbBxBLei85Q==", - "dev": true, + "integrity": "sha1-0OMFaw/YjcHqnc7/Q1rb6ccCp/g=", "requires": { "postcss": "^6.0.0", "timsort": "^0.3.0" @@ -3000,7 +2892,6 @@ "version": "1.1.0", "resolved": "https://registry.npmjs.org/css-modules-loader-core/-/css-modules-loader-core-1.1.0.tgz", "integrity": "sha1-WQhmgpShvs0mGuCkziGwtVHyHRY=", - "dev": true, "requires": { "icss-replace-symbols": "1.1.0", "postcss": "6.0.1", @@ -3013,14 +2904,12 @@ "has-flag": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-1.0.0.tgz", - "integrity": "sha1-nZ55MWXOAXoA8AQYxD+UKnsdEfo=", - "dev": true + "integrity": "sha1-nZ55MWXOAXoA8AQYxD+UKnsdEfo=" }, "postcss": { "version": "6.0.1", "resolved": "https://registry.npmjs.org/postcss/-/postcss-6.0.1.tgz", "integrity": "sha1-AA29H47vIXqjaLmiEsX8QLKo8/I=", - "dev": true, "requires": { "chalk": "^1.1.3", "source-map": "^0.5.6", @@ -3031,7 +2920,6 @@ "version": "1.1.0", "resolved": "https://registry.npmjs.org/postcss-modules-extract-imports/-/postcss-modules-extract-imports-1.1.0.tgz", "integrity": "sha1-thTJcgvmgW6u41+zpfqh26agXds=", - "dev": true, "requires": { "postcss": "^6.0.1" } @@ -3040,7 +2928,6 @@ "version": "3.2.3", "resolved": 
"https://registry.npmjs.org/supports-color/-/supports-color-3.2.3.tgz", "integrity": "sha1-ZawFBLOVQXHYpklGsq48u4pfVPY=", - "dev": true, "requires": { "has-flag": "^1.0.0" } @@ -3051,7 +2938,6 @@ "version": "1.3.0-rc0", "resolved": "https://registry.npmjs.org/css-select/-/css-select-1.3.0-rc0.tgz", "integrity": "sha1-b5MZaqrnN2ZuoQNqjLFKj8t6kjE=", - "dev": true, "requires": { "boolbase": "^1.0.0", "css-what": "2.1", @@ -3062,14 +2948,12 @@ "css-select-base-adapter": { "version": "0.1.0", "resolved": "https://registry.npmjs.org/css-select-base-adapter/-/css-select-base-adapter-0.1.0.tgz", - "integrity": "sha1-AQKz0UYw34bD65+p9UVicBBs+ZA=", - "dev": true + "integrity": "sha1-AQKz0UYw34bD65+p9UVicBBs+ZA=" }, "css-selector-tokenizer": { "version": "0.7.0", "resolved": "https://registry.npmjs.org/css-selector-tokenizer/-/css-selector-tokenizer-0.7.0.tgz", "integrity": "sha1-5piEdK6MlTR3v15+/s/OzNnPTIY=", - "dev": true, "requires": { "cssesc": "^0.1.0", "fastparse": "^1.1.1", @@ -3079,8 +2963,7 @@ "css-tree": { "version": "1.0.0-alpha25", "resolved": "https://registry.npmjs.org/css-tree/-/css-tree-1.0.0-alpha25.tgz", - "integrity": "sha512-XC6xLW/JqIGirnZuUWHXCHRaAjje2b3OIB0Vj5RIJo6mIi/AdJo30quQl5LxUl0gkXDIrTrFGbMlcZjyFplz1A==", - "dev": true, + "integrity": "sha1-G7+r+/bu708B2RCP8u3Qvi/jVZc=", "requires": { "mdn-data": "^1.0.0", "source-map": "^0.5.3" @@ -3089,32 +2972,27 @@ "css-unit-converter": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/css-unit-converter/-/css-unit-converter-1.1.1.tgz", - "integrity": "sha1-2bkoGtz9jO2TW9urqDeGiX9k6ZY=", - "dev": true + "integrity": "sha1-2bkoGtz9jO2TW9urqDeGiX9k6ZY=" }, "css-url-regex": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/css-url-regex/-/css-url-regex-1.1.0.tgz", - "integrity": "sha1-g4NCMMyfdMRX3lnuvRVD/uuDt+w=", - "dev": true + "integrity": "sha1-g4NCMMyfdMRX3lnuvRVD/uuDt+w=" }, "css-what": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/css-what/-/css-what-2.1.0.tgz", - "integrity": "sha1-lGfQMsOM+u+58teVASUwYvh/ob0=", - "dev": true + "integrity": "sha1-lGfQMsOM+u+58teVASUwYvh/ob0=" }, "cssesc": { "version": "0.1.0", "resolved": "https://registry.npmjs.org/cssesc/-/cssesc-0.1.0.tgz", - "integrity": "sha1-yBSQPkViM3GgR3tAEJqq++6t27Q=", - "dev": true + "integrity": "sha1-yBSQPkViM3GgR3tAEJqq++6t27Q=" }, "cssnano": { "version": "3.10.0", "resolved": "https://registry.npmjs.org/cssnano/-/cssnano-3.10.0.tgz", "integrity": "sha1-Tzj2zqK5sX+gFJDyPx3GjqZcHDg=", - "dev": true, "requires": { "autoprefixer": "^6.3.1", "decamelize": "^1.1.2", @@ -3154,7 +3032,6 @@ "version": "6.7.7", "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-6.7.7.tgz", "integrity": "sha1-Hb0cg1ZY41zj+ZhAmdsAWFx4IBQ=", - "dev": true, "requires": { "browserslist": "^1.7.6", "caniuse-db": "^1.0.30000634", @@ -3168,7 +3045,6 @@ "version": "1.7.7", "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-1.7.7.tgz", "integrity": "sha1-C9dnBCWL6CmyOYu1Dkti0aFmsLk=", - "dev": true, "requires": { "caniuse-db": "^1.0.30000639", "electron-to-chromium": "^1.2.7" @@ -3177,14 +3053,12 @@ "has-flag": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-1.0.0.tgz", - "integrity": "sha1-nZ55MWXOAXoA8AQYxD+UKnsdEfo=", - "dev": true + "integrity": "sha1-nZ55MWXOAXoA8AQYxD+UKnsdEfo=" }, "postcss": { "version": "5.2.18", "resolved": "https://registry.npmjs.org/postcss/-/postcss-5.2.18.tgz", - "integrity": 
"sha512-zrUjRRe1bpXKsX1qAJNJjqZViErVuyEkMTRrwu4ud4sbTtIBRmtaYDrHmcGgmrbsW3MHfmtIf+vJumgQn+PrXg==", - "dev": true, + "integrity": "sha1-ut+hSX1GJE9jkPWLMZgw2RB4U8U=", "requires": { "chalk": "^1.1.3", "js-base64": "^2.1.9", @@ -3196,7 +3070,6 @@ "version": "3.2.3", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-3.2.3.tgz", "integrity": "sha1-ZawFBLOVQXHYpklGsq48u4pfVPY=", - "dev": true, "requires": { "has-flag": "^1.0.0" } @@ -3207,7 +3080,6 @@ "version": "4.0.0", "resolved": "https://registry.npmjs.org/cssnano-preset-default/-/cssnano-preset-default-4.0.0.tgz", "integrity": "sha1-wzQoe099SfstFwqS+SFGVXiOO2s=", - "dev": true, "requires": { "css-declaration-sorter": "^3.0.0", "cssnano-util-raw-cache": "^4.0.0", @@ -3244,8 +3116,7 @@ "browserslist": { "version": "4.1.1", "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.1.1.tgz", - "integrity": "sha512-VBorw+tgpOtZ1BYhrVSVTzTt/3+vSE3eFUh0N2GCFK1HffceOaf32YS/bs6WiFhjDAblAFrx85jMy3BG9fBK2Q==", - "dev": true, + "integrity": "sha1-Mo60/xIVsS32WJ6auC+K2qT8jNY=", "requires": { "caniuse-lite": "^1.0.30000884", "electron-to-chromium": "^1.3.62", @@ -3255,8 +3126,7 @@ "caniuse-api": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/caniuse-api/-/caniuse-api-3.0.0.tgz", - "integrity": "sha512-bsTwuIg/BZZK/vreVTYYbSWoe2F+71P7K5QGEX+pT250DZbfU1MQ5prOKpPR+LL6uWKK3KMwMCAS74QB3Um1uw==", - "dev": true, + "integrity": "sha1-Xk2Q4idJYdRikZl99Znj7QCO5MA=", "requires": { "browserslist": "^4.0.0", "caniuse-lite": "^1.0.0", @@ -3267,14 +3137,12 @@ "caniuse-lite": { "version": "1.0.30000885", "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30000885.tgz", - "integrity": "sha512-cXKbYwpxBLd7qHyej16JazPoUacqoVuDhvR61U7Fr5vSxMUiodzcYa1rQYRYfZ5GexV03vGZHd722vNPLjPJGQ==", - "dev": true + "integrity": "sha1-6Inp+OflDnafKkljTJMriu5iKYQ=" }, "coa": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/coa/-/coa-2.0.1.tgz", - "integrity": "sha512-5wfTTO8E2/ja4jFSxePXlG5nRu5bBtL/r1HCIpJW/lzT6yDtKl0u0Z4o/Vpz32IpKmBn7HerheEZQgA9N2DarQ==", - "dev": true, + "integrity": "sha1-8/iwsVBz411wJj+xBCyywCPbOK8=", "requires": { "q": "^1.1.2" } @@ -3282,8 +3150,7 @@ "color": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/color/-/color-3.0.0.tgz", - "integrity": "sha512-jCpd5+s0s0t7p3pHQKpnJ0TpQKKdleP71LWcA0aqiljpiuAkOSUFN/dyH8ZwF0hRmFlrIuRhufds1QyEP9EB+w==", - "dev": true, + "integrity": "sha1-2SC0Mo1TSjrIKV1o971LpsQnvpo=", "requires": { "color-convert": "^1.9.1", "color-string": "^1.5.2" @@ -3292,8 +3159,7 @@ "color-string": { "version": "1.5.3", "resolved": "https://registry.npmjs.org/color-string/-/color-string-1.5.3.tgz", - "integrity": "sha512-dC2C5qeWoYkxki5UAXapdjqO672AM4vZuPGRQfO8b5HKuKGBbKWpITyDYN7TOFKvRW7kOgAn3746clDBMDJyQw==", - "dev": true, + "integrity": "sha1-ybvF8BtYtUkvPWhXRZy2WQziBMw=", "requires": { "color-name": "^1.0.0", "simple-swizzle": "^0.2.2" @@ -3302,8 +3168,7 @@ "csso": { "version": "3.5.1", "resolved": "https://registry.npmjs.org/csso/-/csso-3.5.1.tgz", - "integrity": "sha512-vrqULLffYU1Q2tLdJvaCYbONStnfkfimRxXNaGjxMldI0C7JPBC4rB1RyjhfdZ4m1frm8pM9uRPKH3d2knZ8gg==", - "dev": true, + "integrity": "sha1-e564vmFiiXPBsmHhadLwJACOdYs=", "requires": { "css-tree": "1.0.0-alpha.29" }, @@ -3311,8 +3176,7 @@ "css-tree": { "version": "1.0.0-alpha.29", "resolved": "https://registry.npmjs.org/css-tree/-/css-tree-1.0.0-alpha.29.tgz", - "integrity": "sha512-sRNb1XydwkW9IOci6iB2xmy8IGCj6r/fr+JWitvJ2JxQRPzN3T4AGGVWCMlVmVwM1gtgALJRmGIlWv5ppnGGkg==", - "dev": true, + "integrity": 
"sha1-P6nU7zFCy9HDAedmTB81K9gvWjk=", "requires": { "mdn-data": "~1.1.0", "source-map": "^0.5.3" @@ -3323,8 +3187,7 @@ "is-svg": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/is-svg/-/is-svg-3.0.0.tgz", - "integrity": "sha512-gi4iHK53LR2ujhLVVj+37Ykh9GLqYHX6JOVXbLAucaG/Cqw9xwdFOjDM2qeifLs1sF1npXXFvDu0r5HNgCMrzQ==", - "dev": true, + "integrity": "sha1-kyHb0pwhLlypnE+peUxxS8r6L3U=", "requires": { "html-comment-regex": "^1.1.0" } @@ -3332,8 +3195,7 @@ "js-yaml": { "version": "3.10.0", "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.10.0.tgz", - "integrity": "sha512-O2v52ffjLa9VeM43J4XocZE//WT9N0IiwDa3KSHH7Tu8CtH+1qM8SIZvnsTh6v+4yFy5KUY3BHUVwjpfAWsjIA==", - "dev": true, + "integrity": "sha1-LnhEFka9RoLpY/IrbpKCPDCcYtw=", "requires": { "argparse": "^1.0.7", "esprima": "^4.0.0" @@ -3342,20 +3204,17 @@ "mdn-data": { "version": "1.1.4", "resolved": "https://registry.npmjs.org/mdn-data/-/mdn-data-1.1.4.tgz", - "integrity": "sha512-FSYbp3lyKjyj3E7fMl6rYvUdX0FBXaluGqlFoYESWQlyUTq8R+wp0rkFxoYFqZlHCvsUXGjyJmLQSnXToYhOSA==", - "dev": true + "integrity": "sha1-ULXU/8RXUnZXPE7tuHgIEqhBnwE=" }, "normalize-url": { "version": "3.3.0", "resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-3.3.0.tgz", - "integrity": "sha512-U+JJi7duF1o+u2pynbp2zXDW2/PADgC30f0GsHZtRh+HOcXHnw137TrNlyxxRvWW5fjKd3bcLHPxofWuCjaeZg==", - "dev": true + "integrity": "sha1-suHE3E98bVd0PfczpPWXjRhlBVk=" }, "postcss-calc": { "version": "6.0.1", "resolved": "https://registry.npmjs.org/postcss-calc/-/postcss-calc-6.0.1.tgz", "integrity": "sha1-PSQXG79udinUIqQ26/5t2VEfQzA=", - "dev": true, "requires": { "css-unit-converter": "^1.1.1", "postcss": "^6.0.0", @@ -3367,7 +3226,6 @@ "version": "4.0.1", "resolved": "https://registry.npmjs.org/postcss-colormin/-/postcss-colormin-4.0.1.tgz", "integrity": "sha1-bxwYoBVbxpYT8v8ThD4uSuj/C74=", - "dev": true, "requires": { "browserslist": "^4.0.0", "color": "^3.0.0", @@ -3380,7 +3238,6 @@ "version": "4.0.0", "resolved": "https://registry.npmjs.org/postcss-convert-values/-/postcss-convert-values-4.0.0.tgz", "integrity": "sha1-d9d9mu0dxOaVbmUcw0nVMwWHb2I=", - "dev": true, "requires": { "postcss": "^6.0.0", "postcss-value-parser": "^3.0.0" @@ -3390,7 +3247,6 @@ "version": "4.0.0", "resolved": "https://registry.npmjs.org/postcss-discard-comments/-/postcss-discard-comments-4.0.0.tgz", "integrity": "sha1-loSimedrPpMmPvj9KtvxocCP2I0=", - "dev": true, "requires": { "postcss": "^6.0.0" } @@ -3399,7 +3255,6 @@ "version": "4.0.0", "resolved": "https://registry.npmjs.org/postcss-discard-duplicates/-/postcss-discard-duplicates-4.0.0.tgz", "integrity": "sha1-QvPCZ/hfqQngQsNXZ+z9Zcsr1yw=", - "dev": true, "requires": { "postcss": "^6.0.0" } @@ -3408,7 +3263,6 @@ "version": "4.0.0", "resolved": "https://registry.npmjs.org/postcss-discard-empty/-/postcss-discard-empty-4.0.0.tgz", "integrity": "sha1-VeGKWcdBKOOMfSgEvPpAVmEfuX8=", - "dev": true, "requires": { "postcss": "^6.0.0" } @@ -3417,7 +3271,6 @@ "version": "4.0.0", "resolved": "https://registry.npmjs.org/postcss-discard-overridden/-/postcss-discard-overridden-4.0.0.tgz", "integrity": "sha1-Sgv4WXh4TPH4HtLBwf2dlkodofo=", - "dev": true, "requires": { "postcss": "^6.0.0" } @@ -3425,8 +3278,7 @@ "postcss-merge-longhand": { "version": "4.0.5", "resolved": "https://registry.npmjs.org/postcss-merge-longhand/-/postcss-merge-longhand-4.0.5.tgz", - "integrity": "sha512-tw2obF6I2VhXhPMObQc1QpQO850m3arhqP3PcBAU7Tx70v73QF6brs9uK0XKMNuC7BPo6DW+fh07cGhrLL57HA==", - "dev": true, + "integrity": "sha1-AImNcjR/x+QLtWSxG9wIEZxZm1k=", 
"requires": { "css-color-names": "0.0.4", "postcss": "^6.0.0", @@ -3438,7 +3290,6 @@ "version": "4.0.1", "resolved": "https://registry.npmjs.org/postcss-merge-rules/-/postcss-merge-rules-4.0.1.tgz", "integrity": "sha1-Qw/Vmz8u0uivzQsxJ47aOYVKuxA=", - "dev": true, "requires": { "browserslist": "^4.0.0", "caniuse-api": "^3.0.0", @@ -3452,7 +3303,6 @@ "version": "3.1.1", "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-3.1.1.tgz", "integrity": "sha1-T4dfSvsMllc9XPTXQBGu4lCn6GU=", - "dev": true, "requires": { "dot-prop": "^4.1.1", "indexes-of": "^1.0.1", @@ -3465,7 +3315,6 @@ "version": "4.0.0", "resolved": "https://registry.npmjs.org/postcss-minify-font-values/-/postcss-minify-font-values-4.0.0.tgz", "integrity": "sha1-TMM9KD1qgXWQNudX75gdksvYW+0=", - "dev": true, "requires": { "postcss": "^6.0.0", "postcss-value-parser": "^3.0.0" @@ -3475,7 +3324,6 @@ "version": "4.0.0", "resolved": "https://registry.npmjs.org/postcss-minify-gradients/-/postcss-minify-gradients-4.0.0.tgz", "integrity": "sha1-P8ORZDnSepu4Bm23za2AFlDrCQ4=", - "dev": true, "requires": { "cssnano-util-get-arguments": "^4.0.0", "is-color-stop": "^1.0.0", @@ -3487,7 +3335,6 @@ "version": "4.0.0", "resolved": "https://registry.npmjs.org/postcss-minify-params/-/postcss-minify-params-4.0.0.tgz", "integrity": "sha1-BekWbuSMBa9lGYnOhNOcG015BnQ=", - "dev": true, "requires": { "alphanum-sort": "^1.0.0", "cssnano-util-get-arguments": "^4.0.0", @@ -3500,7 +3347,6 @@ "version": "4.0.0", "resolved": "https://registry.npmjs.org/postcss-minify-selectors/-/postcss-minify-selectors-4.0.0.tgz", "integrity": "sha1-sen2xGNBbT/Nyybnt4XZX2FXiq0=", - "dev": true, "requires": { "alphanum-sort": "^1.0.0", "has": "^1.0.0", @@ -3512,7 +3358,6 @@ "version": "3.1.1", "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-3.1.1.tgz", "integrity": "sha1-T4dfSvsMllc9XPTXQBGu4lCn6GU=", - "dev": true, "requires": { "dot-prop": "^4.1.1", "indexes-of": "^1.0.1", @@ -3525,7 +3370,6 @@ "version": "4.0.0", "resolved": "https://registry.npmjs.org/postcss-normalize-charset/-/postcss-normalize-charset-4.0.0.tgz", "integrity": "sha1-JFJyknAtXoEp6vo9HeSe1RpqtzA=", - "dev": true, "requires": { "postcss": "^6.0.0" } @@ -3534,7 +3378,6 @@ "version": "4.0.0", "resolved": "https://registry.npmjs.org/postcss-normalize-url/-/postcss-normalize-url-4.0.0.tgz", "integrity": "sha1-t6nIrSbPJmlMFG6y1ovQz0mVbw0=", - "dev": true, "requires": { "is-absolute-url": "^2.0.0", "normalize-url": "^3.0.0", @@ -3545,8 +3388,7 @@ "postcss-ordered-values": { "version": "4.1.0", "resolved": "https://registry.npmjs.org/postcss-ordered-values/-/postcss-ordered-values-4.1.0.tgz", - "integrity": "sha512-gbqbEiONKKJgoOKhtzBjFqmHSzviPL4rv0ACVcFS7wxWXBY07agFXRQ7Y3eMGV0ZORzQXp2NGnj0c+imJG0NcA==", - "dev": true, + "integrity": "sha1-LHadXUSqPHyQe4vi6ZftGd/Y1Qo=", "requires": { "cssnano-util-get-arguments": "^4.0.0", "postcss": "^6.0.0", @@ -3557,7 +3399,6 @@ "version": "4.0.1", "resolved": "https://registry.npmjs.org/postcss-reduce-initial/-/postcss-reduce-initial-4.0.1.tgz", "integrity": "sha1-8tWPUM6isMXcEnjW6l7Q/1gpwpM=", - "dev": true, "requires": { "browserslist": "^4.0.0", "caniuse-api": "^3.0.0", @@ -3569,7 +3410,6 @@ "version": "4.0.0", "resolved": "https://registry.npmjs.org/postcss-reduce-transforms/-/postcss-reduce-transforms-4.0.0.tgz", "integrity": "sha1-9kX8dEDDUnT0DegQThStcWPt8Yg=", - "dev": true, "requires": { "cssnano-util-get-match": "^4.0.0", "has": "^1.0.0", @@ -3581,7 +3421,6 @@ "version": "4.0.0", "resolved": 
"https://registry.npmjs.org/postcss-svgo/-/postcss-svgo-4.0.0.tgz", "integrity": "sha1-wLutAlIPxjbJ14sOhAPi5RXDIoU=", - "dev": true, "requires": { "is-svg": "^3.0.0", "postcss": "^6.0.0", @@ -3593,7 +3432,6 @@ "version": "4.0.0", "resolved": "https://registry.npmjs.org/postcss-unique-selectors/-/postcss-unique-selectors-4.0.0.tgz", "integrity": "sha1-BMHpdkx1h0JhMDQCxB8Ol2n8VQE=", - "dev": true, "requires": { "alphanum-sort": "^1.0.0", "postcss": "^6.0.0", @@ -3603,8 +3441,7 @@ "reduce-css-calc": { "version": "2.1.4", "resolved": "https://registry.npmjs.org/reduce-css-calc/-/reduce-css-calc-2.1.4.tgz", - "integrity": "sha512-i/vWQbyd3aJRmip9OVSN9V6nIjLf/gg/ctxb0CpvHWtcRysFl/ngDBQD+rqavxdw/doScA3GMBXhzkHQ4GCzFQ==", - "dev": true, + "integrity": "sha1-wg6c2oRFrXPU/0vqlgxvg1N5Fwg=", "requires": { "css-unit-converter": "^1.1.1", "postcss-value-parser": "^3.3.0" @@ -3613,8 +3450,7 @@ "svgo": { "version": "1.0.5", "resolved": "https://registry.npmjs.org/svgo/-/svgo-1.0.5.tgz", - "integrity": "sha512-nYrifviB77aNKDNKKyuay3M9aYiK6Hv5gJVDdjj2ZXTQmI8WZc8+UPLR5IpVlktJfSu3co/4XcWgrgI6seGBPg==", - "dev": true, + "integrity": "sha1-cEA2TAYqBTirrP9EAc6momp6OJo=", "requires": { "coa": "~2.0.1", "colors": "~1.1.2", @@ -3637,20 +3473,17 @@ "cssnano-util-get-arguments": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/cssnano-util-get-arguments/-/cssnano-util-get-arguments-4.0.0.tgz", - "integrity": "sha1-7ToIKZ8h11dBsg87gfGU7UnMFQ8=", - "dev": true + "integrity": "sha1-7ToIKZ8h11dBsg87gfGU7UnMFQ8=" }, "cssnano-util-get-match": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/cssnano-util-get-match/-/cssnano-util-get-match-4.0.0.tgz", - "integrity": "sha1-wOTKB/U4a7F+xeUiULT1lhNlFW0=", - "dev": true + "integrity": "sha1-wOTKB/U4a7F+xeUiULT1lhNlFW0=" }, "cssnano-util-raw-cache": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/cssnano-util-raw-cache/-/cssnano-util-raw-cache-4.0.0.tgz", "integrity": "sha1-vgooVuJfGF9feivMBiTii38Xmp8=", - "dev": true, "requires": { "postcss": "^6.0.0" } @@ -3658,14 +3491,12 @@ "cssnano-util-same-parent": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/cssnano-util-same-parent/-/cssnano-util-same-parent-4.0.0.tgz", - "integrity": "sha1-0qPeEDmqmLxOwlAB+gUDMMKhbaw=", - "dev": true + "integrity": "sha1-0qPeEDmqmLxOwlAB+gUDMMKhbaw=" }, "csso": { "version": "2.3.2", "resolved": "https://registry.npmjs.org/csso/-/csso-2.3.2.tgz", "integrity": "sha1-3dUsWHAz9J6Utx/FVWnyUuj/X4U=", - "dev": true, "requires": { "clap": "^1.0.9", "source-map": "^0.5.3" @@ -3695,8 +3526,7 @@ "deasync": { "version": "0.1.13", "resolved": "https://registry.npmjs.org/deasync/-/deasync-0.1.13.tgz", - "integrity": "sha512-/6ngYM7AapueqLtvOzjv9+11N2fHDSrkxeMF1YPE20WIfaaawiBg+HZH1E5lHrcJxlKR42t6XPOEmMmqcAsU1g==", - "dev": true, + "integrity": "sha1-gVwrabvREXyuVwFSzYlWYcCfIOo=", "requires": { "bindings": "~1.2.1", "nan": "^2.0.7" @@ -3705,7 +3535,7 @@ "debug": { "version": "2.6.9", "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "integrity": "sha1-XRKFFd8TT/Mn6QpMk/Tgd6U2NB8=", "requires": { "ms": "2.0.0" } @@ -3723,7 +3553,7 @@ "decompress-zip": { "version": "0.3.1", "resolved": "https://registry.npmjs.org/decompress-zip/-/decompress-zip-0.3.1.tgz", - "integrity": "sha512-pNGzi0RIpLA/CqrMQoSuh/1+YiVGJSEhQeibgoZQEdPFQOhO5pvqim3sp1qMvio3+mkonUQ1Akjdw8RgvV/RsA==", + "integrity": "sha1-Pkxpcv6A2Juw+pVC4wBEAZrm9iY=", "optional": 
true, "requires": { "binary": "^0.3.0", @@ -3764,7 +3594,7 @@ "deep-eql": { "version": "3.0.1", "resolved": "https://registry.npmjs.org/deep-eql/-/deep-eql-3.0.1.tgz", - "integrity": "sha512-+QeIQyN5ZuO+3Uk5DYh6/1eKO0m0YmJFGNmFHGACpf1ClL1nmlV/p4gNgbl2pJGxgXb4faqo6UE+M5ACEMyVcw==", + "integrity": "sha1-38lARACtHI/gI+faHfHBR8S0RN8=", "requires": { "type-detect": "^4.0.0" } @@ -3772,14 +3602,12 @@ "deep-is": { "version": "0.1.3", "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.3.tgz", - "integrity": "sha1-s2nW+128E+7PUk+RsHD+7cNXzzQ=", - "dev": true + "integrity": "sha1-s2nW+128E+7PUk+RsHD+7cNXzzQ=" }, "defaults": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/defaults/-/defaults-1.0.3.tgz", "integrity": "sha1-xlYFHpgX2f8I7YgUd/P+QBnz730=", - "dev": true, "requires": { "clone": "^1.0.2" } @@ -3788,7 +3616,6 @@ "version": "1.1.2", "resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.1.2.tgz", "integrity": "sha1-g6c/L+pWmJj7c3GTyPhzyvbUXJQ=", - "dev": true, "requires": { "foreach": "^2.0.5", "object-keys": "^1.0.8" @@ -3797,15 +3624,14 @@ "object-keys": { "version": "1.0.11", "resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.0.11.tgz", - "integrity": "sha1-xUYBd4rVYPEULODgG8yotW0TQm0=", - "dev": true + "integrity": "sha1-xUYBd4rVYPEULODgG8yotW0TQm0=" } } }, "define-property": { "version": "2.0.2", "resolved": "https://registry.npmjs.org/define-property/-/define-property-2.0.2.tgz", - "integrity": "sha512-jwK2UV4cnPpbcG7+VRARKTZPUWowwXA8bzH5NP6ud0oeAxyYPuGZUAC7hMugpCdz4BeSZl2Dl9k66CHJ/46ZYQ==", + "integrity": "sha1-1Flono1lS6d+AqgX+HENcCyxbp0=", "requires": { "is-descriptor": "^1.0.2", "isobject": "^3.0.1" @@ -3814,7 +3640,7 @@ "is-accessor-descriptor": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz", - "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==", + "integrity": "sha1-FpwvbT3x+ZJhgHI2XJsOofaHhlY=", "requires": { "kind-of": "^6.0.0" } @@ -3822,7 +3648,7 @@ "is-data-descriptor": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz", - "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==", + "integrity": "sha1-2Eh2Mh0Oet0DmQQGq7u9NrqSaMc=", "requires": { "kind-of": "^6.0.0" } @@ -3830,7 +3656,7 @@ "is-descriptor": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz", - "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==", + "integrity": "sha1-OxWXRqZmBLBPjIFSS6NlxfFNhuw=", "requires": { "is-accessor-descriptor": "^1.0.0", "is-data-descriptor": "^1.0.0", @@ -3845,7 +3671,7 @@ "kind-of": { "version": "6.0.2", "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.2.tgz", - "integrity": "sha512-s5kLOcnH0XqDO+FvuaLX8DDjZ18CGFk7VygH40QoKPUQhW4e2rvM0rwUq0t8IQDOwYSeLK01U90OjzBTme2QqA==" + "integrity": "sha1-ARRrNqYhjmTljzqNZt5df8b20FE=" } } }, @@ -3940,7 +3766,6 @@ "version": "4.0.0", "resolved": "https://registry.npmjs.org/detect-indent/-/detect-indent-4.0.0.tgz", "integrity": "sha1-920GQ1LN9Docts5hnE7jqUdd4gg=", - "dev": true, "requires": { "repeating": "^2.0.0" } @@ -3948,7 +3773,7 @@ "detective": { "version": "4.7.1", "resolved": "https://registry.npmjs.org/detective/-/detective-4.7.1.tgz", - "integrity": 
"sha512-H6PmeeUcZloWtdt4DAkFyzFL94arpHr3NOwwmVILFiy+9Qd4JTxxXrzfyGk/lmct2qVGBwTSwSXagqu2BxmWig==", + "integrity": "sha1-DspzFDOEQv67bWXaVMELscgrJG4=", "requires": { "acorn": "^5.2.1", "defined": "^1.0.0" @@ -3957,12 +3782,12 @@ "diff": { "version": "3.5.0", "resolved": "https://registry.npmjs.org/diff/-/diff-3.5.0.tgz", - "integrity": "sha512-A46qtFgd+g7pDZinpnwiRJtxbC1hpgf0uzP3iG89scHk0AUC7A1TGxf5OiiOUv/JMZR8GOt8hL900hV0bOy5xA==" + "integrity": "sha1-gAwN0eCov7yVg1wgKtIg/jF+WhI=" }, "diffie-hellman": { "version": "5.0.3", "resolved": "https://registry.npmjs.org/diffie-hellman/-/diffie-hellman-5.0.3.tgz", - "integrity": "sha512-kqag/Nl+f3GwyK25fhUMYj81BUOrZ9IuJsjIcDE5icNM9FJHAVm3VcUDxdLPoQtTuUylWm6ZIknYJwwaPxsUzg==", + "integrity": "sha1-QOjumPVaIUlgcUaSHGPhrl89KHU=", "requires": { "bn.js": "^4.1.0", "miller-rabin": "^4.0.0", @@ -3972,12 +3797,12 @@ "djbx": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/djbx/-/djbx-1.0.3.tgz", - "integrity": "sha512-Y8ph/85fEChtSgSgw1asP4cGLNLxlbnDBnQMpX8+MOpaiYyOn8assnSpIrwHuoGZV/sE1DUbKh9aeKlWZdHKEg==" + "integrity": "sha1-xYlm0sBdDdX9Q8eHiUsFnubMmxg=" }, "doctrine": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-2.1.0.tgz", - "integrity": "sha512-35mSku4ZXK0vfCuHEDAwt55dg2jNajHZ1odvF+8SSr82EsZY4QmXfuWso8oEd8zRhVObSN18aM0CjSdoBX7zIw==", + "integrity": "sha1-XNAfwQFiG0LEzX9dGmYkNxbT850=", "dev": true, "requires": { "esutils": "^2.0.2" @@ -3987,7 +3812,6 @@ "version": "0.1.0", "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-0.1.0.tgz", "integrity": "sha1-BzxpdUbOB4DOI75KKOKT5AvDDII=", - "dev": true, "requires": { "domelementtype": "~1.1.1", "entities": "~1.1.1" @@ -3996,28 +3820,24 @@ "domelementtype": { "version": "1.1.3", "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-1.1.3.tgz", - "integrity": "sha1-vSh3PiZCiBrsUVRJJCmcXNgiGFs=", - "dev": true + "integrity": "sha1-vSh3PiZCiBrsUVRJJCmcXNgiGFs=" } } }, "domain-browser": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/domain-browser/-/domain-browser-1.2.0.tgz", - "integrity": "sha512-jnjyiM6eRyZl2H+W8Q/zLMA481hzi0eszAaBUzIVnmYVDBbnLxVNnfu1HgEBvCbL+71FrxMl3E6lpKH7Ge3OXA==", - "dev": true + "integrity": "sha1-PTH1AZGmdJ3RN1p/Ui6CPULlTto=" }, "domelementtype": { "version": "1.3.0", "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-1.3.0.tgz", - "integrity": "sha1-sXrtguirWeUt2cGbF1bg/BhyBMI=", - "dev": true + "integrity": "sha1-sXrtguirWeUt2cGbF1bg/BhyBMI=" }, "domhandler": { "version": "2.4.2", "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-2.4.2.tgz", - "integrity": "sha512-JiK04h0Ht5u/80fdLMCEmV4zkNh2BcoMFBmZ/91WtYZ8qVXSKjiw7fXMgFPnHcSZgOo3XdinHvmnDUeMf5R4wA==", - "dev": true, + "integrity": "sha1-iAUJfpM9ZehVRvcm1g9euItE+AM=", "requires": { "domelementtype": "1" } @@ -4026,7 +3846,6 @@ "version": "1.5.1", "resolved": "https://registry.npmjs.org/domutils/-/domutils-1.5.1.tgz", "integrity": "sha1-3NhIiib1Y9YQeeSMn3t+Mjc2gs8=", - "dev": true, "requires": { "dom-serializer": "0", "domelementtype": "1" @@ -4035,8 +3854,7 @@ "dot-prop": { "version": "4.2.0", "resolved": "https://registry.npmjs.org/dot-prop/-/dot-prop-4.2.0.tgz", - "integrity": "sha512-tUMXrxlExSW6U2EXiiKGSBVdYgtV8qlHL+C10TsW4PURY/ic+eaysnSkwB4kA/mBlCyy/IKDJ+Lc3wbWeaXtuQ==", - "dev": true, + "integrity": "sha1-HxngwuGqDjJ5fEl5nyg3rGr2nFc=", "requires": { "is-obj": "^1.0.0" } @@ -4044,13 +3862,12 @@ "dotenv": { "version": "5.0.1", "resolved": "https://registry.npmjs.org/dotenv/-/dotenv-5.0.1.tgz", 
- "integrity": "sha512-4As8uPrjfwb7VXC+WnLCbXK7y+Ueb2B3zgNCePYfhxS1PYeaO1YTeplffTEcbfLhvFNGLAz90VvJs9yomG7bow==", - "dev": true + "integrity": "sha1-pTF0Wb09eauIz/bkQFemo/ux/O8=" }, "dropzone": { "version": "5.5.1", "resolved": "https://registry.npmjs.org/dropzone/-/dropzone-5.5.1.tgz", - "integrity": "sha512-3VduRWLxx9hbVr42QieQN25mx/I61/mRdUSuxAmDGdDqZIN8qtP7tcKMa3KfpJjuGjOJGYYUzzeq6eGDnkzesA==" + "integrity": "sha1-BuL1E+YdaqNj1LVW8YV09Hz3uiY=" }, "ecc-jsbn": { "version": "0.1.1", @@ -4064,8 +3881,7 @@ "editorconfig": { "version": "0.15.0", "resolved": "https://registry.npmjs.org/editorconfig/-/editorconfig-0.15.0.tgz", - "integrity": "sha512-j7JBoj/bpNzvoTQylfRZSc85MlLNKWQiq5y6gwKhmqD2h1eZ+tH4AXbkhEJD468gjDna/XMx2YtSkCxBRX9OGg==", - "dev": true, + "integrity": "sha1-tt1KC2ueds5I4Ga9wVOBrruIBP0=", "requires": { "@types/commander": "^2.11.0", "@types/semver": "^5.4.0", @@ -4083,8 +3899,12 @@ "electron-to-chromium": { "version": "1.3.67", "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.3.67.tgz", - "integrity": "sha512-h3zEBLdHvsKfaXv1SHAtykJyNtwYFEKkrWGSFyW1BzGgPQ4ykAzD5Hd8C5MZGTAEhkCKmtyIwYUrapsI0xfKww==", - "dev": true + "integrity": "sha1-Xo8/+sibSwQCx+GlZb4G86EJq7w=" + }, + "element-matches": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/element-matches/-/element-matches-0.1.2.tgz", + "integrity": "sha512-yWh1otcs3OKUWDvu/IxyI36ZI3WNaRZlI0uG/DK6fu0pap0VYZ0J5pEGTk1zakme+hT0OKHwhlHc0N5TJhY6yQ==" }, "elliptic": { "version": "6.4.0", @@ -4103,8 +3923,7 @@ "emojis-list": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/emojis-list/-/emojis-list-2.1.0.tgz", - "integrity": "sha1-TapNnbAPmBmIDHn6RXrlsJof04k=", - "dev": true + "integrity": "sha1-TapNnbAPmBmIDHn6RXrlsJof04k=" }, "encodeurl": { "version": "1.0.2", @@ -4114,7 +3933,7 @@ "end-of-stream": { "version": "1.4.1", "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.1.tgz", - "integrity": "sha512-1MkrZNvWTKCaigbn+W15elq2BB/L22nqrSY5DKlo3X6+vclJm8Bb5djXJBmEX6fS3+zCh/F4VBK5Z2KxJt4s2Q==", + "integrity": "sha1-7SljTRm6ukY7bOa4CjchPqtx7EM=", "requires": { "once": "^1.4.0" } @@ -4122,8 +3941,7 @@ "entities": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/entities/-/entities-1.1.1.tgz", - "integrity": "sha1-blwtClYhtdra7O+AuQ7ftc13cvA=", - "dev": true + "integrity": "sha1-blwtClYhtdra7O+AuQ7ftc13cvA=" }, "error-ex": { "version": "1.3.1", @@ -4136,8 +3954,7 @@ "es-abstract": { "version": "1.12.0", "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.12.0.tgz", - "integrity": "sha512-C8Fx/0jFmV5IPoMOFPA9P9G5NtqW+4cOPit3MIuvR2t7Ag2K15EJTpxnHAYTzL+aYQJIESYeXZmDBfOBE1HcpA==", - "dev": true, + "integrity": "sha1-nbvdJ8aFbwABQhyhh4LXhr+KYWU=", "requires": { "es-to-primitive": "^1.1.1", "function-bind": "^1.1.1", @@ -4150,7 +3967,6 @@ "version": "1.1.1", "resolved": "https://registry.npmjs.org/es-to-primitive/-/es-to-primitive-1.1.1.tgz", "integrity": "sha1-RTVSSKiJeQNLZ5Lhm7gfK3l13Q0=", - "dev": true, "requires": { "is-callable": "^1.1.1", "is-date-object": "^1.0.1", @@ -4170,8 +3986,7 @@ "escodegen": { "version": "1.9.1", "resolved": "https://registry.npmjs.org/escodegen/-/escodegen-1.9.1.tgz", - "integrity": "sha512-6hTjO1NAWkHnDk3OqQ4YrCuwwmGHL9S3nPlzBOUG/R44rda3wLNrfvQ5fkSGjyhHFKM7ALPKcKGrwvCLe0lC7Q==", - "dev": true, + "integrity": "sha1-264X75bI5L7bE1b0UE+kzC98t+I=", "requires": { "esprima": "^3.1.3", "estraverse": "^4.2.0", @@ -4183,14 +3998,12 @@ "esprima": { "version": "3.1.3", "resolved": 
"https://registry.npmjs.org/esprima/-/esprima-3.1.3.tgz", - "integrity": "sha1-/cpRzuYTOJXjyI1TXOSdv/YqRjM=", - "dev": true + "integrity": "sha1-/cpRzuYTOJXjyI1TXOSdv/YqRjM=" }, "source-map": { "version": "0.6.1", "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "dev": true, + "integrity": "sha1-dHIq8y6WFOnCh6jQu95IteLxomM=", "optional": true } } @@ -4198,7 +4011,7 @@ "eslint": { "version": "5.5.0", "resolved": "https://registry.npmjs.org/eslint/-/eslint-5.5.0.tgz", - "integrity": "sha512-m+az4vYehIJgl1Z0gb25KnFXeqQRdNreYsei1jdvkd9bB+UNQD3fsuiC2AWSQ56P+/t++kFSINZXFbfai+krOw==", + "integrity": "sha1-hVf8zqtRQagZfan/2ZBPifZEJcY=", "dev": true, "requires": { "@babel/code-frame": "^7.0.0", @@ -4244,7 +4057,7 @@ "ajv": { "version": "6.5.3", "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.5.3.tgz", - "integrity": "sha512-LqZ9wY+fx3UMiiPd741yB2pj3hhil+hQc8taf4o2QGRFpWgZ2V5C8HA165DY9sS3fJwsk7uT7ZlFEyC3Ig3lLg==", + "integrity": "sha1-caVp0Yns9PTzISJP7LFm8HHdkPk=", "dev": true, "requires": { "fast-deep-equal": "^2.0.1", @@ -4262,7 +4075,7 @@ "ansi-styles": { "version": "3.2.1", "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "integrity": "sha1-QfuyAkPlCxK+DwS43tvwdSDOhB0=", "dev": true, "requires": { "color-convert": "^1.9.0" @@ -4271,7 +4084,7 @@ "chalk": { "version": "2.4.1", "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.1.tgz", - "integrity": "sha512-ObN6h1v2fTJSmUXoS3nMQ92LbDK9be4TV+6G+omQlGJFdcUX5heKi1LZ1YnRMIgwTLEj3E24bT6tYni50rlCfQ==", + "integrity": "sha1-GMSasWoDe26wFSzIPjRxM4IVtm4=", "dev": true, "requires": { "ansi-styles": "^3.2.1", @@ -4282,7 +4095,7 @@ "cross-spawn": { "version": "6.0.5", "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-6.0.5.tgz", - "integrity": "sha512-eTVLrBSt7fjbDygz805pMnstIs2VTBNkRm0qxZd+M7A5XDdxVRWO5MxGBXZhjY4cqLYLdtrGqRf8mBPmzwSpWQ==", + "integrity": "sha1-Sl7Hxk364iw6FBJNus3uhG2Ay8Q=", "dev": true, "requires": { "nice-try": "^1.0.4", @@ -4295,7 +4108,7 @@ "debug": { "version": "3.2.5", "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.5.tgz", - "integrity": "sha512-D61LaDQPQkxJ5AUM2mbSJRbPkNs/TmdmOeLAi1hgDkpDfIfetSrjmWhccwtuResSwMbACjx/xXQofvM9CE/aeg==", + "integrity": "sha1-wkGPv9ein01PcP9M6mBNS2TEZAc=", "dev": true, "requires": { "ms": "^2.1.1" @@ -4310,7 +4123,7 @@ "js-yaml": { "version": "3.12.0", "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.12.0.tgz", - "integrity": "sha512-PIt2cnwmPfL4hKNwqeiuz4bKfnzHTBv6HyVgjahA6mPLwPDzjDWrplJBMjHUFxku/N3FlmrbyPclad+I+4mJ3A==", + "integrity": "sha1-6u1lbsg0TxD1J8a/obbiJE3hZ9E=", "dev": true, "requires": { "argparse": "^1.0.7", @@ -4320,19 +4133,19 @@ "json-schema-traverse": { "version": "0.4.1", "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", - "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", + "integrity": "sha1-afaofZUTq4u4/mO9sJecRI5oRmA=", "dev": true }, "ms": { "version": "2.1.1", "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.1.tgz", - "integrity": "sha512-tgp+dl5cGk28utYktBsrFqA7HKgrhgPsg6Z/EfhWI4gl1Hwq8B/GmY/0oXZ6nF8hDVesS/FpnYaD/kOWhYQvyg==", + "integrity": "sha1-MKWGTrPrsKZvLr5tcnrwagnYbgo=", "dev": true }, "semver": { "version": "5.5.1", "resolved": 
"https://registry.npmjs.org/semver/-/semver-5.5.1.tgz", - "integrity": "sha512-PqpAxfrEhlSUWge8dwIp4tZnQ25DIOthpiaHNIthsjEFQD6EvqUKUDM7L8O2rShkFccYo1VjJR0coWfNkCubRw==", + "integrity": "sha1-ff3YgUvbfKvHvg+x1zTPtmyUBHc=", "dev": true }, "strip-ansi": { @@ -4355,7 +4168,7 @@ "eslint-scope": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-4.0.0.tgz", - "integrity": "sha512-1G6UTDi7Jc1ELFwnR58HV4fK9OQK4S6N985f166xqXxpjU6plxFISJa2Ba9KCQuFa8RCnj/lSFJbHo7UFDBnUA==", + "integrity": "sha1-UL8wcekzi83EMzF5Sgy1M/ATYXI=", "dev": true, "requires": { "esrecurse": "^4.1.0", @@ -4365,19 +4178,19 @@ "eslint-utils": { "version": "1.3.1", "resolved": "https://registry.npmjs.org/eslint-utils/-/eslint-utils-1.3.1.tgz", - "integrity": "sha512-Z7YjnIldX+2XMcjr7ZkgEsOj/bREONV60qYeB/bjMAqqqZ4zxKyWX+BOUkdmRmA9riiIPVvo5x86m5elviOk0Q==", + "integrity": "sha1-moUbqJ7nxGA0b5fPiTnHKYgn5RI=", "dev": true }, "eslint-visitor-keys": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-1.0.0.tgz", - "integrity": "sha512-qzm/XxIbxm/FHyH341ZrbnMUpe+5Bocte9xkmFMzPMjRaZMcXww+MpBptFvtU+79L362nqiLhekCxCxDPaUMBQ==", + "integrity": "sha1-PzGA+y4pEBdxastMnW1bXDSmqB0=", "dev": true }, "espree": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/espree/-/espree-4.0.0.tgz", - "integrity": "sha512-kapdTCt1bjmspxStVKX6huolXVV5ZfyZguY1lcfhVVZstce3bqxH9mcLzNn3/mlgW6wQ732+0fuG9v7h0ZQoKg==", + "integrity": "sha1-JTmY8goPgttdhmOFeZ2RKoOjZjQ=", "dev": true, "requires": { "acorn": "^5.6.0", @@ -4387,7 +4200,7 @@ "acorn-jsx": { "version": "4.1.1", "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-4.1.1.tgz", - "integrity": "sha512-JY+iV6r+cO21KtntVvFkD+iqjtdpRUpGqKWgfkCdZq1R+kbreEl8EcdcJR4SmiIgsIQT33s6QzheQ9a275Q8xw==", + "integrity": "sha1-6OQeSOov4MiWdAYQq2pP/YrdIl4=", "dev": true, "requires": { "acorn": "^5.0.3" @@ -4398,13 +4211,12 @@ "esprima": { "version": "4.0.1", "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", - "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==", - "dev": true + "integrity": "sha1-E7BM2z5sXRnfkatph6hpVhmwqnE=" }, "esquery": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.0.1.tgz", - "integrity": "sha512-SmiyZ5zIWH9VM+SRUReLS5Q8a7GxtRdxEBVZpm98rJM7Sb+A9DVCndXfkeFUd3byderg+EbDkfnevfCwynWaNA==", + "integrity": "sha1-QGxRZYsfWZGl+bYrHcJbAOPlxwg=", "dev": true, "requires": { "estraverse": "^4.0.0" @@ -4413,7 +4225,7 @@ "esrecurse": { "version": "4.2.1", "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.2.1.tgz", - "integrity": "sha512-64RBB++fIOAXPw3P9cy89qfMlvZEXZkqqJkjqqXIvzP5ezRZjW+lPWjw35UX/3EhUPFYbg5ER4JYgDw4007/DQ==", + "integrity": "sha1-AHo7n9vCs7uH5IeeoZyS/b05Qs8=", "dev": true, "requires": { "estraverse": "^4.1.0" @@ -4422,14 +4234,12 @@ "estraverse": { "version": "4.2.0", "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-4.2.0.tgz", - "integrity": "sha1-De4/7TH81GlhjOc0IJn8GvoL2xM=", - "dev": true + "integrity": "sha1-De4/7TH81GlhjOc0IJn8GvoL2xM=" }, "esutils": { "version": "2.0.2", "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.2.tgz", - "integrity": "sha1-Cr9PHKpbyx96nYrMbepPqqBLrJs=", - "dev": true + "integrity": "sha1-Cr9PHKpbyx96nYrMbepPqqBLrJs=" }, "etag": { "version": "1.8.1", @@ -4439,7 +4249,7 @@ "ev-emitter": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/ev-emitter/-/ev-emitter-1.1.1.tgz", - "integrity": 
"sha512-ipiDYhdQSCZ4hSbX4rMW+XzNKMD1prg/sTvoVmSLkuQ1MVlwjJQQA+sW8tMYR3BLUr9KjodFV4pvzunvRhd33Q==" + "integrity": "sha1-jxiwzlx2pdGAF/ccCnlcZbkTjyo=" }, "events": { "version": "1.1.1", @@ -4449,7 +4259,7 @@ "evp_bytestokey": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/evp_bytestokey/-/evp_bytestokey-1.0.3.tgz", - "integrity": "sha512-/f2Go4TognH/KvCISP7OUsHn85hT9nUkxxA9BEWxFn+Oj9o8ZNLm/40hdlgSLyuOimsrTKLUMEorQexp/aPQeA==", + "integrity": "sha1-f8vbGY3HGVlDLv4ThCaE4FJaywI=", "requires": { "md5.js": "^1.3.4", "safe-buffer": "^5.1.1" @@ -4511,14 +4321,14 @@ "qs": { "version": "6.5.1", "resolved": "https://registry.npmjs.org/qs/-/qs-6.5.1.tgz", - "integrity": "sha512-eRzhrN1WSINYCDCbrz796z37LOe3m5tmW7RQf6oBntukAG1nmovJvhnwHHRMAfeoItc1m2Hk02WER2aQ/iqs+A==" + "integrity": "sha1-NJzfbu+J7EXBLX1es/wMhwNDptg=" } } }, "express-session": { "version": "1.15.6", "resolved": "https://registry.npmjs.org/express-session/-/express-session-1.15.6.tgz", - "integrity": "sha512-r0nrHTCYtAMrFwZ0kBzZEXa1vtPVrw0dKvGSrKP4dahwBQ1BJpF2/y1Pp4sCD/0kvxV4zZeclyvfmw0B4RMJQA==", + "integrity": "sha1-R7QWDIj0KrcP6KUI4xy/92dXqwo=", "requires": { "cookie": "0.3.1", "cookie-signature": "1.0.6", @@ -4548,7 +4358,7 @@ "is-extendable": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz", - "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==", + "integrity": "sha1-p0cPnkJnM9gb2B4RVSZOOjUHyrQ=", "requires": { "is-plain-object": "^2.0.4" } @@ -4558,7 +4368,7 @@ "external-editor": { "version": "3.0.3", "resolved": "https://registry.npmjs.org/external-editor/-/external-editor-3.0.3.tgz", - "integrity": "sha512-bn71H9+qWoOQKyZDo25mOMVpSmXROAsTJVVVYzrrtol3d4y+AsKjf4Iwl2Q+IuT0kFSQ1qo166UuIwqYq7mGnA==", + "integrity": "sha1-WGbbKal4Jtvkvzr9JAcOrZ6kOic=", "dev": true, "requires": { "chardet": "^0.7.0", @@ -4569,7 +4379,7 @@ "iconv-lite": { "version": "0.4.24", "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", - "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", + "integrity": "sha1-ICK0sl+93CHS9SSXSkdKr+czkIs=", "dev": true, "requires": { "safer-buffer": ">= 2.1.2 < 3" @@ -4594,7 +4404,6 @@ "version": "2.1.0", "resolved": "https://registry.npmjs.org/falafel/-/falafel-2.1.0.tgz", "integrity": "sha1-lrsXdh2rqU9G0AFzizzt86Z/4Gw=", - "dev": true, "requires": { "acorn": "^5.0.0", "foreach": "^2.0.5", @@ -4605,14 +4414,12 @@ "isarray": { "version": "0.0.1", "resolved": "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz", - "integrity": "sha1-ihis/Kmo9Bd+Cav8YDiTmwXR7t8=", - "dev": true + "integrity": "sha1-ihis/Kmo9Bd+Cav8YDiTmwXR7t8=" }, "object-keys": { "version": "1.0.12", "resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.0.12.tgz", - "integrity": "sha512-FTMyFUm2wBcGHnH2eXmz7tC6IwlqQZ6mVZ+6dm6vZ4IQIHjs6FdNsQBuKGPuUUUY6NfJw2PshC08Tn6LzLDOag==", - "dev": true + "integrity": "sha1-CcU4VTd1dTEMymL1W7M0q/97PtI=" } } }, @@ -4624,8 +4431,7 @@ "fast-glob": { "version": "2.2.2", "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-2.2.2.tgz", - "integrity": "sha512-TR6zxCKftDQnUAPvkrCWdBgDq/gbqx8A3ApnBrR5rMvpp6+KMJI0Igw7fkWPgeVK0uhRXTXdvO3O+YP0CaUX2g==", - "dev": true, + "integrity": "sha1-cXIzOKybTg4v/x1nSKKhPV7TUr8=", "requires": { "@mrmlnc/readdir-enhanced": "^2.2.1", "@nodelib/fs.stat": "^1.0.1", @@ -4638,20 +4444,17 @@ "arr-diff": { "version": "4.0.0", "resolved": 
"https://registry.npmjs.org/arr-diff/-/arr-diff-4.0.0.tgz", - "integrity": "sha1-1kYQdP6/7HHn4VI1dhoyml3HxSA=", - "dev": true + "integrity": "sha1-1kYQdP6/7HHn4VI1dhoyml3HxSA=" }, "array-unique": { "version": "0.3.2", "resolved": "https://registry.npmjs.org/array-unique/-/array-unique-0.3.2.tgz", - "integrity": "sha1-qJS3XUvE9s1nnvMkSp/Y9Gri1Cg=", - "dev": true + "integrity": "sha1-qJS3XUvE9s1nnvMkSp/Y9Gri1Cg=" }, "braces": { "version": "2.3.2", "resolved": "https://registry.npmjs.org/braces/-/braces-2.3.2.tgz", - "integrity": "sha512-aNdbnj9P8PjdXU4ybaWLK2IF3jc/EoDYbC7AazW6to3TRsfXxscC9UXOB5iDiEQrkyIbWp2SLQda4+QAa7nc3w==", - "dev": true, + "integrity": "sha1-WXn9PxTNUxVl5fot8av/8d+u5yk=", "requires": { "arr-flatten": "^1.1.0", "array-unique": "^0.3.2", @@ -4669,7 +4472,6 @@ "version": "2.0.1", "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz", "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=", - "dev": true, "requires": { "is-extendable": "^0.1.0" } @@ -4680,7 +4482,6 @@ "version": "2.1.4", "resolved": "https://registry.npmjs.org/expand-brackets/-/expand-brackets-2.1.4.tgz", "integrity": "sha1-t3c14xXOMPa27/D4OwQVGiJEliI=", - "dev": true, "requires": { "debug": "^2.3.3", "define-property": "^0.2.5", @@ -4695,7 +4496,6 @@ "version": "0.2.5", "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz", "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=", - "dev": true, "requires": { "is-descriptor": "^0.1.0" } @@ -4704,7 +4504,6 @@ "version": "2.0.1", "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz", "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=", - "dev": true, "requires": { "is-extendable": "^0.1.0" } @@ -4713,7 +4512,6 @@ "version": "0.1.6", "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-0.1.6.tgz", "integrity": "sha1-qeEss66Nh2cn7u84Q/igiXtcmNY=", - "dev": true, "requires": { "kind-of": "^3.0.2" }, @@ -4722,7 +4520,6 @@ "version": "3.2.2", "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=", - "dev": true, "requires": { "is-buffer": "^1.1.5" } @@ -4733,7 +4530,6 @@ "version": "0.1.4", "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-0.1.4.tgz", "integrity": "sha1-C17mSDiOLIYCgueT8YVv7D8wG1Y=", - "dev": true, "requires": { "kind-of": "^3.0.2" }, @@ -4742,7 +4538,6 @@ "version": "3.2.2", "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=", - "dev": true, "requires": { "is-buffer": "^1.1.5" } @@ -4752,8 +4547,7 @@ "is-descriptor": { "version": "0.1.6", "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-0.1.6.tgz", - "integrity": "sha512-avDYr0SB3DwO9zsMov0gKCESFYqCnE4hq/4z3TdUlukEy5t9C0YRq7HLrsN52NAcqXKaepeCD0n+B0arnVG3Hg==", - "dev": true, + "integrity": "sha1-Nm2CQN3kh8pRgjsaufB6EKeCUco=", "requires": { "is-accessor-descriptor": "^0.1.6", "is-data-descriptor": "^0.1.4", @@ -4763,16 +4557,14 @@ "kind-of": { "version": "5.1.0", "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-5.1.0.tgz", - "integrity": "sha512-NGEErnH6F2vUuXDh+OlbcKW7/wOcfdRHaZ7VWtqCztfHri/++YKmP51OdWeGPuqCOba6kk2OTe5d02VmTB80Pw==", - "dev": true + "integrity": "sha1-cpyR4thXt6QZofmqZWhcTDP1hF0=" } } }, "extglob": { "version": "2.0.4", "resolved": "https://registry.npmjs.org/extglob/-/extglob-2.0.4.tgz", - "integrity": 
"sha512-Nmb6QXkELsuBr24CJSkilo6UHHgbekK5UiZgfE6UHD3Eb27YC6oD+bhcT+tJ6cl8dmsgdQxnWlcry8ksBIBLpw==", - "dev": true, + "integrity": "sha1-rQD+TcYSqSMuhxhxHcXLWrAoVUM=", "requires": { "array-unique": "^0.3.2", "define-property": "^1.0.0", @@ -4788,7 +4580,6 @@ "version": "1.0.0", "resolved": "https://registry.npmjs.org/define-property/-/define-property-1.0.0.tgz", "integrity": "sha1-dp66rz9KY6rTr56NMEybvnm/sOY=", - "dev": true, "requires": { "is-descriptor": "^1.0.0" } @@ -4797,7 +4588,6 @@ "version": "2.0.1", "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz", "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=", - "dev": true, "requires": { "is-extendable": "^0.1.0" } @@ -4808,7 +4598,6 @@ "version": "4.0.0", "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-4.0.0.tgz", "integrity": "sha1-1USBHUKPmOsGpj3EAtJAPDKMOPc=", - "dev": true, "requires": { "extend-shallow": "^2.0.1", "is-number": "^3.0.0", @@ -4820,7 +4609,6 @@ "version": "2.0.1", "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz", "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=", - "dev": true, "requires": { "is-extendable": "^0.1.0" } @@ -4831,7 +4619,6 @@ "version": "3.1.0", "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-3.1.0.tgz", "integrity": "sha1-nmr2KZ2NO9K9QEMIMr0RPfkGxa4=", - "dev": true, "requires": { "is-glob": "^3.1.0", "path-dirname": "^1.0.0" @@ -4841,7 +4628,6 @@ "version": "3.1.0", "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-3.1.0.tgz", "integrity": "sha1-e6WuJCF4BKxwcHuWkiVnSGzD6Eo=", - "dev": true, "requires": { "is-extglob": "^2.1.0" } @@ -4851,8 +4637,7 @@ "is-accessor-descriptor": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz", - "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==", - "dev": true, + "integrity": "sha1-FpwvbT3x+ZJhgHI2XJsOofaHhlY=", "requires": { "kind-of": "^6.0.0" } @@ -4860,8 +4645,7 @@ "is-data-descriptor": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz", - "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==", - "dev": true, + "integrity": "sha1-2Eh2Mh0Oet0DmQQGq7u9NrqSaMc=", "requires": { "kind-of": "^6.0.0" } @@ -4869,8 +4653,7 @@ "is-descriptor": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz", - "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==", - "dev": true, + "integrity": "sha1-OxWXRqZmBLBPjIFSS6NlxfFNhuw=", "requires": { "is-accessor-descriptor": "^1.0.0", "is-data-descriptor": "^1.0.0", @@ -4880,14 +4663,12 @@ "is-extglob": { "version": "2.1.1", "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", - "integrity": "sha1-qIwCU1eR8C7TfHahueqXc8gz+MI=", - "dev": true + "integrity": "sha1-qIwCU1eR8C7TfHahueqXc8gz+MI=" }, "is-glob": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.0.tgz", "integrity": "sha1-lSHHaEXMJhCoUgPd8ICpWML/q8A=", - "dev": true, "requires": { "is-extglob": "^2.1.1" } @@ -4896,7 +4677,6 @@ "version": "3.0.0", "resolved": "https://registry.npmjs.org/is-number/-/is-number-3.0.0.tgz", "integrity": "sha1-JP1iAaR4LPUFYcgQJ2r8fRLXEZU=", - "dev": true, "requires": { "kind-of": "^3.0.2" }, @@ -4905,7 +4685,6 @@ "version": "3.2.2", "resolved": 
"https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=", - "dev": true, "requires": { "is-buffer": "^1.1.5" } @@ -4915,20 +4694,17 @@ "isobject": { "version": "3.0.1", "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz", - "integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8=", - "dev": true + "integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8=" }, "kind-of": { "version": "6.0.2", "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.2.tgz", - "integrity": "sha512-s5kLOcnH0XqDO+FvuaLX8DDjZ18CGFk7VygH40QoKPUQhW4e2rvM0rwUq0t8IQDOwYSeLK01U90OjzBTme2QqA==", - "dev": true + "integrity": "sha1-ARRrNqYhjmTljzqNZt5df8b20FE=" }, "micromatch": { "version": "3.1.10", "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-3.1.10.tgz", - "integrity": "sha512-MWikgl9n9M3w+bpsY3He8L+w9eF9338xRl8IAO5viDizwSzziFEyUzo2xrrloB64ADbTf8uA8vRqqttDTOmccg==", - "dev": true, + "integrity": "sha1-cIWbyVyYQJUvNZoGij/En57PrCM=", "requires": { "arr-diff": "^4.0.0", "array-unique": "^0.3.2", @@ -4955,14 +4731,12 @@ "fast-levenshtein": { "version": "2.0.6", "resolved": "https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz", - "integrity": "sha1-PYpcZog6FqMMqGQ+hR8Zuqd5eRc=", - "dev": true + "integrity": "sha1-PYpcZog6FqMMqGQ+hR8Zuqd5eRc=" }, "fastparse": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/fastparse/-/fastparse-1.1.1.tgz", - "integrity": "sha1-0eJkOzipTXWDtHkGDmxK/8lAcfg=", - "dev": true + "integrity": "sha1-0eJkOzipTXWDtHkGDmxK/8lAcfg=" }, "figures": { "version": "2.0.0", @@ -4991,13 +4765,12 @@ "filesize": { "version": "3.6.1", "resolved": "https://registry.npmjs.org/filesize/-/filesize-3.6.1.tgz", - "integrity": "sha512-7KjR1vv6qnicaPMi1iiTcI85CyYwRO/PSFCu6SvqL8jN2Wjt/NIYQTFtFs7fSDCYOstUkEWIQGFUg5YZQfjlcg==", - "dev": true + "integrity": "sha1-CQuz7gG2+AGoqL6Z0xcQs0Irsxc=" }, "fill-range": { "version": "2.2.4", "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-2.2.4.tgz", - "integrity": "sha512-cnrcCbj01+j2gTG921VZPnHbjmdAf8oQV/iGeV2kZxGSyfYjjTyY79ErsK1WJWMpw6DaApEX72binqJE+/d+5Q==", + "integrity": "sha1-6x53OrsFbc2N8r/favWbizqTZWU=", "requires": { "is-number": "^2.1.0", "isobject": "^2.0.0", @@ -5009,7 +4782,7 @@ "finalhandler": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.1.1.tgz", - "integrity": "sha512-Y1GUDo39ez4aHAw7MysnUD5JzYX+WaIj8I57kO3aEPT1fFRL4sr7mjei97FgnwhAyyzRYmQZaTHb2+9uZ1dPtg==", + "integrity": "sha1-7r9O2EAHnIP0JJA4ydcDAIMBsQU=", "requires": { "debug": "2.6.9", "encodeurl": "~1.0.2", @@ -5023,7 +4796,7 @@ "find-package-json": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/find-package-json/-/find-package-json-1.1.0.tgz", - "integrity": "sha512-ldihxiIFpewACALK0tUByf3GmOaTBjI5OcvwJ3mExgERUZBsSOBV8QM6vVb0/ZqylpzY6Od6SsP8bjFv7fKiTw==" + "integrity": "sha1-JpsMFIu+OJ8CfiweDHL6EI6ts0g=" }, "find-up": { "version": "1.1.2", @@ -5047,7 +4820,7 @@ "fizzy-ui-utils": { "version": "2.0.7", "resolved": "https://registry.npmjs.org/fizzy-ui-utils/-/fizzy-ui-utils-2.0.7.tgz", - "integrity": "sha512-CZXDVXQ1If3/r8s0T+v+qVeMshhfcuq0rqIFgJnrtd+Bu8GmDmqMjntjUePypVtjHXKJ6V4sw9zeyox34n9aCg==", + "integrity": "sha1-ffRdzE6zdKCLZdObuaS+7fczBQU=", "requires": { "desandro-matches-selector": "^2.0.0" } @@ -5067,8 +4840,7 @@ "flatten": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/flatten/-/flatten-1.0.2.tgz", - "integrity": "sha1-2uRqnXj74lKSJYzB54CkHZXAN4I=", - "dev": true + "integrity": 
"sha1-2uRqnXj74lKSJYzB54CkHZXAN4I=" }, "flux-standard-action": { "version": "0.6.1", @@ -5099,8 +4871,7 @@ "foreach": { "version": "2.0.5", "resolved": "https://registry.npmjs.org/foreach/-/foreach-2.0.5.tgz", - "integrity": "sha1-C+4AUBiusmDQo6865ljdATbsG5k=", - "dev": true + "integrity": "sha1-C+4AUBiusmDQo6865ljdATbsG5k=" }, "forever-agent": { "version": "0.6.1", @@ -5138,7 +4909,7 @@ "fs-constants": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/fs-constants/-/fs-constants-1.0.0.tgz", - "integrity": "sha512-y6OAwoSIf7FyjMIv94u+b5rdheZEjzR63GTyZJm5qh4Bi+2YgwLCcI/fPFZkL5PSixOt6ZNKm+w+Hfp/Bciwow==" + "integrity": "sha1-a+Dem+mYzhavivwkSXue6bfM2a0=" }, "fs.realpath": { "version": "1.0.0", @@ -5148,7 +4919,7 @@ "fsevents": { "version": "1.2.4", "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-1.2.4.tgz", - "integrity": "sha512-z8H8/diyk76B7q5wg+Ud0+CqzcAF3mBBI/bA5ne5zrRUUIvNkJY//D3BqyH571KuAC4Nr7Rw7CjWX4r0y9DvNg==", + "integrity": "sha1-9B3LGvJYKvNpLaNvxVy9jhBBxCY=", "optional": true, "requires": { "nan": "^2.9.2", @@ -5621,8 +5392,7 @@ "fswatcher-child": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/fswatcher-child/-/fswatcher-child-1.1.0.tgz", - "integrity": "sha512-6+QZsCToxGmyO6odG1rijyHpiMwDnSMRdaY8/brb8mvxjbELRrLdRncdA3OImutqT8RCJ1meoneKBBxS9K2gcA==", - "dev": true, + "integrity": "sha1-rxZQWQ+JdTJDFzEkf9YnDzM5Ev8=", "requires": { "chokidar": "^2.0.3" } @@ -5630,7 +5400,7 @@ "function-bind": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz", - "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==" + "integrity": "sha1-pWiZ0+o8m6uHS7l3O3xe3pL0iV0=" }, "functional-red-black-tree": { "version": "1.0.1", @@ -5656,7 +5426,7 @@ "gaze": { "version": "1.1.3", "resolved": "https://registry.npmjs.org/gaze/-/gaze-1.1.3.tgz", - "integrity": "sha512-BRdNm8hbWzFzWHERTrejLqwHDfS4GibPoq5wjTPIoJHoBtKGPg3xAFfxmM+9ztbXelxcf2hwQcaz1PtmFeue8g==", + "integrity": "sha1-xEFzPhO5J6yMD/C0w7Az8ogSkko=", "requires": { "globule": "^1.0.0" } @@ -5665,7 +5435,6 @@ "version": "1.0.3", "resolved": "https://registry.npmjs.org/generic-names/-/generic-names-1.0.3.tgz", "integrity": "sha1-LXhqEhruUIh2eWk56OO/+DbCCRc=", - "dev": true, "requires": { "loader-utils": "^0.2.16" } @@ -5673,7 +5442,7 @@ "get-assigned-identifiers": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/get-assigned-identifiers/-/get-assigned-identifiers-1.2.0.tgz", - "integrity": "sha512-mBBwmeGTrxEMO4pMaaf/uUEFHnYtwr8FTe8Y/mer4rcV/bye0qGm6pw1bGZFGStxC5O76c5ZAVBGnqHmOaJpdQ==" + "integrity": "sha1-bb9BHeZIy6+NkWnrsNLVdhkeL/E=" }, "get-caller-file": { "version": "1.0.2", @@ -5688,13 +5457,12 @@ "get-port": { "version": "3.2.0", "resolved": "https://registry.npmjs.org/get-port/-/get-port-3.2.0.tgz", - "integrity": "sha1-3Xzn3hh8Bsi/NTeWrHHgmfCYDrw=", - "dev": true + "integrity": "sha1-3Xzn3hh8Bsi/NTeWrHHgmfCYDrw=" }, "get-size": { "version": "2.0.3", "resolved": "https://registry.npmjs.org/get-size/-/get-size-2.0.3.tgz", - "integrity": "sha512-lXNzT/h/dTjTxRbm9BXb+SGxxzkm97h/PCIKtlN/CBCxxmkkIVV21udumMS93MuVTDX583gqc94v3RjuHmI+2Q==" + "integrity": "sha1-VKHQJWsg6nrGRlFnViAnaZQa0u8=" }, "get-stdin": { "version": "4.0.1", @@ -5717,7 +5485,7 @@ "glob": { "version": "7.1.2", "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.2.tgz", - "integrity": "sha512-MJTUg1kjuLeQCJ+ccE4Vpa6kKVXkPYJ2mOCQyUuKLcLQsdrMCpBPUi8qVE6+YuaJkozeA9NusTAw3hLr8Xe5EQ==", + "integrity": 
"sha1-wZyd+aAocC1nhhI4SmVSQExjbRU=", "requires": { "fs.realpath": "^1.0.0", "inflight": "^1.0.4", @@ -5730,7 +5498,7 @@ "minimatch": { "version": "3.0.4", "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz", - "integrity": "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==", + "integrity": "sha1-UWbihkV/AzBgZL5Ul+jbsMPTIIM=", "requires": { "brace-expansion": "^1.1.7" } @@ -5757,19 +5525,18 @@ "glob-to-regexp": { "version": "0.3.0", "resolved": "https://registry.npmjs.org/glob-to-regexp/-/glob-to-regexp-0.3.0.tgz", - "integrity": "sha1-jFoUlNIGbFcMw7/kSWF1rMTVAqs=", - "dev": true + "integrity": "sha1-jFoUlNIGbFcMw7/kSWF1rMTVAqs=" }, "globals": { "version": "11.7.0", "resolved": "https://registry.npmjs.org/globals/-/globals-11.7.0.tgz", - "integrity": "sha512-K8BNSPySfeShBQXsahYB/AbbWruVOTyVpgoIDnl8odPpeSfP2J5QO2oLFFdl2j7GfDCtZj2bMKar2T49itTPCg==", + "integrity": "sha1-pYP6pDBVsayncZFL9oJY4vwSVnM=", "dev": true }, "globule": { "version": "1.2.1", "resolved": "https://registry.npmjs.org/globule/-/globule-1.2.1.tgz", - "integrity": "sha512-g7QtgWF4uYSL5/dn71WxubOrS7JVGCnFPEnoeChJmBnyR9Mw8nGoEwOgJL/RC2Te0WhbsEUCejfH8SZNJ+adYQ==", + "integrity": "sha1-Xf+xsZHyLSB5epNptJ6rTpg5aW0=", "requires": { "glob": "~7.1.1", "lodash": "~4.17.10", @@ -5785,7 +5552,6 @@ "version": "0.3.2", "resolved": "https://registry.npmjs.org/grapheme-breaker/-/grapheme-breaker-0.3.2.tgz", "integrity": "sha1-W55reMODJFLSuiuxy4MPlidkEKw=", - "dev": true, "requires": { "brfs": "^1.2.0", "unicode-trie": "^0.3.1" @@ -5794,7 +5560,7 @@ "growl": { "version": "1.10.5", "resolved": "https://registry.npmjs.org/growl/-/growl-1.10.5.tgz", - "integrity": "sha512-qBr4OuELkhPenW6goKVXiv47US3clb3/IbuWF9KNKEijAy9oeHxU9IgzjvJhHkUzhaj7rOUD7+YGWqUjLp5oSA==" + "integrity": "sha1-8nNdwig2dPpnR4sQGBBZNVw2nl4=" }, "har-schema": { "version": "2.0.0", @@ -5902,7 +5668,7 @@ "hash.js": { "version": "1.1.3", "resolved": "https://registry.npmjs.org/hash.js/-/hash.js-1.1.3.tgz", - "integrity": "sha512-/UETyP0W22QILqS+6HowevwhEFJ3MBJnwTf75Qob9Wz9t0DPuisL8kW8YZMK62dHAKE1c1p+gY1TtOLY+USEHA==", + "integrity": "sha1-NA3tvmKQGHFRweodd3o0SJNd+EY=", "requires": { "inherits": "^2.0.3", "minimalistic-assert": "^1.0.0" @@ -5911,7 +5677,7 @@ "hawk": { "version": "6.0.2", "resolved": "https://registry.npmjs.org/hawk/-/hawk-6.0.2.tgz", - "integrity": "sha512-miowhl2+U7Qle4vdLqDdPt9m09K6yZhkLDTWGoUiUzrQCn+mHHSmfJgAyGaLRZbPmTqfFFjRV1QWCW0VWUJBbQ==", + "integrity": "sha1-r02RTrBl+bXOTZ0RwcshJu7MMDg=", "requires": { "boom": "4.x.x", "cryptiles": "3.x.x", @@ -5927,8 +5693,7 @@ "hex-color-regex": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/hex-color-regex/-/hex-color-regex-1.1.0.tgz", - "integrity": "sha512-l9sfDFsuqtOqKDsQdqrMRk0U85RZc0RtOR9yPI7mRVOa4FsR/BVnZ0shmQRM96Ji99kYZP/7hn1cedc1+ApsTQ==", - "dev": true + "integrity": "sha1-TAb8y0YC/iYCs8k9+C1+fb8aio4=" }, "hmac-drbg": { "version": "1.0.1", @@ -5943,13 +5708,12 @@ "hoek": { "version": "4.2.1", "resolved": "https://registry.npmjs.org/hoek/-/hoek-4.2.1.tgz", - "integrity": "sha512-QLg82fGkfnJ/4iy1xZ81/9SIJiq1NGFUMGs6ParyjBZr6jW2Ufj/snDqTHixNlHdPNwN2RLVD0Pi3igeK9+JfA==" + "integrity": "sha1-ljRQKqEsRF3Vp8VzS1cruHOKrLs=" }, "home-or-tmp": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/home-or-tmp/-/home-or-tmp-2.0.0.tgz", "integrity": "sha1-42w/LSyufXRqhX440Y1fMqeILbg=", - "dev": true, "requires": { "os-homedir": "^1.0.0", "os-tmpdir": "^1.0.1" @@ -5958,25 +5722,27 @@ "hosted-git-info": { "version": "2.6.0", "resolved": 
"https://registry.npmjs.org/hosted-git-info/-/hosted-git-info-2.6.0.tgz", - "integrity": "sha512-lIbgIIQA3lz5XaB6vxakj6sDHADJiZadYEJB+FgA+C4nubM1NwcuvUr9EJPmnH1skZqpqUzWborWo8EIUi0Sdw==" + "integrity": "sha1-IyNbKasjDFdqqw1PE/wEawsDgiI=" }, "hsl-regex": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/hsl-regex/-/hsl-regex-1.0.0.tgz", - "integrity": "sha1-1JMwx4ntgZ4nakwNJy3/owsY/m4=", - "dev": true + "integrity": "sha1-1JMwx4ntgZ4nakwNJy3/owsY/m4=" }, "hsla-regex": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/hsla-regex/-/hsla-regex-1.0.0.tgz", - "integrity": "sha1-wc56MWjIxmFAM6S194d/OyJfnDg=", - "dev": true + "integrity": "sha1-wc56MWjIxmFAM6S194d/OyJfnDg=" }, "html-comment-regex": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/html-comment-regex/-/html-comment-regex-1.1.1.tgz", - "integrity": "sha1-ZouTd26q5V696POtRkswekljYl4=", - "dev": true + "integrity": "sha1-ZouTd26q5V696POtRkswekljYl4=" + }, + "html5sortable": { + "version": "0.9.4", + "resolved": "https://registry.npmjs.org/html5sortable/-/html5sortable-0.9.4.tgz", + "integrity": "sha512-WoHXrJjSsWc0nrW1Y1TWnkn+kspEDtvZzqizGk9XVbWSa+i4oUMmeJJmBnF5GjSP1BaohKl13ErntLNYoiVkWQ==" }, "htmlescape": { "version": "1.1.1", @@ -5986,8 +5752,7 @@ "htmlnano": { "version": "0.1.10", "resolved": "https://registry.npmjs.org/htmlnano/-/htmlnano-0.1.10.tgz", - "integrity": "sha512-eTEUzz8VdWYp+w/KUdb99kwao4reR64epUySyZkQeepcyzPQ2n2EPWzibf6QDxmkGy10Kr+CKxYqI3izSbmhJQ==", - "dev": true, + "integrity": "sha1-oKVI60x2rizyQj7HolyIFzTT3qY=", "requires": { "cssnano": "^3.4.0", "object-assign": "^4.0.1", @@ -6000,8 +5765,7 @@ "coa": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/coa/-/coa-2.0.1.tgz", - "integrity": "sha512-5wfTTO8E2/ja4jFSxePXlG5nRu5bBtL/r1HCIpJW/lzT6yDtKl0u0Z4o/Vpz32IpKmBn7HerheEZQgA9N2DarQ==", - "dev": true, + "integrity": "sha1-8/iwsVBz411wJj+xBCyywCPbOK8=", "requires": { "q": "^1.1.2" } @@ -6009,8 +5773,7 @@ "csso": { "version": "3.5.1", "resolved": "https://registry.npmjs.org/csso/-/csso-3.5.1.tgz", - "integrity": "sha512-vrqULLffYU1Q2tLdJvaCYbONStnfkfimRxXNaGjxMldI0C7JPBC4rB1RyjhfdZ4m1frm8pM9uRPKH3d2knZ8gg==", - "dev": true, + "integrity": "sha1-e564vmFiiXPBsmHhadLwJACOdYs=", "requires": { "css-tree": "1.0.0-alpha.29" }, @@ -6018,8 +5781,7 @@ "css-tree": { "version": "1.0.0-alpha.29", "resolved": "https://registry.npmjs.org/css-tree/-/css-tree-1.0.0-alpha.29.tgz", - "integrity": "sha512-sRNb1XydwkW9IOci6iB2xmy8IGCj6r/fr+JWitvJ2JxQRPzN3T4AGGVWCMlVmVwM1gtgALJRmGIlWv5ppnGGkg==", - "dev": true, + "integrity": "sha1-P6nU7zFCy9HDAedmTB81K9gvWjk=", "requires": { "mdn-data": "~1.1.0", "source-map": "^0.5.3" @@ -6030,8 +5792,7 @@ "js-yaml": { "version": "3.10.0", "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.10.0.tgz", - "integrity": "sha512-O2v52ffjLa9VeM43J4XocZE//WT9N0IiwDa3KSHH7Tu8CtH+1qM8SIZvnsTh6v+4yFy5KUY3BHUVwjpfAWsjIA==", - "dev": true, + "integrity": "sha1-LnhEFka9RoLpY/IrbpKCPDCcYtw=", "requires": { "argparse": "^1.0.7", "esprima": "^4.0.0" @@ -6040,14 +5801,12 @@ "mdn-data": { "version": "1.1.4", "resolved": "https://registry.npmjs.org/mdn-data/-/mdn-data-1.1.4.tgz", - "integrity": "sha512-FSYbp3lyKjyj3E7fMl6rYvUdX0FBXaluGqlFoYESWQlyUTq8R+wp0rkFxoYFqZlHCvsUXGjyJmLQSnXToYhOSA==", - "dev": true + "integrity": "sha1-ULXU/8RXUnZXPE7tuHgIEqhBnwE=" }, "svgo": { "version": "1.0.5", "resolved": "https://registry.npmjs.org/svgo/-/svgo-1.0.5.tgz", - "integrity": "sha512-nYrifviB77aNKDNKKyuay3M9aYiK6Hv5gJVDdjj2ZXTQmI8WZc8+UPLR5IpVlktJfSu3co/4XcWgrgI6seGBPg==", - 
"dev": true, + "integrity": "sha1-cEA2TAYqBTirrP9EAc6momp6OJo=", "requires": { "coa": "~2.0.1", "colors": "~1.1.2", @@ -6071,7 +5830,6 @@ "version": "3.9.2", "resolved": "https://registry.npmjs.org/htmlparser2/-/htmlparser2-3.9.2.tgz", "integrity": "sha1-G9+HrMoPP55T+k/M6w9LTLsAszg=", - "dev": true, "requires": { "domelementtype": "^1.3.0", "domhandler": "^2.3.0", @@ -6160,29 +5918,28 @@ "iconv-lite": { "version": "0.4.19", "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.19.tgz", - "integrity": "sha512-oTZqweIP51xaGPI4uPa56/Pri/480R+mo7SeU+YETByQNhDG55ycFyNLIgta9vXhILrxXDmF7ZGhqZIcuN0gJQ==" + "integrity": "sha1-90aPYBNfXl2tM5nAqBvpoWA6CCs=" }, "icss-replace-symbols": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/icss-replace-symbols/-/icss-replace-symbols-1.1.0.tgz", - "integrity": "sha1-Bupvg2ead0njhs/h/oEq5dsiPe0=", - "dev": true + "integrity": "sha1-Bupvg2ead0njhs/h/oEq5dsiPe0=" }, "ieee754": { "version": "1.1.11", "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.1.11.tgz", - "integrity": "sha512-VhDzCKN7K8ufStx/CLj5/PDTMgph+qwN5Pkd5i0sGnVwk56zJ0lkT8Qzi1xqWLS0Wp29DgDtNeS7v8/wMoZeHg==" + "integrity": "sha1-wWOE/+APW3g1gk5ntvK9RKUilFU=" }, "ignore": { "version": "4.0.6", "resolved": "https://registry.npmjs.org/ignore/-/ignore-4.0.6.tgz", - "integrity": "sha512-cyFDKrqc/YdcWFniJhzI42+AzS+gNwmUzOSFcRCQYwySuBBBy/KjuxWLZ/FHEH6Moq1NizMOBWyTcv8O4OZIMg==", + "integrity": "sha1-dQ49tYYgh7RzfrrIIH/9HvJ7Jfw=", "dev": true }, "imagelightbox": { "version": "0.10.0", "resolved": "https://registry.npmjs.org/imagelightbox/-/imagelightbox-0.10.0.tgz", - "integrity": "sha512-XiRkdgz+ZKq3VqBB5LQ2fcHSbjHWT1PT1jtHb4XWc2eJBsTY6LwNaXYZRLooYLiLqnil8+F+nLoineE2+TpLxA==", + "integrity": "sha1-FLAYZkl/kSTGBojadzU8WBIKrI4=", "requires": { "jquery": ">=1.12.4 <4.0.0" } @@ -6209,8 +5966,7 @@ "indexes-of": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/indexes-of/-/indexes-of-1.0.1.tgz", - "integrity": "sha1-8w9xbI4r00bHtn0985FVZqfAVgc=", - "dev": true + "integrity": "sha1-8w9xbI4r00bHtn0985FVZqfAVgc=" }, "indexof": { "version": "0.0.1", @@ -6234,8 +5990,7 @@ "ini": { "version": "1.3.5", "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.5.tgz", - "integrity": "sha512-RZY5huIKCMRWDUqZlEi72f/lmXKMvuszcMBduliQ3nnWbx9X/ZBQO7DijMEYS9EhHBb2qacRUMtC7svLwe0lcw==", - "dev": true + "integrity": "sha1-7uJfVtscnsYIXgwid4CD9Zar+Sc=" }, "inline-source-map": { "version": "0.6.2", @@ -6248,7 +6003,7 @@ "inquirer": { "version": "6.2.0", "resolved": "https://registry.npmjs.org/inquirer/-/inquirer-6.2.0.tgz", - "integrity": "sha512-QIEQG4YyQ2UYZGDC4srMZ7BjHOmNk1lR2JQj5UknBapklm6WHA+VVH7N+sUdX3A7NeCfGF8o4X1S3Ao7nAcIeg==", + "integrity": "sha1-Ua3Nd29mE2ncHolIWcJWCiJKvdg=", "dev": true, "requires": { "ansi-escapes": "^3.0.0", @@ -6275,7 +6030,7 @@ "ansi-styles": { "version": "3.2.1", "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "integrity": "sha1-QfuyAkPlCxK+DwS43tvwdSDOhB0=", "dev": true, "requires": { "color-convert": "^1.9.0" @@ -6284,7 +6039,7 @@ "chalk": { "version": "2.4.1", "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.1.tgz", - "integrity": "sha512-ObN6h1v2fTJSmUXoS3nMQ92LbDK9be4TV+6G+omQlGJFdcUX5heKi1LZ1YnRMIgwTLEj3E24bT6tYni50rlCfQ==", + "integrity": "sha1-GMSasWoDe26wFSzIPjRxM4IVtm4=", "dev": true, "requires": { "ansi-styles": "^3.2.1", @@ -6301,7 +6056,7 @@ "string-width": { "version": "2.1.1", 
"resolved": "https://registry.npmjs.org/string-width/-/string-width-2.1.1.tgz", - "integrity": "sha512-nOqH59deCq9SRHlxq1Aw85Jnt4w6KvLKqWVik6oA9ZklXLNIOlqg4F2yrT1MVaTjAqvVwdfeZ7w7aCvJD7ugkw==", + "integrity": "sha1-q5Pyeo3BPSjKyBXEYhQ6bZASrp4=", "dev": true, "requires": { "is-fullwidth-code-point": "^2.0.0", @@ -6322,7 +6077,7 @@ "insert-module-globals": { "version": "7.2.0", "resolved": "https://registry.npmjs.org/insert-module-globals/-/insert-module-globals-7.2.0.tgz", - "integrity": "sha512-VE6NlW+WGn2/AeOMd496AHFYmE7eLKkUY6Ty31k4og5vmA3Fjuwe9v6ifH6Xx/Hz27QvdoMoviw1/pqWRB09Sw==", + "integrity": "sha1-7IfltCcoR54ye9XFxxYR3ftHUro=", "requires": { "JSONStream": "^1.0.3", "acorn-node": "^1.5.2", @@ -6339,8 +6094,7 @@ "invariant": { "version": "2.2.4", "resolved": "https://registry.npmjs.org/invariant/-/invariant-2.2.4.tgz", - "integrity": "sha512-phJfQVBuaJM5raOpJjSfkiD6BpbCE4Ns//LaXl6wGYtUBY83nWS6Rf9tXm2e8VaK60JEjYldbPif/A2B1C2gNA==", - "dev": true, + "integrity": "sha1-YQ88ksk1nOHbYW5TgAjSP/NRWOY=", "requires": { "loose-envify": "^1.0.0" } @@ -6358,8 +6112,7 @@ "is-absolute-url": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/is-absolute-url/-/is-absolute-url-2.1.0.tgz", - "integrity": "sha1-UFMN+4T8yap9vnhS6Do3uTufKqY=", - "dev": true + "integrity": "sha1-UFMN+4T8yap9vnhS6Do3uTufKqY=" }, "is-accessor-descriptor": { "version": "0.1.6", @@ -6385,7 +6138,7 @@ "is-buffer": { "version": "1.1.6", "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-1.1.6.tgz", - "integrity": "sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w==" + "integrity": "sha1-76ouqdqg16suoTqXsritUf776L4=" }, "is-builtin-module": { "version": "1.0.0", @@ -6398,14 +6151,12 @@ "is-callable": { "version": "1.1.4", "resolved": "https://registry.npmjs.org/is-callable/-/is-callable-1.1.4.tgz", - "integrity": "sha512-r5p9sxJjYnArLjObpjA4xu5EKI3CuKHkJXMhT7kwbpUyIFD1n5PMAsoPvWnvtZiNz7LjkYDRZhd7FlI0eMijEA==", - "dev": true + "integrity": "sha1-HhrfIZ4e62hNaR+dagX/DTCiTXU=" }, "is-color-stop": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/is-color-stop/-/is-color-stop-1.1.0.tgz", "integrity": "sha1-z/9HGu5N1cnhWFmPvhKWe1za00U=", - "dev": true, "requires": { "css-color-names": "^0.0.4", "hex-color-regex": "^1.1.0", @@ -6426,13 +6177,12 @@ "is-date-object": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/is-date-object/-/is-date-object-1.0.1.tgz", - "integrity": "sha1-mqIOtq7rv/d/vTPnTKAbM1gdOhY=", - "dev": true + "integrity": "sha1-mqIOtq7rv/d/vTPnTKAbM1gdOhY=" }, "is-descriptor": { "version": "0.1.6", "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-0.1.6.tgz", - "integrity": "sha512-avDYr0SB3DwO9zsMov0gKCESFYqCnE4hq/4z3TdUlukEy5t9C0YRq7HLrsN52NAcqXKaepeCD0n+B0arnVG3Hg==", + "integrity": "sha1-Nm2CQN3kh8pRgjsaufB6EKeCUco=", "requires": { "is-accessor-descriptor": "^0.1.6", "is-data-descriptor": "^0.1.4", @@ -6442,15 +6192,14 @@ "kind-of": { "version": "5.1.0", "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-5.1.0.tgz", - "integrity": "sha512-NGEErnH6F2vUuXDh+OlbcKW7/wOcfdRHaZ7VWtqCztfHri/++YKmP51OdWeGPuqCOba6kk2OTe5d02VmTB80Pw==" + "integrity": "sha1-cpyR4thXt6QZofmqZWhcTDP1hF0=" } } }, "is-directory": { "version": "0.3.1", "resolved": "https://registry.npmjs.org/is-directory/-/is-directory-0.3.1.tgz", - "integrity": "sha1-YTObbyR1/Hcv2cnYP1yFddwVSuE=", - "dev": true + "integrity": "sha1-YTObbyR1/Hcv2cnYP1yFddwVSuE=" }, "is-dotfile": { "version": "1.0.3", @@ -6510,13 +6259,12 @@ "is-obj": { "version": 
"1.0.1", "resolved": "http://registry.npmjs.org/is-obj/-/is-obj-1.0.1.tgz", - "integrity": "sha1-PkcprB9f3gJc19g6iW2rn09n2w8=", - "dev": true + "integrity": "sha1-PkcprB9f3gJc19g6iW2rn09n2w8=" }, "is-odd": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/is-odd/-/is-odd-2.0.0.tgz", - "integrity": "sha512-OTiixgpZAT1M4NHgS5IguFp/Vz2VI3U7Goh4/HA1adtwyLtSBrxYlcSYkhpAE07s4fKEcjrFxyvtQBND4vFQyQ==", + "integrity": "sha1-dkZiRnH9fqVYzNmieVGC8pWPGyQ=", "requires": { "is-number": "^4.0.0" }, @@ -6524,7 +6272,7 @@ "is-number": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/is-number/-/is-number-4.0.0.tgz", - "integrity": "sha512-rSklcAIlf1OmFdyAqbnWTLVelsQ58uvZ66S/ZyawjWqIviTWCjg2PzVGw8WUA+nNuPTqb4wgA+NszrJ+08LlgQ==" + "integrity": "sha1-ACbjf1RU1z41bf5lZGmYZ8an8P8=" } } }, @@ -6537,7 +6285,7 @@ "is-path-in-cwd": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/is-path-in-cwd/-/is-path-in-cwd-1.0.1.tgz", - "integrity": "sha512-FjV1RTW48E7CWM7eE/J2NJvAEEVektecDBVBE5Hh3nM1Jd0kvhHtX68Pr3xsDf857xt3Y4AkwVULK1Vku62aaQ==", + "integrity": "sha1-WsSLNF72dTOb1sekipEhELJBz1I=", "dev": true, "requires": { "is-path-inside": "^1.0.0" @@ -6555,13 +6303,12 @@ "is-plain-obj": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-1.1.0.tgz", - "integrity": "sha1-caUMhCnfync8kqOQpKA7OfzVHT4=", - "dev": true + "integrity": "sha1-caUMhCnfync8kqOQpKA7OfzVHT4=" }, "is-plain-object": { "version": "2.0.4", "resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-2.0.4.tgz", - "integrity": "sha512-h5PpgXkWitc38BBMYawTYMWJHFZJVnBquFE57xFpjB8pJFiF6gZ+bU+WyI/yqXiFR5mdLsgYNaPe8uao6Uv9Og==", + "integrity": "sha1-LBY7P6+xtgbZ0Xko8FwqHDjgdnc=", "requires": { "isobject": "^3.0.1" }, @@ -6593,7 +6340,6 @@ "version": "1.0.4", "resolved": "https://registry.npmjs.org/is-regex/-/is-regex-1.0.4.tgz", "integrity": "sha1-VRdIm1RwkbCTDglWVM7SXul+lJE=", - "dev": true, "requires": { "has": "^1.0.1" } @@ -6601,14 +6347,12 @@ "is-resolvable": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/is-resolvable/-/is-resolvable-1.1.0.tgz", - "integrity": "sha512-qgDYXFSR5WvEfuS5dMj6oTMEbrrSaM0CrFk2Yiq/gXnBvD9pMa2jGXxyhGLfvhZpuMZe18CJpFxAt3CRs42NMg==", - "dev": true + "integrity": "sha1-+xj4fOH+uSUWnJpAfBkxijIG7Yg=" }, "is-svg": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/is-svg/-/is-svg-2.1.0.tgz", "integrity": "sha1-z2EJDaDZ77yrhyLeum8DIgjbsOk=", - "dev": true, "requires": { "html-comment-regex": "^1.1.0" } @@ -6616,8 +6360,7 @@ "is-symbol": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/is-symbol/-/is-symbol-1.0.1.tgz", - "integrity": "sha1-PMWfAAJRlLarLjjbrmaJJWtmBXI=", - "dev": true + "integrity": "sha1-PMWfAAJRlLarLjjbrmaJJWtmBXI=" }, "is-typedarray": { "version": "1.0.0", @@ -6627,8 +6370,7 @@ "is-url": { "version": "1.2.4", "resolved": "https://registry.npmjs.org/is-url/-/is-url-1.2.4.tgz", - "integrity": "sha512-ITvGim8FhRiYe4IQ5uHSkj7pVaPDrCTkNd3yq3cV7iZAcJdHTUMPMEHcqSOy9xZ9qFenQCvi+2wjH9a1nXqHww==", - "dev": true + "integrity": "sha1-BKTfRtKMTP89c9Af8Gq+sxihqlI=" }, "is-utf8": { "version": "0.2.1", @@ -6638,13 +6380,12 @@ "is-windows": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/is-windows/-/is-windows-1.0.2.tgz", - "integrity": "sha512-eXK1UInq2bPmjyX6e3VHIzMLobc4J94i4AWn+Hpq3OU5KkrRC96OAcR3PRJ/pGu6m8TRnBHP9dkXQVsT/COVIA==" + "integrity": "sha1-0YUOuXkezRjmGCzhKjDzlmNLsZ0=" }, "is-wsl": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/is-wsl/-/is-wsl-1.1.0.tgz", - 
"integrity": "sha1-HxbkqiKwTRM2tmGIpmrzxgDDpm0=", - "dev": true + "integrity": "sha1-HxbkqiKwTRM2tmGIpmrzxgDDpm0=" }, "isarray": { "version": "1.0.0", @@ -6672,12 +6413,12 @@ "jquery": { "version": "3.3.1", "resolved": "https://registry.npmjs.org/jquery/-/jquery-3.3.1.tgz", - "integrity": "sha512-Ubldcmxp5np52/ENotGxlLe6aGMvmF4R8S6tZjsP6Knsaxd/xp3Zrh50cG93lR6nPXyUFwzN3ZSOQI0wRJNdGg==" + "integrity": "sha1-lYzinoHJeQ8xvneS311NlfxX+8o=" }, "jquery-datetimepicker": { "version": "2.5.20", "resolved": "https://registry.npmjs.org/jquery-datetimepicker/-/jquery-datetimepicker-2.5.20.tgz", - "integrity": "sha512-ugnjbUkOw1MWuJx+Aik9Reew9U2We+kGdaXU5NKvfdBNiG/vNeeFlgQ8EWu1h8zFf5wmUse7G1MLsYHTP18J4Q==", + "integrity": "sha1-aH1iBLkLA9yT9yX43wNuHQYfN6w=", "requires": { "jquery": ">= 1.7.2", "jquery-mousewheel": ">= 3.1.13", @@ -6702,13 +6443,12 @@ "js-base64": { "version": "2.4.3", "resolved": "https://registry.npmjs.org/js-base64/-/js-base64-2.4.3.tgz", - "integrity": "sha512-H7ErYLM34CvDMto3GbD6xD0JLUGYXR3QTcH6B/tr4Hi/QpSThnCsIp+Sy5FRTw3B0d6py4HcNkW7nO/wdtGWEw==" + "integrity": "sha1-LlRewrDylX9BNWUQIFIU6Y+tZYI=" }, "js-beautify": { "version": "1.8.6", "resolved": "https://registry.npmjs.org/js-beautify/-/js-beautify-1.8.6.tgz", - "integrity": "sha512-TYDZa+lg8vEC5U0OmGQEEwiZ0XFBfvZAUeNOtqflLe+woKuIqF4JzlsBx/C1KVYW5lUewZy2ODL4Obq6sH7a4Q==", - "dev": true, + "integrity": "sha1-an5h5HpuRftYxeIkme7TUPhgfZg=", "requires": { "config-chain": "~1.1.5", "editorconfig": "^0.15.0", @@ -6720,7 +6460,6 @@ "version": "4.0.1", "resolved": "https://registry.npmjs.org/nopt/-/nopt-4.0.1.tgz", "integrity": "sha1-0NRoWv1UFRk8jHUFYC0NF81kR00=", - "dev": true, "requires": { "abbrev": "1", "osenv": "^0.1.4" @@ -6731,7 +6470,7 @@ "js-tokens": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", - "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", + "integrity": "sha1-GSA/tZmR35jjoocFDUZHzerzJJk=", "dev": true }, "js-yaml": { @@ -6759,14 +6498,12 @@ "jsesc": { "version": "0.5.0", "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-0.5.0.tgz", - "integrity": "sha1-597mbjXW/Bb3EP6R1c9p9w8IkR0=", - "dev": true + "integrity": "sha1-597mbjXW/Bb3EP6R1c9p9w8IkR0=" }, "json-parse-better-errors": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/json-parse-better-errors/-/json-parse-better-errors-1.0.2.tgz", - "integrity": "sha512-mrqyZKfX5EhL7hvqcV6WG1yYjnjeuYDzDhhcAAUrq8Po85NBQBJP+ZDUT75qZQ98IkUoBqdkExkukOU7Ts2wrw==", - "dev": true + "integrity": "sha1-u4Z8+zRQ5pEHwTHRxRS6s9yLyqk=" }, "json-schema": { "version": "0.2.3", @@ -6792,8 +6529,7 @@ "json5": { "version": "0.5.1", "resolved": "https://registry.npmjs.org/json5/-/json5-0.5.1.tgz", - "integrity": "sha1-Hq3nrMASA0rYTiOWdn6tn6VJWCE=", - "dev": true + "integrity": "sha1-Hq3nrMASA0rYTiOWdn6tn6VJWCE=" }, "jsonify": { "version": "0.0.0", @@ -6827,7 +6563,7 @@ "labeled-stream-splicer": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/labeled-stream-splicer/-/labeled-stream-splicer-2.0.1.tgz", - "integrity": "sha512-MC94mHZRvJ3LfykJlTUipBqenZz1pacOZEMhhQ8dMGcDHs0SBE5GbsavUXV7YtP3icBW17W0Zy1I0lfASmo9Pg==", + "integrity": "sha1-nP+jL9meFhL9HYao25YkFtUpKSY=", "requires": { "inherits": "^2.0.1", "isarray": "^2.0.4", @@ -6837,7 +6573,7 @@ "isarray": { "version": "2.0.4", "resolved": "https://registry.npmjs.org/isarray/-/isarray-2.0.4.tgz", - "integrity": "sha512-GMxXOiUirWg1xTKRipM0Ek07rX+ubx4nNVElTJdNLYmNO/2YrDkgJGw9CljXn+r4EWiDQg/8lsRdHyg2PJuUaA==" + 
"integrity": "sha1-OOe8uw87obeTPIa6GJTd/DeBu7c=" } } }, @@ -6861,7 +6597,6 @@ "version": "0.3.0", "resolved": "https://registry.npmjs.org/levn/-/levn-0.3.0.tgz", "integrity": "sha1-OwmSTt+fCDwEkP3UwLxEIeBHZO4=", - "dev": true, "requires": { "prelude-ls": "~1.1.2", "type-check": "~0.3.2" @@ -6890,7 +6625,6 @@ "version": "0.2.17", "resolved": "https://registry.npmjs.org/loader-utils/-/loader-utils-0.2.17.tgz", "integrity": "sha1-+G5jdNQyBabmxg6RlvF8Apm/s0g=", - "dev": true, "requires": { "big.js": "^3.1.3", "emojis-list": "^2.0.0", @@ -6901,12 +6635,12 @@ "lodash": { "version": "4.17.10", "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.10.tgz", - "integrity": "sha512-UejweD1pDoXu+AD825lWwp4ZGtSwgnpZxb3JDViD7StjQz+Nb/6l093lx4OQ0foGWNRoc19mWy7BzL+UAK2iVg==" + "integrity": "sha1-G3eTz3JZ6jj7NmHU04syYK+K5Oc=" }, "lodash-es": { "version": "4.17.8", "resolved": "https://registry.npmjs.org/lodash-es/-/lodash-es-4.17.8.tgz", - "integrity": "sha512-I9mjAxengFAleSThFhhAhvba6fsO0hunb9/0sQ6qQihSZsJRBofv2rYH58WXaOb/O++eUmYpCLywSQ22GfU+sA==" + "integrity": "sha1-b6jIxdM3SB3wvfHA2JnUJHMSHkU=" }, "lodash._basefor": { "version": "3.0.3", @@ -6921,14 +6655,12 @@ "lodash.camelcase": { "version": "4.3.0", "resolved": "https://registry.npmjs.org/lodash.camelcase/-/lodash.camelcase-4.3.0.tgz", - "integrity": "sha1-soqmKIorn8ZRA1x3EfZathkDMaY=", - "dev": true + "integrity": "sha1-soqmKIorn8ZRA1x3EfZathkDMaY=" }, "lodash.clone": { "version": "4.5.0", "resolved": "https://registry.npmjs.org/lodash.clone/-/lodash.clone-4.5.0.tgz", - "integrity": "sha1-GVhwRQ9aExkkeN9Lw9I9LeoZB7Y=", - "dev": true + "integrity": "sha1-GVhwRQ9aExkkeN9Lw9I9LeoZB7Y=" }, "lodash.clonedeep": { "version": "4.5.0", @@ -6967,25 +6699,22 @@ "lodash.memoize": { "version": "4.1.2", "resolved": "https://registry.npmjs.org/lodash.memoize/-/lodash.memoize-4.1.2.tgz", - "integrity": "sha1-vMbEmkKihA7Zl/Mj6tpezRguC/4=", - "dev": true + "integrity": "sha1-vMbEmkKihA7Zl/Mj6tpezRguC/4=" }, "lodash.mergewith": { "version": "4.6.1", "resolved": "https://registry.npmjs.org/lodash.mergewith/-/lodash.mergewith-4.6.1.tgz", - "integrity": "sha512-eWw5r+PYICtEBgrBE5hhlT6aAa75f411bgDz/ZL2KZqYV03USvucsxcHUIlGTDTECs1eunpI7HOV7U+WLDvNdQ==" + "integrity": "sha1-Y5BX5ybDr72z59QnQcqo1uQzWSc=" }, "lodash.uniq": { "version": "4.5.0", "resolved": "https://registry.npmjs.org/lodash.uniq/-/lodash.uniq-4.5.0.tgz", - "integrity": "sha1-0CJTc662Uq3BvILklFM5qEJ1R3M=", - "dev": true + "integrity": "sha1-0CJTc662Uq3BvILklFM5qEJ1R3M=" }, "log-symbols": { "version": "2.2.0", "resolved": "https://registry.npmjs.org/log-symbols/-/log-symbols-2.2.0.tgz", - "integrity": "sha512-VeIAFslyIerEJLXHziedo2basKbMKtTw3vfn5IzG0XTjhAVEJyNHnL2p7vc+wBDSdQuUpNw3M2u6xb9QsAY5Eg==", - "dev": true, + "integrity": "sha1-V0Dhxdbw39pK2TI7UzIQfva0xAo=", "requires": { "chalk": "^2.0.1" }, @@ -6993,8 +6722,7 @@ "ansi-styles": { "version": "3.2.1", "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", - "dev": true, + "integrity": "sha1-QfuyAkPlCxK+DwS43tvwdSDOhB0=", "requires": { "color-convert": "^1.9.0" } @@ -7002,8 +6730,7 @@ "chalk": { "version": "2.4.1", "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.1.tgz", - "integrity": "sha512-ObN6h1v2fTJSmUXoS3nMQ92LbDK9be4TV+6G+omQlGJFdcUX5heKi1LZ1YnRMIgwTLEj3E24bT6tYni50rlCfQ==", - "dev": true, + "integrity": "sha1-GMSasWoDe26wFSzIPjRxM4IVtm4=", "requires": { "ansi-styles": "^3.2.1", 
"escape-string-regexp": "^1.0.5", @@ -7039,7 +6766,7 @@ "lru-cache": { "version": "4.1.3", "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-4.1.3.tgz", - "integrity": "sha512-fFEhvcgzuIoJVUF8fYr5KR0YqxD238zgObTps31YdADwPPAp82a4M8TrckkWyx7ekNlf9aBcVn81cFwwXngrJA==", + "integrity": "sha1-oRdc80lt/IQ2wVbDNLSVWZK85pw=", "requires": { "pseudomap": "^1.0.2", "yallist": "^2.1.2" @@ -7079,7 +6806,7 @@ "masonry-layout": { "version": "4.2.2", "resolved": "https://registry.npmjs.org/masonry-layout/-/masonry-layout-4.2.2.tgz", - "integrity": "sha512-iGtAlrpHNyxaR19CvKC3npnEcAwszXoyJiI8ARV2ePi7fmYhIud25MHK8Zx4P0LCC4d3TNO9+rFa1KoK1OEOaA==", + "integrity": "sha1-1XtErxPmAb/NxCPx3YNItVJN40g=", "requires": { "get-size": "^2.0.2", "outlayer": "^2.1.0" @@ -7088,8 +6815,7 @@ "math-expression-evaluator": { "version": "1.2.17", "resolved": "https://registry.npmjs.org/math-expression-evaluator/-/math-expression-evaluator-1.2.17.tgz", - "integrity": "sha1-3oGf282E3M2PrlnGrreWFbnSZqw=", - "dev": true + "integrity": "sha1-3oGf282E3M2PrlnGrreWFbnSZqw=" }, "math-random": { "version": "1.0.1", @@ -7108,8 +6834,7 @@ "mdn-data": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/mdn-data/-/mdn-data-1.2.0.tgz", - "integrity": "sha512-esDqNvsJB2q5V28+u7NdtdMg6Rmg4khQmAVSjUiX7BY/7haIv0K2yWM43hYp0or+3nvG7+UaTF1JHz31hgU1TA==", - "dev": true + "integrity": "sha1-6t0osPLTB88n5xUkYJv7dJ6/wLY=" }, "media-typer": { "version": "0.3.0", @@ -7142,7 +6867,6 @@ "version": "1.0.4", "resolved": "https://registry.npmjs.org/merge-source-map/-/merge-source-map-1.0.4.tgz", "integrity": "sha1-pd5GU42uhNQRTMXqArR3KmNGcB8=", - "dev": true, "requires": { "source-map": "^0.5.6" } @@ -7150,8 +6874,7 @@ "merge2": { "version": "1.2.2", "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.2.2.tgz", - "integrity": "sha512-bgM8twH86rWni21thii6WCMQMRMmwqqdW3sGWi9IipnVAszdLXRjwDwAnyrVXo6DuP3AjRMMttZKUB48QWIFGg==", - "dev": true + "integrity": "sha1-AyEuPajYbE2FI869YxgZNBT5TjQ=" }, "methods": { "version": "1.1.2", @@ -7181,7 +6904,7 @@ "miller-rabin": { "version": "4.0.1", "resolved": "https://registry.npmjs.org/miller-rabin/-/miller-rabin-4.0.1.tgz", - "integrity": "sha512-115fLhvZVqWwHPbClyntxEVfVDfl9DLLTuJvq3g2O/Oxi8AiNouAHvDSzHS0viUJc+V5vm3eq91Xwqn9dp4jRA==", + "integrity": "sha1-8IA1HIZbDcViqEYpZtqlNUPHik0=", "requires": { "bn.js": "^4.0.0", "brorand": "^1.0.1" @@ -7190,12 +6913,12 @@ "mime-db": { "version": "1.33.0", "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.33.0.tgz", - "integrity": "sha512-BHJ/EKruNIqJf/QahvxwQZXKygOQ256myeN/Ew+THcAa5q+PjyTTMMeNQC4DZw5AwfvelsUrA6B67NKMqXDbzQ==" + "integrity": "sha1-o0kgUKXLm2NFBUHjnZeI0icng9s=" }, "mime-types": { "version": "2.1.18", "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.18.tgz", - "integrity": "sha512-lc/aahn+t4/SWV/qcmumYjymLsWfN3ELhpmVuUFjgsORruuZPVSwAQryq+HHGvO/SI2KVX26bx+En+zhM8g8hQ==", + "integrity": "sha1-bzI/YKg9ERRvgx/xH9ZuL+VQO7g=", "requires": { "mime-db": "~1.33.0" } @@ -7203,13 +6926,12 @@ "mimic-fn": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-1.2.0.tgz", - "integrity": "sha512-jf84uxzwiuiIVKiOLpfYk7N46TSy8ubTonmneY9vrpHNAnp0QBt2BxWV9dO3/j+BoVAb+a5G6YDPW3M5HOdMWQ==", - "dev": true + "integrity": "sha1-ggyGo5M0ZA6ZUWkovQP8qIBX0CI=" }, "minimalistic-assert": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/minimalistic-assert/-/minimalistic-assert-1.0.1.tgz", - "integrity": "sha512-UtJcAD4yEaGtjPezWuO9wC4nwUnVH/8/Im3yEHQP4b67cXlD/Qr9hdITCU1xDbSEXg2XKNaP8jsReV7vQd00/A==" + 
"integrity": "sha1-LhlN4ERibUoQ5/f7wAznPoPk1cc=" }, "minimalistic-crypto-utils": { "version": "1.0.1", @@ -7219,7 +6941,7 @@ "minimatch": { "version": "3.0.4", "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz", - "integrity": "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==", + "integrity": "sha1-UWbihkV/AzBgZL5Ul+jbsMPTIIM=", "requires": { "brace-expansion": "^1.1.7" } @@ -7232,12 +6954,12 @@ "mithril": { "version": "0.2.8", "resolved": "https://registry.npmjs.org/mithril/-/mithril-0.2.8.tgz", - "integrity": "sha512-9XuGnVmS2OyFexUuP/CcJFFJjHLM+RGYBxyVRNyQ6khbMfDJIF/xyZ4zq18ZRfPagpFmWUFpjHd5ZqPULGZyNg==" + "integrity": "sha1-zG1HC5Inzrs0JxhlW9Ch16sq+5Y=" }, "mixin-deep": { "version": "1.3.1", "resolved": "https://registry.npmjs.org/mixin-deep/-/mixin-deep-1.3.1.tgz", - "integrity": "sha512-8ZItLHeEgaqEvd5lYBXfm4EZSFCX29Jb9K+lAHhDKzReKBQKj3R+7NOF6tjqYi9t4oI8VUfaWITJQm86wnXGNQ==", + "integrity": "sha1-pJ5yaNzhoNlpjkUybFYm3zVD0P4=", "requires": { "for-in": "^1.0.2", "is-extendable": "^1.0.1" @@ -7246,7 +6968,7 @@ "is-extendable": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz", - "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==", + "integrity": "sha1-p0cPnkJnM9gb2B4RVSZOOjUHyrQ=", "requires": { "is-plain-object": "^2.0.4" } @@ -7277,7 +6999,7 @@ "mocha": { "version": "5.2.0", "resolved": "https://registry.npmjs.org/mocha/-/mocha-5.2.0.tgz", - "integrity": "sha512-2IUgKDhc3J7Uug+FxMXuqIyYzH7gJjXECKe/w43IGgQHTSj3InJi+yAA7T24L9bQMRKiUEHxEX37G5JpVUGLcQ==", + "integrity": "sha1-bYrlCPWRZ/lA8rWzxKYSrlDJCuY=", "requires": { "browser-stdout": "1.3.1", "commander": "2.15.1", @@ -7295,7 +7017,7 @@ "debug": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/debug/-/debug-3.1.0.tgz", - "integrity": "sha512-OX8XqP7/1a9cqkxYw2yXss15f26NKWBpDXQd0/uK/KPqdQhxbPa994hnzjcE2VqQpDslf55723cKPUOGSmMY3g==", + "integrity": "sha1-W7WgZyYotkFJVmuhaBnmFRjGcmE=", "requires": { "ms": "2.0.0" } @@ -7308,7 +7030,7 @@ "supports-color": { "version": "5.4.0", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.4.0.tgz", - "integrity": "sha512-zjaXglF5nnWpsq470jSv6P9DwPvgLkuapYmfDm3JWOm0vkNTVF2tI4UrN2r6jH1qM/uc/WtxYY1hYoA2dOKj5w==", + "integrity": "sha1-HGszdALCE3YF7+GfEP7DkPb6q1Q=", "requires": { "has-flag": "^3.0.0" } @@ -7401,12 +7123,12 @@ "nan": { "version": "2.10.0", "resolved": "https://registry.npmjs.org/nan/-/nan-2.10.0.tgz", - "integrity": "sha512-bAdJv7fBLhWC+/Bls0Oza+mvTaNQtP+1RyhhhvD95pgUJz6XM5IzgmxOkItJ9tkoCiplvAnXI1tNmmUD/eScyA==" + "integrity": "sha1-ltDNYQ69WNS03pzAxoKM2pnHVI8=" }, "nanomatch": { "version": "1.2.9", "resolved": "https://registry.npmjs.org/nanomatch/-/nanomatch-1.2.9.tgz", - "integrity": "sha512-n8R9bS8yQ6eSXaV6jHUpKzD8gLsin02w1HSFiegwrs9E098Ylhw5jdyKPaYqvHknHaSCKTPp7C8dGCQ0q9koXA==", + "integrity": "sha1-h59xUMstq3pHElkGbBBO7m4Pp8I=", "requires": { "arr-diff": "^4.0.0", "array-unique": "^0.3.2", @@ -7435,14 +7157,14 @@ "kind-of": { "version": "6.0.2", "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.2.tgz", - "integrity": "sha512-s5kLOcnH0XqDO+FvuaLX8DDjZ18CGFk7VygH40QoKPUQhW4e2rvM0rwUq0t8IQDOwYSeLK01U90OjzBTme2QqA==" + "integrity": "sha1-ARRrNqYhjmTljzqNZt5df8b20FE=" } } }, "nanosocket": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/nanosocket/-/nanosocket-1.1.0.tgz", - "integrity": 
"sha512-v2LsjYMRu3/JT/z8Qpkj1X5dgwCptC3D0NzeYlb7tb2qGJhlx0PSXZJraf1tRPKipj2iwB15GRzqUaOlN+LieQ==" + "integrity": "sha1-VrAEZ6o+Mzq8NOEInJRmgrFEYj8=" }, "natural-compare": { "version": "1.4.0", @@ -7458,7 +7180,7 @@ "ngrok": { "version": "3.0.1", "resolved": "https://registry.npmjs.org/ngrok/-/ngrok-3.0.1.tgz", - "integrity": "sha512-+vRPAszUa+Ni9aZhbeT/iogv9cLlr6mtekspD4Ivrtc7EnZGV6q7PIjrUZIHyfT1zpq4+yIgqW//LIgGLbaMZA==", + "integrity": "sha1-G+D7WgLGGNpUWKpouDSUtWGoSvI=", "optional": true, "requires": { "@types/node": "^8.0.19", @@ -7471,19 +7193,17 @@ "nice-try": { "version": "1.0.5", "resolved": "https://registry.npmjs.org/nice-try/-/nice-try-1.0.5.tgz", - "integrity": "sha512-1nh45deeb5olNY7eX82BkPO7SSxR5SSYJiPTrTdFUVYwAl8CKMA5N9PjTYkHiRjisVcxcQ1HXdLhx2qxxJzLNQ==", - "dev": true + "integrity": "sha1-ozeKdpbOfSI+iPybdkvX7xCJ42Y=" }, "node-forge": { "version": "0.7.6", "resolved": "https://registry.npmjs.org/node-forge/-/node-forge-0.7.6.tgz", - "integrity": "sha512-sol30LUpz1jQFBjOKwbjxijiE3b6pjd74YwfD0fJOKPjF+fONKb2Yg8rYgS6+bK6VDl+/wfr4IYpC7jDzLUIfw==", - "dev": true + "integrity": "sha1-/fO0GK7h+U8O9kLNY0hsd8qXJKw=" }, "node-gyp": { "version": "3.8.0", "resolved": "https://registry.npmjs.org/node-gyp/-/node-gyp-3.8.0.tgz", - "integrity": "sha512-3g8lYefrRRzvGeSowdJKAKyks8oUpLEd/DyPV4eMhVlhJ0aNaZqIrNUIPuEWWTAoPqyFkfGrM67MC69baqn6vA==", + "integrity": "sha1-VAMEJhwzDoDQ1e3OJTpoyzlkIYw=", "requires": { "fstream": "^1.0.0", "glob": "^7.0.3", @@ -7509,8 +7229,7 @@ "node-libs-browser": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/node-libs-browser/-/node-libs-browser-2.1.0.tgz", - "integrity": "sha512-5AzFzdoIMb89hBGMZglEegffzgRg+ZFoUmisQ8HI4j1KDdpx13J0taNp2y9xPbur6W61gepGDDotGBVQ7mfUCg==", - "dev": true, + "integrity": "sha1-X5QmPUBPbkR2fXJpAf/wVHjWAN8=", "requires": { "assert": "^1.1.1", "browserify-zlib": "^0.2.0", @@ -7540,8 +7259,7 @@ "browserify-zlib": { "version": "0.2.0", "resolved": "https://registry.npmjs.org/browserify-zlib/-/browserify-zlib-0.2.0.tgz", - "integrity": "sha512-Z942RysHXmJrhqk88FmKBVq/v5tqmSkDz7p54G/MGyjMnCFFnC79XWNbg+Vta8W6Wb2qtSZTSxIGkJrRpCFEiA==", - "dev": true, + "integrity": "sha1-KGlFnZqjviRf6P4sofRuLn9U1z8=", "requires": { "pako": "~1.0.5" } @@ -7549,16 +7267,14 @@ "pako": { "version": "1.0.6", "resolved": "https://registry.npmjs.org/pako/-/pako-1.0.6.tgz", - "integrity": "sha512-lQe48YPsMJAig+yngZ87Lus+NF+3mtu7DVOBu6b/gHO1YpKwIj5AWjZ/TOS7i46HD/UixzWb1zeWDZfGZ3iYcg==", - "dev": true + "integrity": "sha1-AQEhG6pwxLykoPY/Igbpe3368lg=" } } }, "node-releases": { "version": "1.0.0-alpha.11", "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-1.0.0-alpha.11.tgz", - "integrity": "sha512-CaViu+2FqTNYOYNihXa5uPS/zry92I3vPU4nCB6JB3OeZ2UGtOpF5gRwuN4+m3hbEcL47bOXyun1jX2iC+3uEQ==", - "dev": true, + "integrity": "sha1-c8gQrMLlt0Ghfd+7Od/Kmrk1nYo=", "requires": { "semver": "^5.3.0" } @@ -7566,7 +7282,7 @@ "node-sass": { "version": "4.9.3", "resolved": "https://registry.npmjs.org/node-sass/-/node-sass-4.9.3.tgz", - "integrity": "sha512-XzXyGjO+84wxyH7fV6IwBOTrEBe2f0a6SBze9QWWYR/cL74AcQUks2AsqcCZenl/Fp/JVbuEaLpgrLtocwBUww==", + "integrity": "sha1-9AfPPWb3gwi7HjRrJPpChwMZYiQ=", "requires": { "async-foreach": "^0.1.3", "chalk": "^1.1.1", @@ -7600,7 +7316,7 @@ "normalize-package-data": { "version": "2.4.0", "resolved": "https://registry.npmjs.org/normalize-package-data/-/normalize-package-data-2.4.0.tgz", - "integrity": "sha512-9jjUFbTPfEy3R/ad/2oNbKtW9Hgovl5O1FvFWKkKblNXoN/Oou6+9+KKohPK13Yc3/TyunyWhJp6gvRNR/PPAw==", + "integrity": 
"sha1-EvlaMH1YNSB1oEkHuErIvpisAS8=", "requires": { "hosted-git-info": "^2.1.4", "is-builtin-module": "^1.0.0", @@ -7619,14 +7335,12 @@ "normalize-range": { "version": "0.1.2", "resolved": "https://registry.npmjs.org/normalize-range/-/normalize-range-0.1.2.tgz", - "integrity": "sha1-LRDAa9/TEuqXd2laTShDlFa3WUI=", - "dev": true + "integrity": "sha1-LRDAa9/TEuqXd2laTShDlFa3WUI=" }, "normalize-url": { "version": "1.9.1", "resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-1.9.1.tgz", "integrity": "sha1-LMDWazHqIwNkWENuNiDYWVTGbDw=", - "dev": true, "requires": { "object-assign": "^4.0.1", "prepend-http": "^1.0.0", @@ -7637,12 +7351,12 @@ "npm-font-open-sans": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/npm-font-open-sans/-/npm-font-open-sans-1.1.0.tgz", - "integrity": "sha512-t1y5ShWm6a8FSLwBdINT47XYMcuKY2rkTBsTdz/76YB2MtX0YD89RUkY2eSS2/XOmlZfBe1HFBAwD+b9+/UfmQ==" + "integrity": "sha1-jCelbkOHJ0e4RI3MMGU6ZJhmxu8=" }, "npmlog": { "version": "4.1.2", "resolved": "https://registry.npmjs.org/npmlog/-/npmlog-4.1.2.tgz", - "integrity": "sha512-2uUqazuKlTaSI/dC8AzicUck7+IrEaOnN/e0jd3Xtt1KcGpwx30v50mL7oPyr/h9bL3E4aZccVwpwP+5W9Vjkg==", + "integrity": "sha1-CKfyqL9zRgR3mp76StXMcXq7lUs=", "requires": { "are-we-there-yet": "~1.1.2", "console-control-strings": "~1.1.0", @@ -7654,7 +7368,6 @@ "version": "1.0.1", "resolved": "https://registry.npmjs.org/nth-check/-/nth-check-1.0.1.tgz", "integrity": "sha1-mSms32KPwsQQmN6rgqxYDPFJquQ=", - "dev": true, "requires": { "boolbase": "~1.0.0" } @@ -7662,8 +7375,7 @@ "num2fraction": { "version": "1.2.2", "resolved": "https://registry.npmjs.org/num2fraction/-/num2fraction-1.2.2.tgz", - "integrity": "sha1-b2gragJ6Tp3fpFZM0lidHU5mnt4=", - "dev": true + "integrity": "sha1-b2gragJ6Tp3fpFZM0lidHU5mnt4=" }, "number-is-nan": { "version": "1.0.1", @@ -7703,8 +7415,7 @@ "object-inspect": { "version": "1.4.1", "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.4.1.tgz", - "integrity": "sha512-wqdhLpfCUbEsoEwl3FXwGyv8ief1k/1aUdIPCqVnupM6e8l63BEJdiF/0swtn04/8p05tG/T0FrpTlfwvljOdw==", - "dev": true + "integrity": "sha1-N/+xDnGtrzdI0F9xO0yUUvQCy8Q=" }, "object-visit": { "version": "1.0.1", @@ -7725,7 +7436,6 @@ "version": "2.0.3", "resolved": "https://registry.npmjs.org/object.getownpropertydescriptors/-/object.getownpropertydescriptors-2.0.3.tgz", "integrity": "sha1-h1jIRvW0B62rDyNuCYbxSwUcqhY=", - "dev": true, "requires": { "define-properties": "^1.1.2", "es-abstract": "^1.5.1" @@ -7759,7 +7469,6 @@ "version": "1.0.4", "resolved": "https://registry.npmjs.org/object.values/-/object.values-1.0.4.tgz", "integrity": "sha1-5STaCbT2b/Bd9FdUbscqyZ8TBpo=", - "dev": true, "requires": { "define-properties": "^1.1.2", "es-abstract": "^1.6.1", @@ -7792,7 +7501,6 @@ "version": "2.0.1", "resolved": "https://registry.npmjs.org/onetime/-/onetime-2.0.1.tgz", "integrity": "sha1-BnQoIw/WdEOyeUsiu6UotoZ5YtQ=", - "dev": true, "requires": { "mimic-fn": "^1.0.0" } @@ -7800,8 +7508,7 @@ "opn": { "version": "5.3.0", "resolved": "https://registry.npmjs.org/opn/-/opn-5.3.0.tgz", - "integrity": "sha512-bYJHo/LOmoTd+pfiYhfZDnf9zekVJrY+cnS2a5F2x+w5ppvTqObojTP7WiFG+kVZs9Inw+qQ/lw7TroWwhdd2g==", - "dev": true, + "integrity": "sha1-ZIcVZchjh18FLP31PT48ta21Oxw=", "requires": { "is-wsl": "^1.1.0" } @@ -7810,7 +7517,6 @@ "version": "0.8.2", "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.8.2.tgz", "integrity": "sha1-NkxeQJ0/TWMB1sC0wFu6UBgK62Q=", - "dev": true, "requires": { "deep-is": "~0.1.3", "fast-levenshtein": "~2.0.4", @@ -7823,16 
+7529,14 @@ "wordwrap": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/wordwrap/-/wordwrap-1.0.0.tgz", - "integrity": "sha1-J1hIEIkUVqQXHI0CJkQa3pDLyus=", - "dev": true + "integrity": "sha1-J1hIEIkUVqQXHI0CJkQa3pDLyus=" } } }, "ora": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/ora/-/ora-2.1.0.tgz", - "integrity": "sha512-hNNlAd3gfv/iPmsNxYoAPLvxg7HuPozww7fFonMZvL84tP6Ox5igfk5j/+a9rtJJwqMgKK+JgWsAQik5o0HTLA==", - "dev": true, + "integrity": "sha1-bK8oMOuSSUGGHsU6FzeZ4Ai1Hls=", "requires": { "chalk": "^2.3.1", "cli-cursor": "^2.1.0", @@ -7845,14 +7549,12 @@ "ansi-regex": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-3.0.0.tgz", - "integrity": "sha1-7QMXwyIGT3lGbAKWa922Bas32Zg=", - "dev": true + "integrity": "sha1-7QMXwyIGT3lGbAKWa922Bas32Zg=" }, "ansi-styles": { "version": "3.2.1", "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", - "dev": true, + "integrity": "sha1-QfuyAkPlCxK+DwS43tvwdSDOhB0=", "requires": { "color-convert": "^1.9.0" } @@ -7860,8 +7562,7 @@ "chalk": { "version": "2.4.1", "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.1.tgz", - "integrity": "sha512-ObN6h1v2fTJSmUXoS3nMQ92LbDK9be4TV+6G+omQlGJFdcUX5heKi1LZ1YnRMIgwTLEj3E24bT6tYni50rlCfQ==", - "dev": true, + "integrity": "sha1-GMSasWoDe26wFSzIPjRxM4IVtm4=", "requires": { "ansi-styles": "^3.2.1", "escape-string-regexp": "^1.0.5", @@ -7872,7 +7573,6 @@ "version": "4.0.0", "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-4.0.0.tgz", "integrity": "sha1-qEeQIusaw2iocTibY1JixQXuNo8=", - "dev": true, "requires": { "ansi-regex": "^3.0.0" } @@ -7905,7 +7605,7 @@ "osenv": { "version": "0.1.5", "resolved": "https://registry.npmjs.org/osenv/-/osenv-0.1.5.tgz", - "integrity": "sha512-0CWcCECdMVc2Rw3U5w9ZjqX6ga6ubk1xDVKxtBQPK7wis/0F2r9T6k4ydGYhecl7YUBxBVxhL5oisPsNxAPe2g==", + "integrity": "sha1-hc36+uso6Gd/QW4odZK18/SepBA=", "requires": { "os-homedir": "^1.0.0", "os-tmpdir": "^1.0.0" @@ -7929,8 +7629,7 @@ "parcel-bundler": { "version": "1.9.7", "resolved": "https://registry.npmjs.org/parcel-bundler/-/parcel-bundler-1.9.7.tgz", - "integrity": "sha512-x+RiXe/C+aOoFuw+acH/NKjKmUJ/2zbFWFUS/KE5jBk2ErsN0Dc3OxLpmEaeIMU4oMPWFeNm5mRXcXdeUwf7GA==", - "dev": true, + "integrity": "sha1-XNAIUN6gJU03cAXVWrS/YEKborw=", "requires": { "ansi-to-html": "^0.6.4", "babel-code-frame": "^6.26.0", @@ -7991,14 +7690,12 @@ "ansi-regex": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-3.0.0.tgz", - "integrity": "sha1-7QMXwyIGT3lGbAKWa922Bas32Zg=", - "dev": true + "integrity": "sha1-7QMXwyIGT3lGbAKWa922Bas32Zg=" }, "ansi-styles": { "version": "3.2.1", "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", - "dev": true, + "integrity": "sha1-QfuyAkPlCxK+DwS43tvwdSDOhB0=", "requires": { "color-convert": "^1.9.0" } @@ -8006,20 +7703,17 @@ "arr-diff": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/arr-diff/-/arr-diff-4.0.0.tgz", - "integrity": "sha1-1kYQdP6/7HHn4VI1dhoyml3HxSA=", - "dev": true + "integrity": "sha1-1kYQdP6/7HHn4VI1dhoyml3HxSA=" }, "array-unique": { "version": "0.3.2", "resolved": "https://registry.npmjs.org/array-unique/-/array-unique-0.3.2.tgz", - "integrity": "sha1-qJS3XUvE9s1nnvMkSp/Y9Gri1Cg=", - "dev": true + "integrity": 
"sha1-qJS3XUvE9s1nnvMkSp/Y9Gri1Cg=" }, "braces": { "version": "2.3.2", "resolved": "https://registry.npmjs.org/braces/-/braces-2.3.2.tgz", - "integrity": "sha512-aNdbnj9P8PjdXU4ybaWLK2IF3jc/EoDYbC7AazW6to3TRsfXxscC9UXOB5iDiEQrkyIbWp2SLQda4+QAa7nc3w==", - "dev": true, + "integrity": "sha1-WXn9PxTNUxVl5fot8av/8d+u5yk=", "requires": { "arr-flatten": "^1.1.0", "array-unique": "^0.3.2", @@ -8037,7 +7731,6 @@ "version": "2.0.1", "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz", "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=", - "dev": true, "requires": { "is-extendable": "^0.1.0" } @@ -8047,8 +7740,7 @@ "chalk": { "version": "2.4.1", "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.1.tgz", - "integrity": "sha512-ObN6h1v2fTJSmUXoS3nMQ92LbDK9be4TV+6G+omQlGJFdcUX5heKi1LZ1YnRMIgwTLEj3E24bT6tYni50rlCfQ==", - "dev": true, + "integrity": "sha1-GMSasWoDe26wFSzIPjRxM4IVtm4=", "requires": { "ansi-styles": "^3.2.1", "escape-string-regexp": "^1.0.5", @@ -8058,14 +7750,12 @@ "clone": { "version": "2.1.2", "resolved": "https://registry.npmjs.org/clone/-/clone-2.1.2.tgz", - "integrity": "sha1-G39Ln1kfHo+DZwQBYANFoCiHQ18=", - "dev": true + "integrity": "sha1-G39Ln1kfHo+DZwQBYANFoCiHQ18=" }, "cross-spawn": { "version": "6.0.5", "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-6.0.5.tgz", - "integrity": "sha512-eTVLrBSt7fjbDygz805pMnstIs2VTBNkRm0qxZd+M7A5XDdxVRWO5MxGBXZhjY4cqLYLdtrGqRf8mBPmzwSpWQ==", - "dev": true, + "integrity": "sha1-Sl7Hxk364iw6FBJNus3uhG2Ay8Q=", "requires": { "nice-try": "^1.0.4", "path-key": "^2.0.1", @@ -8077,8 +7767,7 @@ "cssnano": { "version": "4.1.0", "resolved": "https://registry.npmjs.org/cssnano/-/cssnano-4.1.0.tgz", - "integrity": "sha512-7x24b/ghbrQv2QRgqMR12H3ZZ38xYCKJSXfg21YCtnIE177/NyvMkeiuQdWauIgMjySaTZ+cd5PN2qvfbsGeSw==", - "dev": true, + "integrity": "sha1-aCw3uEubffYWRQpajckmm5vRBzQ=", "requires": { "cosmiconfig": "^5.0.0", "cssnano-preset-default": "^4.0.0", @@ -8090,7 +7779,6 @@ "version": "2.1.4", "resolved": "https://registry.npmjs.org/expand-brackets/-/expand-brackets-2.1.4.tgz", "integrity": "sha1-t3c14xXOMPa27/D4OwQVGiJEliI=", - "dev": true, "requires": { "debug": "^2.3.3", "define-property": "^0.2.5", @@ -8105,7 +7793,6 @@ "version": "0.2.5", "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz", "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=", - "dev": true, "requires": { "is-descriptor": "^0.1.0" } @@ -8114,7 +7801,6 @@ "version": "2.0.1", "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz", "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=", - "dev": true, "requires": { "is-extendable": "^0.1.0" } @@ -8123,7 +7809,6 @@ "version": "0.1.6", "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-0.1.6.tgz", "integrity": "sha1-qeEss66Nh2cn7u84Q/igiXtcmNY=", - "dev": true, "requires": { "kind-of": "^3.0.2" }, @@ -8132,7 +7817,6 @@ "version": "3.2.2", "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=", - "dev": true, "requires": { "is-buffer": "^1.1.5" } @@ -8143,7 +7827,6 @@ "version": "0.1.4", "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-0.1.4.tgz", "integrity": "sha1-C17mSDiOLIYCgueT8YVv7D8wG1Y=", - "dev": true, "requires": { "kind-of": "^3.0.2" }, @@ -8152,7 +7835,6 @@ "version": "3.2.2", "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=", - 
"dev": true, "requires": { "is-buffer": "^1.1.5" } @@ -8162,8 +7844,7 @@ "is-descriptor": { "version": "0.1.6", "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-0.1.6.tgz", - "integrity": "sha512-avDYr0SB3DwO9zsMov0gKCESFYqCnE4hq/4z3TdUlukEy5t9C0YRq7HLrsN52NAcqXKaepeCD0n+B0arnVG3Hg==", - "dev": true, + "integrity": "sha1-Nm2CQN3kh8pRgjsaufB6EKeCUco=", "requires": { "is-accessor-descriptor": "^0.1.6", "is-data-descriptor": "^0.1.4", @@ -8173,16 +7854,14 @@ "kind-of": { "version": "5.1.0", "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-5.1.0.tgz", - "integrity": "sha512-NGEErnH6F2vUuXDh+OlbcKW7/wOcfdRHaZ7VWtqCztfHri/++YKmP51OdWeGPuqCOba6kk2OTe5d02VmTB80Pw==", - "dev": true + "integrity": "sha1-cpyR4thXt6QZofmqZWhcTDP1hF0=" } } }, "extglob": { "version": "2.0.4", "resolved": "https://registry.npmjs.org/extglob/-/extglob-2.0.4.tgz", - "integrity": "sha512-Nmb6QXkELsuBr24CJSkilo6UHHgbekK5UiZgfE6UHD3Eb27YC6oD+bhcT+tJ6cl8dmsgdQxnWlcry8ksBIBLpw==", - "dev": true, + "integrity": "sha1-rQD+TcYSqSMuhxhxHcXLWrAoVUM=", "requires": { "array-unique": "^0.3.2", "define-property": "^1.0.0", @@ -8198,7 +7877,6 @@ "version": "1.0.0", "resolved": "https://registry.npmjs.org/define-property/-/define-property-1.0.0.tgz", "integrity": "sha1-dp66rz9KY6rTr56NMEybvnm/sOY=", - "dev": true, "requires": { "is-descriptor": "^1.0.0" } @@ -8207,7 +7885,6 @@ "version": "2.0.1", "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz", "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=", - "dev": true, "requires": { "is-extendable": "^0.1.0" } @@ -8218,7 +7895,6 @@ "version": "4.0.0", "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-4.0.0.tgz", "integrity": "sha1-1USBHUKPmOsGpj3EAtJAPDKMOPc=", - "dev": true, "requires": { "extend-shallow": "^2.0.1", "is-number": "^3.0.0", @@ -8230,7 +7906,6 @@ "version": "2.0.1", "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz", "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=", - "dev": true, "requires": { "is-extendable": "^0.1.0" } @@ -8240,8 +7915,7 @@ "is-accessor-descriptor": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz", - "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==", - "dev": true, + "integrity": "sha1-FpwvbT3x+ZJhgHI2XJsOofaHhlY=", "requires": { "kind-of": "^6.0.0" } @@ -8249,8 +7923,7 @@ "is-data-descriptor": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz", - "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==", - "dev": true, + "integrity": "sha1-2Eh2Mh0Oet0DmQQGq7u9NrqSaMc=", "requires": { "kind-of": "^6.0.0" } @@ -8258,8 +7931,7 @@ "is-descriptor": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz", - "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==", - "dev": true, + "integrity": "sha1-OxWXRqZmBLBPjIFSS6NlxfFNhuw=", "requires": { "is-accessor-descriptor": "^1.0.0", "is-data-descriptor": "^1.0.0", @@ -8269,14 +7941,12 @@ "is-extglob": { "version": "2.1.1", "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", - "integrity": "sha1-qIwCU1eR8C7TfHahueqXc8gz+MI=", - "dev": true + "integrity": "sha1-qIwCU1eR8C7TfHahueqXc8gz+MI=" }, "is-glob": { "version": "4.0.0", "resolved": 
"https://registry.npmjs.org/is-glob/-/is-glob-4.0.0.tgz", "integrity": "sha1-lSHHaEXMJhCoUgPd8ICpWML/q8A=", - "dev": true, "requires": { "is-extglob": "^2.1.1" } @@ -8285,7 +7955,6 @@ "version": "3.0.0", "resolved": "https://registry.npmjs.org/is-number/-/is-number-3.0.0.tgz", "integrity": "sha1-JP1iAaR4LPUFYcgQJ2r8fRLXEZU=", - "dev": true, "requires": { "kind-of": "^3.0.2" }, @@ -8294,7 +7963,6 @@ "version": "3.2.2", "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=", - "dev": true, "requires": { "is-buffer": "^1.1.5" } @@ -8304,14 +7972,12 @@ "isobject": { "version": "3.0.1", "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz", - "integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8=", - "dev": true + "integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8=" }, "js-yaml": { "version": "3.12.0", "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.12.0.tgz", - "integrity": "sha512-PIt2cnwmPfL4hKNwqeiuz4bKfnzHTBv6HyVgjahA6mPLwPDzjDWrplJBMjHUFxku/N3FlmrbyPclad+I+4mJ3A==", - "dev": true, + "integrity": "sha1-6u1lbsg0TxD1J8a/obbiJE3hZ9E=", "requires": { "argparse": "^1.0.7", "esprima": "^4.0.0" @@ -8320,8 +7986,7 @@ "json5": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/json5/-/json5-1.0.1.tgz", - "integrity": "sha512-aKS4WQjPenRxiQsC93MNfjx+nbF4PAdYzmd/1JIj8HYzqfbu86beTuNgXDzPknWk0n0uARlyewZo4s++ES36Ow==", - "dev": true, + "integrity": "sha1-d5+wAYYE+oVOrL9iUhgNg1Q+Pb4=", "requires": { "minimist": "^1.2.0" } @@ -8329,14 +7994,12 @@ "kind-of": { "version": "6.0.2", "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.2.tgz", - "integrity": "sha512-s5kLOcnH0XqDO+FvuaLX8DDjZ18CGFk7VygH40QoKPUQhW4e2rvM0rwUq0t8IQDOwYSeLK01U90OjzBTme2QqA==", - "dev": true + "integrity": "sha1-ARRrNqYhjmTljzqNZt5df8b20FE=" }, "micromatch": { "version": "3.1.10", "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-3.1.10.tgz", - "integrity": "sha512-MWikgl9n9M3w+bpsY3He8L+w9eF9338xRl8IAO5viDizwSzziFEyUzo2xrrloB64ADbTf8uA8vRqqttDTOmccg==", - "dev": true, + "integrity": "sha1-cIWbyVyYQJUvNZoGij/En57PrCM=", "requires": { "arr-diff": "^4.0.0", "array-unique": "^0.3.2", @@ -8356,14 +8019,12 @@ "source-map": { "version": "0.6.1", "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "dev": true + "integrity": "sha1-dHIq8y6WFOnCh6jQu95IteLxomM=" }, "strip-ansi": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-4.0.0.tgz", "integrity": "sha1-qEeQIusaw2iocTibY1JixQXuNo8=", - "dev": true, "requires": { "ansi-regex": "^3.0.0" } @@ -8381,7 +8042,7 @@ "parse-asn1": { "version": "5.1.1", "resolved": "https://registry.npmjs.org/parse-asn1/-/parse-asn1-5.1.1.tgz", - "integrity": "sha512-KPx7flKXg775zZpnp9SxJlz00gTd4BmJ2yJufSc44gMCRrRQ7NSzAcSJQfifuOLgW6bEi+ftrALtsgALeB2Adw==", + "integrity": "sha1-9r8pOBgzK9DatU77Fgh3JHRebKg=", "requires": { "asn1.js": "^4.0.0", "browserify-aes": "^1.0.0", @@ -8393,7 +8054,7 @@ "browserify-aes": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/browserify-aes/-/browserify-aes-1.2.0.tgz", - "integrity": "sha512-+7CHXqGuspUn/Sl5aO7Ea0xWGAtETPXNSAjHo48JfLdPWcMng33Xe4znFvQweqc/uzk5zSOI3H52CYnjCfb5hA==", + "integrity": "sha1-Mmc0ZC9APavDADIJhTu3CtQo70g=", "requires": { "buffer-xor": "^1.0.3", "cipher-base": "^1.0.0", @@ -8458,8 +8119,7 @@ "path-key": { "version": "2.0.1", "resolved": 
"https://registry.npmjs.org/path-key/-/path-key-2.0.1.tgz", - "integrity": "sha1-QRyttXTFoUDTpLGRDUDYDMn0C0A=", - "dev": true + "integrity": "sha1-QRyttXTFoUDTpLGRDUDYDMn0C0A=" }, "path-parse": { "version": "1.0.5", @@ -8501,7 +8161,7 @@ "pbkdf2": { "version": "3.0.16", "resolved": "https://registry.npmjs.org/pbkdf2/-/pbkdf2-3.0.16.tgz", - "integrity": "sha512-y4CXP3thSxqf7c0qmOF+9UeOTrifiVTIM+u7NWlq+PRsHbr7r7dpCmvzrZxa96JJUNi0Y5w9VqG5ZNeCVMoDcA==", + "integrity": "sha1-dAQgjsawG2LYW/g4U6gGT42cKlw=", "requires": { "create-hash": "^1.1.2", "create-hmac": "^1.1.4", @@ -8513,7 +8173,7 @@ "ripemd160": { "version": "2.0.2", "resolved": "https://registry.npmjs.org/ripemd160/-/ripemd160-2.0.2.tgz", - "integrity": "sha512-ii4iagi25WusVoiC4B4lq7pbXfAp3D9v5CwfkY33vffw2+pkDjY1D8GaN7spsxvCSx8dkPqOZCEZyfxcmJG2IA==", + "integrity": "sha1-ocGm9iR1FXe6XQeRTLyShQWFiQw=", "requires": { "hash-base": "^3.0.0", "inherits": "^2.0.1" @@ -8522,7 +8182,7 @@ "sha.js": { "version": "2.4.11", "resolved": "https://registry.npmjs.org/sha.js/-/sha.js-2.4.11.tgz", - "integrity": "sha512-QMEp5B7cftE7APOjk5Y6xgrbWu+WkLVQwk8JNjZ8nKRciZaByEW6MubieAiToS7+dwvrjGhH8jRXz3MVd0AYqQ==", + "integrity": "sha1-N6XPC4HsvGlD3hCbopYNGyZYSuc=", "requires": { "inherits": "^2.0.1", "safe-buffer": "^5.0.1" @@ -8543,8 +8203,7 @@ "physical-cpu-count": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/physical-cpu-count/-/physical-cpu-count-2.0.0.tgz", - "integrity": "sha1-GN4vl+S/epVRrXURlCtUlverpmA=", - "dev": true + "integrity": "sha1-GN4vl+S/epVRrXURlCtUlverpmA=" }, "pinkie": { "version": "2.0.4", @@ -8562,18 +8221,18 @@ "platform": { "version": "1.3.5", "resolved": "https://registry.npmjs.org/platform/-/platform-1.3.5.tgz", - "integrity": "sha512-TuvHS8AOIZNAlE77WUDiR4rySV/VMptyMfcfeoMgs4P8apaZM3JrnbzBiixKUv+XR6i+BXrQh8WAnjaSPFO65Q==" + "integrity": "sha1-+2lYxpbgfikY0u7aDwvJRI1zNEQ=" }, "pluralize": { "version": "7.0.0", "resolved": "https://registry.npmjs.org/pluralize/-/pluralize-7.0.0.tgz", - "integrity": "sha512-ARhBOdzS3e41FbkW/XWrTEtukqqLoK5+Z/4UeDaLuSW+39JPeFgs4gCGqsrJHVZX0fUrx//4OF0K1CUGwlIFow==", + "integrity": "sha1-KYuJ34uTsCIdv0Ia0rGx6iP8Z3c=", "dev": true }, "popper": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/popper/-/popper-1.0.1.tgz", - "integrity": "sha512-i/6yRlY5+VomX0ScQ5TI4Ro8XlQbOj7NMRf0hSnUEVv/aAP6IbrxvNcRsrEG6VsKzvltfINPeD4p++6SKwOTSA==", + "integrity": "sha1-oxU5EKAc4yiil8skGSSv/waQg34=", "requires": { "browser-icons": "*", "browserify": "^12.0.0", @@ -8615,8 +8274,7 @@ "postcss": { "version": "6.0.23", "resolved": "https://registry.npmjs.org/postcss/-/postcss-6.0.23.tgz", - "integrity": "sha512-soOk1h6J3VMTZtVeVpv15/Hpdl2cBLX3CAw4TAbkpTJiNPk9YP/zWcD1ND+xEtvyuuvKzbxliTOIyvkSeSJ6ag==", - "dev": true, + "integrity": "sha1-YcgswyisYOZ3ZF+XkFTrmLwOMyQ=", "requires": { "chalk": "^2.4.1", "source-map": "^0.6.1", @@ -8626,8 +8284,7 @@ "ansi-styles": { "version": "3.2.1", "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", - "dev": true, + "integrity": "sha1-QfuyAkPlCxK+DwS43tvwdSDOhB0=", "requires": { "color-convert": "^1.9.0" } @@ -8635,8 +8292,7 @@ "chalk": { "version": "2.4.1", "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.1.tgz", - "integrity": "sha512-ObN6h1v2fTJSmUXoS3nMQ92LbDK9be4TV+6G+omQlGJFdcUX5heKi1LZ1YnRMIgwTLEj3E24bT6tYni50rlCfQ==", - "dev": true, + "integrity": "sha1-GMSasWoDe26wFSzIPjRxM4IVtm4=", "requires": { "ansi-styles": 
"^3.2.1", "escape-string-regexp": "^1.0.5", @@ -8646,8 +8302,7 @@ "source-map": { "version": "0.6.1", "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "dev": true + "integrity": "sha1-dHIq8y6WFOnCh6jQu95IteLxomM=" } } }, @@ -8655,7 +8310,6 @@ "version": "5.3.1", "resolved": "https://registry.npmjs.org/postcss-calc/-/postcss-calc-5.3.1.tgz", "integrity": "sha1-d7rnypKK2FcW4v2kLyYb98HWW14=", - "dev": true, "requires": { "postcss": "^5.0.2", "postcss-message-helpers": "^2.0.0", @@ -8665,14 +8319,12 @@ "has-flag": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-1.0.0.tgz", - "integrity": "sha1-nZ55MWXOAXoA8AQYxD+UKnsdEfo=", - "dev": true + "integrity": "sha1-nZ55MWXOAXoA8AQYxD+UKnsdEfo=" }, "postcss": { "version": "5.2.18", "resolved": "https://registry.npmjs.org/postcss/-/postcss-5.2.18.tgz", - "integrity": "sha512-zrUjRRe1bpXKsX1qAJNJjqZViErVuyEkMTRrwu4ud4sbTtIBRmtaYDrHmcGgmrbsW3MHfmtIf+vJumgQn+PrXg==", - "dev": true, + "integrity": "sha1-ut+hSX1GJE9jkPWLMZgw2RB4U8U=", "requires": { "chalk": "^1.1.3", "js-base64": "^2.1.9", @@ -8684,7 +8336,6 @@ "version": "3.2.3", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-3.2.3.tgz", "integrity": "sha1-ZawFBLOVQXHYpklGsq48u4pfVPY=", - "dev": true, "requires": { "has-flag": "^1.0.0" } @@ -8695,7 +8346,6 @@ "version": "2.2.2", "resolved": "https://registry.npmjs.org/postcss-colormin/-/postcss-colormin-2.2.2.tgz", "integrity": "sha1-ZjFBfV8OkJo9fsJrJMio0eT5bks=", - "dev": true, "requires": { "colormin": "^1.0.5", "postcss": "^5.0.13", @@ -8705,14 +8355,12 @@ "has-flag": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-1.0.0.tgz", - "integrity": "sha1-nZ55MWXOAXoA8AQYxD+UKnsdEfo=", - "dev": true + "integrity": "sha1-nZ55MWXOAXoA8AQYxD+UKnsdEfo=" }, "postcss": { "version": "5.2.18", "resolved": "https://registry.npmjs.org/postcss/-/postcss-5.2.18.tgz", - "integrity": "sha512-zrUjRRe1bpXKsX1qAJNJjqZViErVuyEkMTRrwu4ud4sbTtIBRmtaYDrHmcGgmrbsW3MHfmtIf+vJumgQn+PrXg==", - "dev": true, + "integrity": "sha1-ut+hSX1GJE9jkPWLMZgw2RB4U8U=", "requires": { "chalk": "^1.1.3", "js-base64": "^2.1.9", @@ -8724,7 +8372,6 @@ "version": "3.2.3", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-3.2.3.tgz", "integrity": "sha1-ZawFBLOVQXHYpklGsq48u4pfVPY=", - "dev": true, "requires": { "has-flag": "^1.0.0" } @@ -8735,7 +8382,6 @@ "version": "2.6.1", "resolved": "https://registry.npmjs.org/postcss-convert-values/-/postcss-convert-values-2.6.1.tgz", "integrity": "sha1-u9hZPFwf0uPRwyK7kl3K6Nrk1i0=", - "dev": true, "requires": { "postcss": "^5.0.11", "postcss-value-parser": "^3.1.2" @@ -8744,14 +8390,12 @@ "has-flag": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-1.0.0.tgz", - "integrity": "sha1-nZ55MWXOAXoA8AQYxD+UKnsdEfo=", - "dev": true + "integrity": "sha1-nZ55MWXOAXoA8AQYxD+UKnsdEfo=" }, "postcss": { "version": "5.2.18", "resolved": "https://registry.npmjs.org/postcss/-/postcss-5.2.18.tgz", - "integrity": "sha512-zrUjRRe1bpXKsX1qAJNJjqZViErVuyEkMTRrwu4ud4sbTtIBRmtaYDrHmcGgmrbsW3MHfmtIf+vJumgQn+PrXg==", - "dev": true, + "integrity": "sha1-ut+hSX1GJE9jkPWLMZgw2RB4U8U=", "requires": { "chalk": "^1.1.3", "js-base64": "^2.1.9", @@ -8763,7 +8407,6 @@ "version": "3.2.3", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-3.2.3.tgz", "integrity": 
"sha1-ZawFBLOVQXHYpklGsq48u4pfVPY=", - "dev": true, "requires": { "has-flag": "^1.0.0" } @@ -8774,7 +8417,6 @@ "version": "2.0.4", "resolved": "https://registry.npmjs.org/postcss-discard-comments/-/postcss-discard-comments-2.0.4.tgz", "integrity": "sha1-vv6J+v1bPazlzM5Rt2uBUUvgDj0=", - "dev": true, "requires": { "postcss": "^5.0.14" }, @@ -8782,14 +8424,12 @@ "has-flag": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-1.0.0.tgz", - "integrity": "sha1-nZ55MWXOAXoA8AQYxD+UKnsdEfo=", - "dev": true + "integrity": "sha1-nZ55MWXOAXoA8AQYxD+UKnsdEfo=" }, "postcss": { "version": "5.2.18", "resolved": "https://registry.npmjs.org/postcss/-/postcss-5.2.18.tgz", - "integrity": "sha512-zrUjRRe1bpXKsX1qAJNJjqZViErVuyEkMTRrwu4ud4sbTtIBRmtaYDrHmcGgmrbsW3MHfmtIf+vJumgQn+PrXg==", - "dev": true, + "integrity": "sha1-ut+hSX1GJE9jkPWLMZgw2RB4U8U=", "requires": { "chalk": "^1.1.3", "js-base64": "^2.1.9", @@ -8801,7 +8441,6 @@ "version": "3.2.3", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-3.2.3.tgz", "integrity": "sha1-ZawFBLOVQXHYpklGsq48u4pfVPY=", - "dev": true, "requires": { "has-flag": "^1.0.0" } @@ -8812,7 +8451,6 @@ "version": "2.1.0", "resolved": "https://registry.npmjs.org/postcss-discard-duplicates/-/postcss-discard-duplicates-2.1.0.tgz", "integrity": "sha1-uavye4isGIFYpesSq8riAmO5GTI=", - "dev": true, "requires": { "postcss": "^5.0.4" }, @@ -8820,14 +8458,12 @@ "has-flag": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-1.0.0.tgz", - "integrity": "sha1-nZ55MWXOAXoA8AQYxD+UKnsdEfo=", - "dev": true + "integrity": "sha1-nZ55MWXOAXoA8AQYxD+UKnsdEfo=" }, "postcss": { "version": "5.2.18", "resolved": "https://registry.npmjs.org/postcss/-/postcss-5.2.18.tgz", - "integrity": "sha512-zrUjRRe1bpXKsX1qAJNJjqZViErVuyEkMTRrwu4ud4sbTtIBRmtaYDrHmcGgmrbsW3MHfmtIf+vJumgQn+PrXg==", - "dev": true, + "integrity": "sha1-ut+hSX1GJE9jkPWLMZgw2RB4U8U=", "requires": { "chalk": "^1.1.3", "js-base64": "^2.1.9", @@ -8839,7 +8475,6 @@ "version": "3.2.3", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-3.2.3.tgz", "integrity": "sha1-ZawFBLOVQXHYpklGsq48u4pfVPY=", - "dev": true, "requires": { "has-flag": "^1.0.0" } @@ -8850,7 +8485,6 @@ "version": "2.1.0", "resolved": "https://registry.npmjs.org/postcss-discard-empty/-/postcss-discard-empty-2.1.0.tgz", "integrity": "sha1-0rS9nVztXr2Nyt52QMfXzX9PkrU=", - "dev": true, "requires": { "postcss": "^5.0.14" }, @@ -8858,14 +8492,12 @@ "has-flag": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-1.0.0.tgz", - "integrity": "sha1-nZ55MWXOAXoA8AQYxD+UKnsdEfo=", - "dev": true + "integrity": "sha1-nZ55MWXOAXoA8AQYxD+UKnsdEfo=" }, "postcss": { "version": "5.2.18", "resolved": "https://registry.npmjs.org/postcss/-/postcss-5.2.18.tgz", - "integrity": "sha512-zrUjRRe1bpXKsX1qAJNJjqZViErVuyEkMTRrwu4ud4sbTtIBRmtaYDrHmcGgmrbsW3MHfmtIf+vJumgQn+PrXg==", - "dev": true, + "integrity": "sha1-ut+hSX1GJE9jkPWLMZgw2RB4U8U=", "requires": { "chalk": "^1.1.3", "js-base64": "^2.1.9", @@ -8877,7 +8509,6 @@ "version": "3.2.3", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-3.2.3.tgz", "integrity": "sha1-ZawFBLOVQXHYpklGsq48u4pfVPY=", - "dev": true, "requires": { "has-flag": "^1.0.0" } @@ -8888,7 +8519,6 @@ "version": "0.1.1", "resolved": "https://registry.npmjs.org/postcss-discard-overridden/-/postcss-discard-overridden-0.1.1.tgz", "integrity": "sha1-ix6vVU9ob7KIzYdMVWZ7CqNmjVg=", - "dev": true, "requires": { "postcss": "^5.0.16" }, @@ 
-8896,14 +8526,12 @@ "has-flag": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-1.0.0.tgz", - "integrity": "sha1-nZ55MWXOAXoA8AQYxD+UKnsdEfo=", - "dev": true + "integrity": "sha1-nZ55MWXOAXoA8AQYxD+UKnsdEfo=" }, "postcss": { "version": "5.2.18", "resolved": "https://registry.npmjs.org/postcss/-/postcss-5.2.18.tgz", - "integrity": "sha512-zrUjRRe1bpXKsX1qAJNJjqZViErVuyEkMTRrwu4ud4sbTtIBRmtaYDrHmcGgmrbsW3MHfmtIf+vJumgQn+PrXg==", - "dev": true, + "integrity": "sha1-ut+hSX1GJE9jkPWLMZgw2RB4U8U=", "requires": { "chalk": "^1.1.3", "js-base64": "^2.1.9", @@ -8915,7 +8543,6 @@ "version": "3.2.3", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-3.2.3.tgz", "integrity": "sha1-ZawFBLOVQXHYpklGsq48u4pfVPY=", - "dev": true, "requires": { "has-flag": "^1.0.0" } @@ -8926,7 +8553,6 @@ "version": "2.2.3", "resolved": "https://registry.npmjs.org/postcss-discard-unused/-/postcss-discard-unused-2.2.3.tgz", "integrity": "sha1-vOMLLMWR/8Y0Mitfs0ZLbZNPRDM=", - "dev": true, "requires": { "postcss": "^5.0.14", "uniqs": "^2.0.0" @@ -8935,14 +8561,12 @@ "has-flag": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-1.0.0.tgz", - "integrity": "sha1-nZ55MWXOAXoA8AQYxD+UKnsdEfo=", - "dev": true + "integrity": "sha1-nZ55MWXOAXoA8AQYxD+UKnsdEfo=" }, "postcss": { "version": "5.2.18", "resolved": "https://registry.npmjs.org/postcss/-/postcss-5.2.18.tgz", - "integrity": "sha512-zrUjRRe1bpXKsX1qAJNJjqZViErVuyEkMTRrwu4ud4sbTtIBRmtaYDrHmcGgmrbsW3MHfmtIf+vJumgQn+PrXg==", - "dev": true, + "integrity": "sha1-ut+hSX1GJE9jkPWLMZgw2RB4U8U=", "requires": { "chalk": "^1.1.3", "js-base64": "^2.1.9", @@ -8954,7 +8578,6 @@ "version": "3.2.3", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-3.2.3.tgz", "integrity": "sha1-ZawFBLOVQXHYpklGsq48u4pfVPY=", - "dev": true, "requires": { "has-flag": "^1.0.0" } @@ -8964,8 +8587,7 @@ "postcss-filter-plugins": { "version": "2.0.3", "resolved": "https://registry.npmjs.org/postcss-filter-plugins/-/postcss-filter-plugins-2.0.3.tgz", - "integrity": "sha512-T53GVFsdinJhgwm7rg1BzbeBRomOg9y5MBVhGcsV0CxurUdVj1UlPdKtn7aqYA/c/QVkzKMjq2bSV5dKG5+AwQ==", - "dev": true, + "integrity": "sha1-giRf34IzcEFkXkdxFNjlk6oYuOw=", "requires": { "postcss": "^5.0.4" }, @@ -8973,14 +8595,12 @@ "has-flag": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-1.0.0.tgz", - "integrity": "sha1-nZ55MWXOAXoA8AQYxD+UKnsdEfo=", - "dev": true + "integrity": "sha1-nZ55MWXOAXoA8AQYxD+UKnsdEfo=" }, "postcss": { "version": "5.2.18", "resolved": "https://registry.npmjs.org/postcss/-/postcss-5.2.18.tgz", - "integrity": "sha512-zrUjRRe1bpXKsX1qAJNJjqZViErVuyEkMTRrwu4ud4sbTtIBRmtaYDrHmcGgmrbsW3MHfmtIf+vJumgQn+PrXg==", - "dev": true, + "integrity": "sha1-ut+hSX1GJE9jkPWLMZgw2RB4U8U=", "requires": { "chalk": "^1.1.3", "js-base64": "^2.1.9", @@ -8992,7 +8612,6 @@ "version": "3.2.3", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-3.2.3.tgz", "integrity": "sha1-ZawFBLOVQXHYpklGsq48u4pfVPY=", - "dev": true, "requires": { "has-flag": "^1.0.0" } @@ -9003,7 +8622,6 @@ "version": "2.1.7", "resolved": "https://registry.npmjs.org/postcss-merge-idents/-/postcss-merge-idents-2.1.7.tgz", "integrity": "sha1-TFUwMTwI4dWzu/PSu8dH4njuonA=", - "dev": true, "requires": { "has": "^1.0.1", "postcss": "^5.0.10", @@ -9013,14 +8631,12 @@ "has-flag": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-1.0.0.tgz", - "integrity": "sha1-nZ55MWXOAXoA8AQYxD+UKnsdEfo=", - "dev": 
true + "integrity": "sha1-nZ55MWXOAXoA8AQYxD+UKnsdEfo=" }, "postcss": { "version": "5.2.18", "resolved": "https://registry.npmjs.org/postcss/-/postcss-5.2.18.tgz", - "integrity": "sha512-zrUjRRe1bpXKsX1qAJNJjqZViErVuyEkMTRrwu4ud4sbTtIBRmtaYDrHmcGgmrbsW3MHfmtIf+vJumgQn+PrXg==", - "dev": true, + "integrity": "sha1-ut+hSX1GJE9jkPWLMZgw2RB4U8U=", "requires": { "chalk": "^1.1.3", "js-base64": "^2.1.9", @@ -9032,7 +8648,6 @@ "version": "3.2.3", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-3.2.3.tgz", "integrity": "sha1-ZawFBLOVQXHYpklGsq48u4pfVPY=", - "dev": true, "requires": { "has-flag": "^1.0.0" } @@ -9043,7 +8658,6 @@ "version": "2.0.2", "resolved": "https://registry.npmjs.org/postcss-merge-longhand/-/postcss-merge-longhand-2.0.2.tgz", "integrity": "sha1-I9kM0Sewp3mUkVMyc5A0oaTz1lg=", - "dev": true, "requires": { "postcss": "^5.0.4" }, @@ -9051,14 +8665,12 @@ "has-flag": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-1.0.0.tgz", - "integrity": "sha1-nZ55MWXOAXoA8AQYxD+UKnsdEfo=", - "dev": true + "integrity": "sha1-nZ55MWXOAXoA8AQYxD+UKnsdEfo=" }, "postcss": { "version": "5.2.18", "resolved": "https://registry.npmjs.org/postcss/-/postcss-5.2.18.tgz", - "integrity": "sha512-zrUjRRe1bpXKsX1qAJNJjqZViErVuyEkMTRrwu4ud4sbTtIBRmtaYDrHmcGgmrbsW3MHfmtIf+vJumgQn+PrXg==", - "dev": true, + "integrity": "sha1-ut+hSX1GJE9jkPWLMZgw2RB4U8U=", "requires": { "chalk": "^1.1.3", "js-base64": "^2.1.9", @@ -9070,7 +8682,6 @@ "version": "3.2.3", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-3.2.3.tgz", "integrity": "sha1-ZawFBLOVQXHYpklGsq48u4pfVPY=", - "dev": true, "requires": { "has-flag": "^1.0.0" } @@ -9081,7 +8692,6 @@ "version": "2.1.2", "resolved": "https://registry.npmjs.org/postcss-merge-rules/-/postcss-merge-rules-2.1.2.tgz", "integrity": "sha1-0d9d+qexrMO+VT8OnhDofGG19yE=", - "dev": true, "requires": { "browserslist": "^1.5.2", "caniuse-api": "^1.5.2", @@ -9094,7 +8704,6 @@ "version": "1.7.7", "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-1.7.7.tgz", "integrity": "sha1-C9dnBCWL6CmyOYu1Dkti0aFmsLk=", - "dev": true, "requires": { "caniuse-db": "^1.0.30000639", "electron-to-chromium": "^1.2.7" @@ -9103,14 +8712,12 @@ "has-flag": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-1.0.0.tgz", - "integrity": "sha1-nZ55MWXOAXoA8AQYxD+UKnsdEfo=", - "dev": true + "integrity": "sha1-nZ55MWXOAXoA8AQYxD+UKnsdEfo=" }, "postcss": { "version": "5.2.18", "resolved": "https://registry.npmjs.org/postcss/-/postcss-5.2.18.tgz", - "integrity": "sha512-zrUjRRe1bpXKsX1qAJNJjqZViErVuyEkMTRrwu4ud4sbTtIBRmtaYDrHmcGgmrbsW3MHfmtIf+vJumgQn+PrXg==", - "dev": true, + "integrity": "sha1-ut+hSX1GJE9jkPWLMZgw2RB4U8U=", "requires": { "chalk": "^1.1.3", "js-base64": "^2.1.9", @@ -9122,7 +8729,6 @@ "version": "3.2.3", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-3.2.3.tgz", "integrity": "sha1-ZawFBLOVQXHYpklGsq48u4pfVPY=", - "dev": true, "requires": { "has-flag": "^1.0.0" } @@ -9132,14 +8738,12 @@ "postcss-message-helpers": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/postcss-message-helpers/-/postcss-message-helpers-2.0.0.tgz", - "integrity": "sha1-pPL0+rbk/gAvCu0ABHjN9S+bpg4=", - "dev": true + "integrity": "sha1-pPL0+rbk/gAvCu0ABHjN9S+bpg4=" }, "postcss-minify-font-values": { "version": "1.0.5", "resolved": "https://registry.npmjs.org/postcss-minify-font-values/-/postcss-minify-font-values-1.0.5.tgz", "integrity": "sha1-S1jttWZB66fIR0qzUmyv17vey2k=", - 
"dev": true, "requires": { "object-assign": "^4.0.1", "postcss": "^5.0.4", @@ -9149,14 +8753,12 @@ "has-flag": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-1.0.0.tgz", - "integrity": "sha1-nZ55MWXOAXoA8AQYxD+UKnsdEfo=", - "dev": true + "integrity": "sha1-nZ55MWXOAXoA8AQYxD+UKnsdEfo=" }, "postcss": { "version": "5.2.18", "resolved": "https://registry.npmjs.org/postcss/-/postcss-5.2.18.tgz", - "integrity": "sha512-zrUjRRe1bpXKsX1qAJNJjqZViErVuyEkMTRrwu4ud4sbTtIBRmtaYDrHmcGgmrbsW3MHfmtIf+vJumgQn+PrXg==", - "dev": true, + "integrity": "sha1-ut+hSX1GJE9jkPWLMZgw2RB4U8U=", "requires": { "chalk": "^1.1.3", "js-base64": "^2.1.9", @@ -9168,7 +8770,6 @@ "version": "3.2.3", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-3.2.3.tgz", "integrity": "sha1-ZawFBLOVQXHYpklGsq48u4pfVPY=", - "dev": true, "requires": { "has-flag": "^1.0.0" } @@ -9179,7 +8780,6 @@ "version": "1.0.5", "resolved": "https://registry.npmjs.org/postcss-minify-gradients/-/postcss-minify-gradients-1.0.5.tgz", "integrity": "sha1-Xb2hE3NwP4PPtKPqOIHY11/15uE=", - "dev": true, "requires": { "postcss": "^5.0.12", "postcss-value-parser": "^3.3.0" @@ -9188,14 +8788,12 @@ "has-flag": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-1.0.0.tgz", - "integrity": "sha1-nZ55MWXOAXoA8AQYxD+UKnsdEfo=", - "dev": true + "integrity": "sha1-nZ55MWXOAXoA8AQYxD+UKnsdEfo=" }, "postcss": { "version": "5.2.18", "resolved": "https://registry.npmjs.org/postcss/-/postcss-5.2.18.tgz", - "integrity": "sha512-zrUjRRe1bpXKsX1qAJNJjqZViErVuyEkMTRrwu4ud4sbTtIBRmtaYDrHmcGgmrbsW3MHfmtIf+vJumgQn+PrXg==", - "dev": true, + "integrity": "sha1-ut+hSX1GJE9jkPWLMZgw2RB4U8U=", "requires": { "chalk": "^1.1.3", "js-base64": "^2.1.9", @@ -9207,7 +8805,6 @@ "version": "3.2.3", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-3.2.3.tgz", "integrity": "sha1-ZawFBLOVQXHYpklGsq48u4pfVPY=", - "dev": true, "requires": { "has-flag": "^1.0.0" } @@ -9218,7 +8815,6 @@ "version": "1.2.2", "resolved": "https://registry.npmjs.org/postcss-minify-params/-/postcss-minify-params-1.2.2.tgz", "integrity": "sha1-rSzgcTc7lDs9kwo/pZo1jCjW8fM=", - "dev": true, "requires": { "alphanum-sort": "^1.0.1", "postcss": "^5.0.2", @@ -9229,14 +8825,12 @@ "has-flag": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-1.0.0.tgz", - "integrity": "sha1-nZ55MWXOAXoA8AQYxD+UKnsdEfo=", - "dev": true + "integrity": "sha1-nZ55MWXOAXoA8AQYxD+UKnsdEfo=" }, "postcss": { "version": "5.2.18", "resolved": "https://registry.npmjs.org/postcss/-/postcss-5.2.18.tgz", - "integrity": "sha512-zrUjRRe1bpXKsX1qAJNJjqZViErVuyEkMTRrwu4ud4sbTtIBRmtaYDrHmcGgmrbsW3MHfmtIf+vJumgQn+PrXg==", - "dev": true, + "integrity": "sha1-ut+hSX1GJE9jkPWLMZgw2RB4U8U=", "requires": { "chalk": "^1.1.3", "js-base64": "^2.1.9", @@ -9248,7 +8842,6 @@ "version": "3.2.3", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-3.2.3.tgz", "integrity": "sha1-ZawFBLOVQXHYpklGsq48u4pfVPY=", - "dev": true, "requires": { "has-flag": "^1.0.0" } @@ -9259,7 +8852,6 @@ "version": "2.1.1", "resolved": "https://registry.npmjs.org/postcss-minify-selectors/-/postcss-minify-selectors-2.1.1.tgz", "integrity": "sha1-ssapjAByz5G5MtGkllCBFDEXNb8=", - "dev": true, "requires": { "alphanum-sort": "^1.0.2", "has": "^1.0.1", @@ -9270,14 +8862,12 @@ "has-flag": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-1.0.0.tgz", - "integrity": "sha1-nZ55MWXOAXoA8AQYxD+UKnsdEfo=", - "dev": true + 
"integrity": "sha1-nZ55MWXOAXoA8AQYxD+UKnsdEfo=" }, "postcss": { "version": "5.2.18", "resolved": "https://registry.npmjs.org/postcss/-/postcss-5.2.18.tgz", - "integrity": "sha512-zrUjRRe1bpXKsX1qAJNJjqZViErVuyEkMTRrwu4ud4sbTtIBRmtaYDrHmcGgmrbsW3MHfmtIf+vJumgQn+PrXg==", - "dev": true, + "integrity": "sha1-ut+hSX1GJE9jkPWLMZgw2RB4U8U=", "requires": { "chalk": "^1.1.3", "js-base64": "^2.1.9", @@ -9289,7 +8879,6 @@ "version": "3.2.3", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-3.2.3.tgz", "integrity": "sha1-ZawFBLOVQXHYpklGsq48u4pfVPY=", - "dev": true, "requires": { "has-flag": "^1.0.0" } @@ -9299,8 +8888,7 @@ "postcss-modules": { "version": "1.3.2", "resolved": "https://registry.npmjs.org/postcss-modules/-/postcss-modules-1.3.2.tgz", - "integrity": "sha512-QujH5ZpPtr1fBWTKDa43Hx45gm7p19aEtHaAtkMCBZZiB/D5za2wXSMtAf94tDUZHF3F5KZcTXISUNqgEQRiDw==", - "dev": true, + "integrity": "sha1-CmFrhDh/H2DdKKAfWXaH6Ft7hIE=", "requires": { "css-modules-loader-core": "^1.1.0", "generic-names": "^1.0.3", @@ -9312,8 +8900,7 @@ "ansi-styles": { "version": "3.2.1", "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", - "dev": true, + "integrity": "sha1-QfuyAkPlCxK+DwS43tvwdSDOhB0=", "requires": { "color-convert": "^1.9.0" } @@ -9321,8 +8908,7 @@ "chalk": { "version": "2.4.1", "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.1.tgz", - "integrity": "sha512-ObN6h1v2fTJSmUXoS3nMQ92LbDK9be4TV+6G+omQlGJFdcUX5heKi1LZ1YnRMIgwTLEj3E24bT6tYni50rlCfQ==", - "dev": true, + "integrity": "sha1-GMSasWoDe26wFSzIPjRxM4IVtm4=", "requires": { "ansi-styles": "^3.2.1", "escape-string-regexp": "^1.0.5", @@ -9332,8 +8918,7 @@ "postcss": { "version": "7.0.2", "resolved": "https://registry.npmjs.org/postcss/-/postcss-7.0.2.tgz", - "integrity": "sha512-fmaUY5370keLUTx+CnwRxtGiuFTcNBLQBqr1oE3WZ/euIYmGAo0OAgOhVJ3ByDnVmOR3PK+0V9VebzfjRIUcqw==", - "dev": true, + "integrity": "sha1-e1oQneNWgE4n+VqWC+8OTVvJuxg=", "requires": { "chalk": "^2.4.1", "source-map": "^0.6.1", @@ -9343,8 +8928,7 @@ "source-map": { "version": "0.6.1", "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "dev": true + "integrity": "sha1-dHIq8y6WFOnCh6jQu95IteLxomM=" } } }, @@ -9352,7 +8936,6 @@ "version": "1.2.0", "resolved": "https://registry.npmjs.org/postcss-modules-local-by-default/-/postcss-modules-local-by-default-1.2.0.tgz", "integrity": "sha1-99gMOYxaOT+nlkRmvRlQCn1hwGk=", - "dev": true, "requires": { "css-selector-tokenizer": "^0.7.0", "postcss": "^6.0.1" @@ -9361,8 +8944,7 @@ "ansi-styles": { "version": "3.2.1", "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", - "dev": true, + "integrity": "sha1-QfuyAkPlCxK+DwS43tvwdSDOhB0=", "requires": { "color-convert": "^1.9.0" } @@ -9370,8 +8952,7 @@ "chalk": { "version": "2.4.1", "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.1.tgz", - "integrity": "sha512-ObN6h1v2fTJSmUXoS3nMQ92LbDK9be4TV+6G+omQlGJFdcUX5heKi1LZ1YnRMIgwTLEj3E24bT6tYni50rlCfQ==", - "dev": true, + "integrity": "sha1-GMSasWoDe26wFSzIPjRxM4IVtm4=", "requires": { "ansi-styles": "^3.2.1", "escape-string-regexp": "^1.0.5", @@ -9381,14 +8962,12 @@ "has-flag": { "version": "3.0.0", "resolved": 
"https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", - "integrity": "sha1-tdRU3CGZriJWmfNGfloH87lVuv0=", - "dev": true + "integrity": "sha1-tdRU3CGZriJWmfNGfloH87lVuv0=" }, "postcss": { "version": "6.0.22", "resolved": "https://registry.npmjs.org/postcss/-/postcss-6.0.22.tgz", - "integrity": "sha512-Toc9lLoUASwGqxBSJGTVcOQiDqjK+Z2XlWBg+IgYwQMY9vA2f7iMpXVc1GpPcfTSyM5lkxNo0oDwDRO+wm7XHA==", - "dev": true, + "integrity": "sha1-4jt4MUkFw7kMvWFwISHnp4hI8qM=", "requires": { "chalk": "^2.4.1", "source-map": "^0.6.1", @@ -9398,14 +8977,12 @@ "source-map": { "version": "0.6.1", "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "dev": true + "integrity": "sha1-dHIq8y6WFOnCh6jQu95IteLxomM=" }, "supports-color": { "version": "5.4.0", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.4.0.tgz", - "integrity": "sha512-zjaXglF5nnWpsq470jSv6P9DwPvgLkuapYmfDm3JWOm0vkNTVF2tI4UrN2r6jH1qM/uc/WtxYY1hYoA2dOKj5w==", - "dev": true, + "integrity": "sha1-HGszdALCE3YF7+GfEP7DkPb6q1Q=", "requires": { "has-flag": "^3.0.0" } @@ -9416,7 +8993,6 @@ "version": "1.1.0", "resolved": "https://registry.npmjs.org/postcss-modules-scope/-/postcss-modules-scope-1.1.0.tgz", "integrity": "sha1-1upkmUx5+XtipytCb75gVqGUu5A=", - "dev": true, "requires": { "css-selector-tokenizer": "^0.7.0", "postcss": "^6.0.1" @@ -9425,8 +9001,7 @@ "ansi-styles": { "version": "3.2.1", "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", - "dev": true, + "integrity": "sha1-QfuyAkPlCxK+DwS43tvwdSDOhB0=", "requires": { "color-convert": "^1.9.0" } @@ -9434,8 +9009,7 @@ "chalk": { "version": "2.4.1", "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.1.tgz", - "integrity": "sha512-ObN6h1v2fTJSmUXoS3nMQ92LbDK9be4TV+6G+omQlGJFdcUX5heKi1LZ1YnRMIgwTLEj3E24bT6tYni50rlCfQ==", - "dev": true, + "integrity": "sha1-GMSasWoDe26wFSzIPjRxM4IVtm4=", "requires": { "ansi-styles": "^3.2.1", "escape-string-regexp": "^1.0.5", @@ -9445,14 +9019,12 @@ "has-flag": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", - "integrity": "sha1-tdRU3CGZriJWmfNGfloH87lVuv0=", - "dev": true + "integrity": "sha1-tdRU3CGZriJWmfNGfloH87lVuv0=" }, "postcss": { "version": "6.0.22", "resolved": "https://registry.npmjs.org/postcss/-/postcss-6.0.22.tgz", - "integrity": "sha512-Toc9lLoUASwGqxBSJGTVcOQiDqjK+Z2XlWBg+IgYwQMY9vA2f7iMpXVc1GpPcfTSyM5lkxNo0oDwDRO+wm7XHA==", - "dev": true, + "integrity": "sha1-4jt4MUkFw7kMvWFwISHnp4hI8qM=", "requires": { "chalk": "^2.4.1", "source-map": "^0.6.1", @@ -9462,14 +9034,12 @@ "source-map": { "version": "0.6.1", "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "dev": true + "integrity": "sha1-dHIq8y6WFOnCh6jQu95IteLxomM=" }, "supports-color": { "version": "5.4.0", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.4.0.tgz", - "integrity": "sha512-zjaXglF5nnWpsq470jSv6P9DwPvgLkuapYmfDm3JWOm0vkNTVF2tI4UrN2r6jH1qM/uc/WtxYY1hYoA2dOKj5w==", - "dev": true, + "integrity": "sha1-HGszdALCE3YF7+GfEP7DkPb6q1Q=", "requires": { "has-flag": "^3.0.0" } @@ -9480,7 +9050,6 @@ "version": "1.3.0", "resolved": 
"https://registry.npmjs.org/postcss-modules-values/-/postcss-modules-values-1.3.0.tgz", "integrity": "sha1-7P+p1+GSUYOJ9CrQ6D9yrsRW6iA=", - "dev": true, "requires": { "icss-replace-symbols": "^1.1.0", "postcss": "^6.0.1" @@ -9489,8 +9058,7 @@ "ansi-styles": { "version": "3.2.1", "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", - "dev": true, + "integrity": "sha1-QfuyAkPlCxK+DwS43tvwdSDOhB0=", "requires": { "color-convert": "^1.9.0" } @@ -9498,8 +9066,7 @@ "chalk": { "version": "2.4.1", "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.1.tgz", - "integrity": "sha512-ObN6h1v2fTJSmUXoS3nMQ92LbDK9be4TV+6G+omQlGJFdcUX5heKi1LZ1YnRMIgwTLEj3E24bT6tYni50rlCfQ==", - "dev": true, + "integrity": "sha1-GMSasWoDe26wFSzIPjRxM4IVtm4=", "requires": { "ansi-styles": "^3.2.1", "escape-string-regexp": "^1.0.5", @@ -9509,14 +9076,12 @@ "has-flag": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", - "integrity": "sha1-tdRU3CGZriJWmfNGfloH87lVuv0=", - "dev": true + "integrity": "sha1-tdRU3CGZriJWmfNGfloH87lVuv0=" }, "postcss": { "version": "6.0.22", "resolved": "https://registry.npmjs.org/postcss/-/postcss-6.0.22.tgz", - "integrity": "sha512-Toc9lLoUASwGqxBSJGTVcOQiDqjK+Z2XlWBg+IgYwQMY9vA2f7iMpXVc1GpPcfTSyM5lkxNo0oDwDRO+wm7XHA==", - "dev": true, + "integrity": "sha1-4jt4MUkFw7kMvWFwISHnp4hI8qM=", "requires": { "chalk": "^2.4.1", "source-map": "^0.6.1", @@ -9526,14 +9091,12 @@ "source-map": { "version": "0.6.1", "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "dev": true + "integrity": "sha1-dHIq8y6WFOnCh6jQu95IteLxomM=" }, "supports-color": { "version": "5.4.0", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.4.0.tgz", - "integrity": "sha512-zjaXglF5nnWpsq470jSv6P9DwPvgLkuapYmfDm3JWOm0vkNTVF2tI4UrN2r6jH1qM/uc/WtxYY1hYoA2dOKj5w==", - "dev": true, + "integrity": "sha1-HGszdALCE3YF7+GfEP7DkPb6q1Q=", "requires": { "has-flag": "^3.0.0" } @@ -9544,7 +9107,6 @@ "version": "1.1.1", "resolved": "https://registry.npmjs.org/postcss-normalize-charset/-/postcss-normalize-charset-1.1.1.tgz", "integrity": "sha1-757nEhLX/nWceO0WL2HtYrXLk/E=", - "dev": true, "requires": { "postcss": "^5.0.5" }, @@ -9552,14 +9114,12 @@ "has-flag": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-1.0.0.tgz", - "integrity": "sha1-nZ55MWXOAXoA8AQYxD+UKnsdEfo=", - "dev": true + "integrity": "sha1-nZ55MWXOAXoA8AQYxD+UKnsdEfo=" }, "postcss": { "version": "5.2.18", "resolved": "https://registry.npmjs.org/postcss/-/postcss-5.2.18.tgz", - "integrity": "sha512-zrUjRRe1bpXKsX1qAJNJjqZViErVuyEkMTRrwu4ud4sbTtIBRmtaYDrHmcGgmrbsW3MHfmtIf+vJumgQn+PrXg==", - "dev": true, + "integrity": "sha1-ut+hSX1GJE9jkPWLMZgw2RB4U8U=", "requires": { "chalk": "^1.1.3", "js-base64": "^2.1.9", @@ -9571,7 +9131,6 @@ "version": "3.2.3", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-3.2.3.tgz", "integrity": "sha1-ZawFBLOVQXHYpklGsq48u4pfVPY=", - "dev": true, "requires": { "has-flag": "^1.0.0" } @@ -9582,7 +9141,6 @@ "version": "4.0.0", "resolved": "https://registry.npmjs.org/postcss-normalize-display-values/-/postcss-normalize-display-values-4.0.0.tgz", "integrity": "sha1-lQ4Me+NEV3ChYP/9a2ZEw8DNj4k=", - "dev": true, "requires": { "cssnano-util-get-match": "^4.0.0", 
"postcss": "^6.0.0", @@ -9593,7 +9151,6 @@ "version": "4.0.0", "resolved": "https://registry.npmjs.org/postcss-normalize-positions/-/postcss-normalize-positions-4.0.0.tgz", "integrity": "sha1-7pNDq5gbgixjq3JhXszNCFZERaM=", - "dev": true, "requires": { "cssnano-util-get-arguments": "^4.0.0", "has": "^1.0.0", @@ -9605,7 +9162,6 @@ "version": "4.0.0", "resolved": "https://registry.npmjs.org/postcss-normalize-repeat-style/-/postcss-normalize-repeat-style-4.0.0.tgz", "integrity": "sha1-txHFks8W+vn/V15C+hALZ5kIPv8=", - "dev": true, "requires": { "cssnano-util-get-arguments": "^4.0.0", "cssnano-util-get-match": "^4.0.0", @@ -9617,7 +9173,6 @@ "version": "4.0.0", "resolved": "https://registry.npmjs.org/postcss-normalize-string/-/postcss-normalize-string-4.0.0.tgz", "integrity": "sha1-cYy20wpvrGrGqDDjLAbAfbxm/l0=", - "dev": true, "requires": { "has": "^1.0.0", "postcss": "^6.0.0", @@ -9628,7 +9183,6 @@ "version": "4.0.0", "resolved": "https://registry.npmjs.org/postcss-normalize-timing-functions/-/postcss-normalize-timing-functions-4.0.0.tgz", "integrity": "sha1-A1HymIaqmB1D2RssK9GuptCvbSM=", - "dev": true, "requires": { "cssnano-util-get-match": "^4.0.0", "postcss": "^6.0.0", @@ -9639,7 +9193,6 @@ "version": "4.0.0", "resolved": "https://registry.npmjs.org/postcss-normalize-unicode/-/postcss-normalize-unicode-4.0.0.tgz", "integrity": "sha1-Ws1dR7rqXRdnSyzMSuUWb6iM35c=", - "dev": true, "requires": { "postcss": "^6.0.0", "postcss-value-parser": "^3.0.0" @@ -9649,7 +9202,6 @@ "version": "3.0.8", "resolved": "https://registry.npmjs.org/postcss-normalize-url/-/postcss-normalize-url-3.0.8.tgz", "integrity": "sha1-EI90s/L82viRov+j6kWSJ5/HgiI=", - "dev": true, "requires": { "is-absolute-url": "^2.0.0", "normalize-url": "^1.4.0", @@ -9660,14 +9212,12 @@ "has-flag": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-1.0.0.tgz", - "integrity": "sha1-nZ55MWXOAXoA8AQYxD+UKnsdEfo=", - "dev": true + "integrity": "sha1-nZ55MWXOAXoA8AQYxD+UKnsdEfo=" }, "postcss": { "version": "5.2.18", "resolved": "https://registry.npmjs.org/postcss/-/postcss-5.2.18.tgz", - "integrity": "sha512-zrUjRRe1bpXKsX1qAJNJjqZViErVuyEkMTRrwu4ud4sbTtIBRmtaYDrHmcGgmrbsW3MHfmtIf+vJumgQn+PrXg==", - "dev": true, + "integrity": "sha1-ut+hSX1GJE9jkPWLMZgw2RB4U8U=", "requires": { "chalk": "^1.1.3", "js-base64": "^2.1.9", @@ -9679,7 +9229,6 @@ "version": "3.2.3", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-3.2.3.tgz", "integrity": "sha1-ZawFBLOVQXHYpklGsq48u4pfVPY=", - "dev": true, "requires": { "has-flag": "^1.0.0" } @@ -9690,7 +9239,6 @@ "version": "4.0.0", "resolved": "https://registry.npmjs.org/postcss-normalize-whitespace/-/postcss-normalize-whitespace-4.0.0.tgz", "integrity": "sha1-HafnaxCuY8EYJ/oE/Du0oe/pnMA=", - "dev": true, "requires": { "postcss": "^6.0.0", "postcss-value-parser": "^3.0.0" @@ -9700,7 +9248,6 @@ "version": "2.2.3", "resolved": "https://registry.npmjs.org/postcss-ordered-values/-/postcss-ordered-values-2.2.3.tgz", "integrity": "sha1-7sbCpntsQSqNsgQud/6NpD+VwR0=", - "dev": true, "requires": { "postcss": "^5.0.4", "postcss-value-parser": "^3.0.1" @@ -9709,14 +9256,12 @@ "has-flag": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-1.0.0.tgz", - "integrity": "sha1-nZ55MWXOAXoA8AQYxD+UKnsdEfo=", - "dev": true + "integrity": "sha1-nZ55MWXOAXoA8AQYxD+UKnsdEfo=" }, "postcss": { "version": "5.2.18", "resolved": "https://registry.npmjs.org/postcss/-/postcss-5.2.18.tgz", - "integrity": 
"sha512-zrUjRRe1bpXKsX1qAJNJjqZViErVuyEkMTRrwu4ud4sbTtIBRmtaYDrHmcGgmrbsW3MHfmtIf+vJumgQn+PrXg==", - "dev": true, + "integrity": "sha1-ut+hSX1GJE9jkPWLMZgw2RB4U8U=", "requires": { "chalk": "^1.1.3", "js-base64": "^2.1.9", @@ -9728,7 +9273,6 @@ "version": "3.2.3", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-3.2.3.tgz", "integrity": "sha1-ZawFBLOVQXHYpklGsq48u4pfVPY=", - "dev": true, "requires": { "has-flag": "^1.0.0" } @@ -9739,7 +9283,6 @@ "version": "2.4.0", "resolved": "https://registry.npmjs.org/postcss-reduce-idents/-/postcss-reduce-idents-2.4.0.tgz", "integrity": "sha1-wsbSDMlYKE9qv75j92Cb9AkFmtM=", - "dev": true, "requires": { "postcss": "^5.0.4", "postcss-value-parser": "^3.0.2" @@ -9748,14 +9291,12 @@ "has-flag": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-1.0.0.tgz", - "integrity": "sha1-nZ55MWXOAXoA8AQYxD+UKnsdEfo=", - "dev": true + "integrity": "sha1-nZ55MWXOAXoA8AQYxD+UKnsdEfo=" }, "postcss": { "version": "5.2.18", "resolved": "https://registry.npmjs.org/postcss/-/postcss-5.2.18.tgz", - "integrity": "sha512-zrUjRRe1bpXKsX1qAJNJjqZViErVuyEkMTRrwu4ud4sbTtIBRmtaYDrHmcGgmrbsW3MHfmtIf+vJumgQn+PrXg==", - "dev": true, + "integrity": "sha1-ut+hSX1GJE9jkPWLMZgw2RB4U8U=", "requires": { "chalk": "^1.1.3", "js-base64": "^2.1.9", @@ -9767,7 +9308,6 @@ "version": "3.2.3", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-3.2.3.tgz", "integrity": "sha1-ZawFBLOVQXHYpklGsq48u4pfVPY=", - "dev": true, "requires": { "has-flag": "^1.0.0" } @@ -9778,7 +9318,6 @@ "version": "1.0.1", "resolved": "https://registry.npmjs.org/postcss-reduce-initial/-/postcss-reduce-initial-1.0.1.tgz", "integrity": "sha1-aPgGlfBF0IJjqHmtJA343WT2ROo=", - "dev": true, "requires": { "postcss": "^5.0.4" }, @@ -9786,14 +9325,12 @@ "has-flag": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-1.0.0.tgz", - "integrity": "sha1-nZ55MWXOAXoA8AQYxD+UKnsdEfo=", - "dev": true + "integrity": "sha1-nZ55MWXOAXoA8AQYxD+UKnsdEfo=" }, "postcss": { "version": "5.2.18", "resolved": "https://registry.npmjs.org/postcss/-/postcss-5.2.18.tgz", - "integrity": "sha512-zrUjRRe1bpXKsX1qAJNJjqZViErVuyEkMTRrwu4ud4sbTtIBRmtaYDrHmcGgmrbsW3MHfmtIf+vJumgQn+PrXg==", - "dev": true, + "integrity": "sha1-ut+hSX1GJE9jkPWLMZgw2RB4U8U=", "requires": { "chalk": "^1.1.3", "js-base64": "^2.1.9", @@ -9805,7 +9342,6 @@ "version": "3.2.3", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-3.2.3.tgz", "integrity": "sha1-ZawFBLOVQXHYpklGsq48u4pfVPY=", - "dev": true, "requires": { "has-flag": "^1.0.0" } @@ -9816,7 +9352,6 @@ "version": "1.0.4", "resolved": "https://registry.npmjs.org/postcss-reduce-transforms/-/postcss-reduce-transforms-1.0.4.tgz", "integrity": "sha1-/3b02CEkN7McKYpC0uFEQCV3GuE=", - "dev": true, "requires": { "has": "^1.0.1", "postcss": "^5.0.8", @@ -9826,14 +9361,12 @@ "has-flag": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-1.0.0.tgz", - "integrity": "sha1-nZ55MWXOAXoA8AQYxD+UKnsdEfo=", - "dev": true + "integrity": "sha1-nZ55MWXOAXoA8AQYxD+UKnsdEfo=" }, "postcss": { "version": "5.2.18", "resolved": "https://registry.npmjs.org/postcss/-/postcss-5.2.18.tgz", - "integrity": "sha512-zrUjRRe1bpXKsX1qAJNJjqZViErVuyEkMTRrwu4ud4sbTtIBRmtaYDrHmcGgmrbsW3MHfmtIf+vJumgQn+PrXg==", - "dev": true, + "integrity": "sha1-ut+hSX1GJE9jkPWLMZgw2RB4U8U=", "requires": { "chalk": "^1.1.3", "js-base64": "^2.1.9", @@ -9845,7 +9378,6 @@ "version": "3.2.3", "resolved": 
"https://registry.npmjs.org/supports-color/-/supports-color-3.2.3.tgz", "integrity": "sha1-ZawFBLOVQXHYpklGsq48u4pfVPY=", - "dev": true, "requires": { "has-flag": "^1.0.0" } @@ -9856,7 +9388,6 @@ "version": "2.2.3", "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-2.2.3.tgz", "integrity": "sha1-+UN3iGBsPJrO4W/+jYsWKX8nu5A=", - "dev": true, "requires": { "flatten": "^1.0.2", "indexes-of": "^1.0.1", @@ -9867,7 +9398,6 @@ "version": "2.1.6", "resolved": "https://registry.npmjs.org/postcss-svgo/-/postcss-svgo-2.1.6.tgz", "integrity": "sha1-tt8YqmE7Zm4TPwittSGcJoSsEI0=", - "dev": true, "requires": { "is-svg": "^2.0.0", "postcss": "^5.0.14", @@ -9878,14 +9408,12 @@ "has-flag": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-1.0.0.tgz", - "integrity": "sha1-nZ55MWXOAXoA8AQYxD+UKnsdEfo=", - "dev": true + "integrity": "sha1-nZ55MWXOAXoA8AQYxD+UKnsdEfo=" }, "postcss": { "version": "5.2.18", "resolved": "https://registry.npmjs.org/postcss/-/postcss-5.2.18.tgz", - "integrity": "sha512-zrUjRRe1bpXKsX1qAJNJjqZViErVuyEkMTRrwu4ud4sbTtIBRmtaYDrHmcGgmrbsW3MHfmtIf+vJumgQn+PrXg==", - "dev": true, + "integrity": "sha1-ut+hSX1GJE9jkPWLMZgw2RB4U8U=", "requires": { "chalk": "^1.1.3", "js-base64": "^2.1.9", @@ -9897,7 +9425,6 @@ "version": "3.2.3", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-3.2.3.tgz", "integrity": "sha1-ZawFBLOVQXHYpklGsq48u4pfVPY=", - "dev": true, "requires": { "has-flag": "^1.0.0" } @@ -9908,7 +9435,6 @@ "version": "2.0.2", "resolved": "https://registry.npmjs.org/postcss-unique-selectors/-/postcss-unique-selectors-2.0.2.tgz", "integrity": "sha1-mB1X0p3csz57Hf4f1DuGSfkzyh0=", - "dev": true, "requires": { "alphanum-sort": "^1.0.1", "postcss": "^5.0.4", @@ -9918,14 +9444,12 @@ "has-flag": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-1.0.0.tgz", - "integrity": "sha1-nZ55MWXOAXoA8AQYxD+UKnsdEfo=", - "dev": true + "integrity": "sha1-nZ55MWXOAXoA8AQYxD+UKnsdEfo=" }, "postcss": { "version": "5.2.18", "resolved": "https://registry.npmjs.org/postcss/-/postcss-5.2.18.tgz", - "integrity": "sha512-zrUjRRe1bpXKsX1qAJNJjqZViErVuyEkMTRrwu4ud4sbTtIBRmtaYDrHmcGgmrbsW3MHfmtIf+vJumgQn+PrXg==", - "dev": true, + "integrity": "sha1-ut+hSX1GJE9jkPWLMZgw2RB4U8U=", "requires": { "chalk": "^1.1.3", "js-base64": "^2.1.9", @@ -9937,7 +9461,6 @@ "version": "3.2.3", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-3.2.3.tgz", "integrity": "sha1-ZawFBLOVQXHYpklGsq48u4pfVPY=", - "dev": true, "requires": { "has-flag": "^1.0.0" } @@ -9947,14 +9470,12 @@ "postcss-value-parser": { "version": "3.3.0", "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.0.tgz", - "integrity": "sha1-h/OPnxj3dKSrTIojL1xc6IcqnRU=", - "dev": true + "integrity": "sha1-h/OPnxj3dKSrTIojL1xc6IcqnRU=" }, "postcss-zindex": { "version": "2.2.0", "resolved": "https://registry.npmjs.org/postcss-zindex/-/postcss-zindex-2.2.0.tgz", "integrity": "sha1-0hCd3AVbka9n/EyzsCWUZjnSryI=", - "dev": true, "requires": { "has": "^1.0.1", "postcss": "^5.0.4", @@ -9964,14 +9485,12 @@ "has-flag": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-1.0.0.tgz", - "integrity": "sha1-nZ55MWXOAXoA8AQYxD+UKnsdEfo=", - "dev": true + "integrity": "sha1-nZ55MWXOAXoA8AQYxD+UKnsdEfo=" }, "postcss": { "version": "5.2.18", "resolved": "https://registry.npmjs.org/postcss/-/postcss-5.2.18.tgz", - "integrity": 
"sha512-zrUjRRe1bpXKsX1qAJNJjqZViErVuyEkMTRrwu4ud4sbTtIBRmtaYDrHmcGgmrbsW3MHfmtIf+vJumgQn+PrXg==", - "dev": true, + "integrity": "sha1-ut+hSX1GJE9jkPWLMZgw2RB4U8U=", "requires": { "chalk": "^1.1.3", "js-base64": "^2.1.9", @@ -9983,7 +9502,6 @@ "version": "3.2.3", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-3.2.3.tgz", "integrity": "sha1-ZawFBLOVQXHYpklGsq48u4pfVPY=", - "dev": true, "requires": { "has-flag": "^1.0.0" } @@ -9993,8 +9511,7 @@ "posthtml": { "version": "0.11.3", "resolved": "https://registry.npmjs.org/posthtml/-/posthtml-0.11.3.tgz", - "integrity": "sha512-quMHnDckt2DQ9lRi6bYLnuyBDnVzK+McHa8+ar4kTdYbWEo/92hREOu3h70ZirudOOp/my2b3r0m5YtxY52yrA==", - "dev": true, + "integrity": "sha1-F+opIbBVW3RV8zyXe9Fti4y3Tyc=", "requires": { "object-assign": "^4.1.1", "posthtml-parser": "^0.3.3", @@ -10004,8 +9521,7 @@ "posthtml-parser": { "version": "0.3.3", "resolved": "https://registry.npmjs.org/posthtml-parser/-/posthtml-parser-0.3.3.tgz", - "integrity": "sha512-H/Z/yXGwl49A7hYQLV1iQ3h87NE0aZ/PMZhFwhw3lKeCAN+Ti4idrHvVvh4/GX10I7u77aQw+QB4vV5/Lzvv5A==", - "dev": true, + "integrity": "sha1-P+mG/KnwDA8QnXMbpZCxkvJud20=", "requires": { "htmlparser2": "^3.9.2", "isobject": "^2.1.0", @@ -10017,8 +9533,7 @@ "posthtml-parser": { "version": "0.4.1", "resolved": "https://registry.npmjs.org/posthtml-parser/-/posthtml-parser-0.4.1.tgz", - "integrity": "sha512-h7vXIQ21Ikz2w5wPClPakNP6mJeJCK6BT0GpqnQrNNABdR7/TchNlFyryL1Bz6Ww53YWCKkr6tdZuHlxY1AVdQ==", - "dev": true, + "integrity": "sha1-lbeP73Zvu+Cm+GG26VWCvD0f+TM=", "requires": { "htmlparser2": "^3.9.2", "object-assign": "^4.1.1" @@ -10027,20 +9542,17 @@ "posthtml-render": { "version": "1.1.4", "resolved": "https://registry.npmjs.org/posthtml-render/-/posthtml-render-1.1.4.tgz", - "integrity": "sha512-jL6eFIzoN3xUEvbo33OAkSDE2VIKU4JQ1wENOows1DpfnrdapR/K3Q1/fB43Mq7wQlcSgRm23nFrvoioufM7eA==", - "dev": true + "integrity": "sha1-ldrAmJL08YP61ayCPwj0LAJWVR4=" }, "prelude-ls": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.1.2.tgz", - "integrity": "sha1-IZMqVJ9eUv/ZqCf1cOBL5iqX2lQ=", - "dev": true + "integrity": "sha1-IZMqVJ9eUv/ZqCf1cOBL5iqX2lQ=" }, "prepend-http": { "version": "1.0.4", "resolved": "https://registry.npmjs.org/prepend-http/-/prepend-http-1.0.4.tgz", - "integrity": "sha1-1PRWKwzjaW5BrFLQ4ALlemNdxtw=", - "dev": true + "integrity": "sha1-1PRWKwzjaW5BrFLQ4ALlemNdxtw=" }, "preserve": { "version": "0.2.0", @@ -10050,8 +9562,7 @@ "private": { "version": "0.1.8", "resolved": "https://registry.npmjs.org/private/-/private-0.1.8.tgz", - "integrity": "sha512-VvivMrbvd2nKkiG38qjULzlc+4Vx4wm/whI9pQD35YrARNnhxeiRktSOhSukRLFNlzg6Br/cJPet5J/u19r/mg==", - "dev": true + "integrity": "sha1-I4Hts2ifelPWUxkAYPz4ItLzaP8=" }, "process": { "version": "0.11.10", @@ -10061,7 +9572,7 @@ "process-nextick-args": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.0.tgz", - "integrity": "sha512-MtEC1TqN0EU5nephaJ4rAtThHtC86dNN9qCuEhtshvpVBkAW5ZO7BASN9REnF9eoXGcRub+pFuKEpOHE+HbEMw==" + "integrity": "sha1-o31zL0JxtKsa0HDTVQjoKQeI/6o=" }, "progress": { "version": "2.0.0", @@ -10072,13 +9583,12 @@ "proto-list": { "version": "1.2.4", "resolved": "https://registry.npmjs.org/proto-list/-/proto-list-1.2.4.tgz", - "integrity": "sha1-IS1b/hMYMGpCD2QCuOJv85ZHqEk=", - "dev": true + "integrity": "sha1-IS1b/hMYMGpCD2QCuOJv85ZHqEk=" }, "proxy-addr": { "version": "2.0.4", "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.4.tgz", - "integrity": 
"sha512-5erio2h9jp5CHGwcybmxmVqHmnCBZeewlfJ0pex+UW7Qny7OOZXTtH56TGNyBizkgiOwhJtMKrVzDTeKcySZwA==", + "integrity": "sha1-7PxzO/Iv+Mb0B/onUye5q2fki5M=", "requires": { "forwarded": "~0.1.2", "ipaddr.js": "1.8.0" @@ -10092,7 +9602,7 @@ "public-encrypt": { "version": "4.0.2", "resolved": "https://registry.npmjs.org/public-encrypt/-/public-encrypt-4.0.2.tgz", - "integrity": "sha512-4kJ5Esocg8X3h8YgJsKAuoesBgB7mqH3eowiDzMUPKiRDDE7E/BqqZD1hnTByIaAFiwAw246YEltSq7tdrOH0Q==", + "integrity": "sha1-RuuRByBr9zSJ+LhbadkTNMZhCZQ=", "requires": { "bn.js": "^4.1.0", "browserify-rsa": "^4.0.0", @@ -10114,13 +9624,12 @@ "qs": { "version": "6.5.2", "resolved": "https://registry.npmjs.org/qs/-/qs-6.5.2.tgz", - "integrity": "sha512-N5ZAX4/LxJmF+7wN74pUD6qAh9/wnvdQcjq9TZjevvXzSUo7bfmw91saqMjzGS2xq91/odN2dW/WOl7qQHNDGA==" + "integrity": "sha1-yzroBuh0BERYTvFUzo7pjUA/PjY=" }, "query-string": { "version": "4.3.4", "resolved": "https://registry.npmjs.org/query-string/-/query-string-4.3.4.tgz", "integrity": "sha1-u7aTucqRXCMlFbIosaArYJBD2+s=", - "dev": true, "requires": { "object-assign": "^4.1.0", "strict-uri-encode": "^1.0.0" @@ -10140,7 +9649,6 @@ "version": "1.0.2", "resolved": "https://registry.npmjs.org/quote-stream/-/quote-stream-1.0.2.tgz", "integrity": "sha1-hJY/jJwmuULhU/7rU6rnRlK34LI=", - "dev": true, "requires": { "buffer-equal": "0.0.1", "minimist": "^1.1.3", @@ -10150,8 +9658,7 @@ "buffer-equal": { "version": "0.0.1", "resolved": "https://registry.npmjs.org/buffer-equal/-/buffer-equal-0.0.1.tgz", - "integrity": "sha1-kbx0sR6kBbyRa8aqkI+q+ltKrEs=", - "dev": true + "integrity": "sha1-kbx0sR6kBbyRa8aqkI+q+ltKrEs=" } } }, @@ -10163,7 +9670,7 @@ "randomatic": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/randomatic/-/randomatic-3.0.0.tgz", - "integrity": "sha512-VdxFOIEY3mNO5PtSRkkle/hPJDHvQhK21oa73K4yAc9qmp6N429gAyF1gZMOTMeS0/AYzaV/2Trcef+NaIonSA==", + "integrity": "sha1-01SQAw6091eN4pLObfsEqRoSiSM=", "requires": { "is-number": "^4.0.0", "kind-of": "^6.0.0", @@ -10173,19 +9680,19 @@ "is-number": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/is-number/-/is-number-4.0.0.tgz", - "integrity": "sha512-rSklcAIlf1OmFdyAqbnWTLVelsQ58uvZ66S/ZyawjWqIviTWCjg2PzVGw8WUA+nNuPTqb4wgA+NszrJ+08LlgQ==" + "integrity": "sha1-ACbjf1RU1z41bf5lZGmYZ8an8P8=" }, "kind-of": { "version": "6.0.2", "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.2.tgz", - "integrity": "sha512-s5kLOcnH0XqDO+FvuaLX8DDjZ18CGFk7VygH40QoKPUQhW4e2rvM0rwUq0t8IQDOwYSeLK01U90OjzBTme2QqA==" + "integrity": "sha1-ARRrNqYhjmTljzqNZt5df8b20FE=" } } }, "randombytes": { "version": "2.0.6", "resolved": "https://registry.npmjs.org/randombytes/-/randombytes-2.0.6.tgz", - "integrity": "sha512-CIQ5OFxf4Jou6uOKe9t1AOgqpeU5fd70A8NPdHSGeYXqXsPe6peOwI0cUl88RWZ6sP1vPMV3avd/R6cZ5/sP1A==", + "integrity": "sha1-0wLFIpSFiISKjTAMkytEwkIx2oA=", "requires": { "safe-buffer": "^5.1.0" } @@ -10193,7 +9700,7 @@ "randomfill": { "version": "1.0.4", "resolved": "https://registry.npmjs.org/randomfill/-/randomfill-1.0.4.tgz", - "integrity": "sha512-87lcbR8+MhcWcUiQ+9e+Rwx8MyR2P7qnt15ynUlbm3TU/fjbgz4GsvfSUDTemtCCtVCqb4ZcEFlyPNTh9bBTLw==", + "integrity": "sha1-ySGW/IarQr6YPxvzF3giSTHWFFg=", "requires": { "randombytes": "^2.0.5", "safe-buffer": "^5.1.0" @@ -10268,7 +9775,7 @@ "readable-stream": { "version": "2.3.6", "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.6.tgz", - "integrity": "sha512-tQtKA9WIAhBF3+VLAseyMqZeBjW0AHJoxOtYqSUZNJxauErmLbVm2FW1y+J/YA9dUrAC39ITejlZWhVIwawkKw==", + "integrity": 
"sha1-sRwn2IuP8fvgcGQ8+UsMea4bCq8=", "requires": { "core-util-is": "~1.0.0", "inherits": "~2.0.3", @@ -10293,7 +9800,7 @@ "minimatch": { "version": "3.0.4", "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz", - "integrity": "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==", + "integrity": "sha1-UWbihkV/AzBgZL5Ul+jbsMPTIIM=", "requires": { "brace-expansion": "^1.1.7" } @@ -10313,7 +9820,6 @@ "version": "1.3.0", "resolved": "https://registry.npmjs.org/reduce-css-calc/-/reduce-css-calc-1.3.0.tgz", "integrity": "sha1-dHyRTgSWFKTJz7umKYca0dKSdxY=", - "dev": true, "requires": { "balanced-match": "^0.4.2", "math-expression-evaluator": "^1.2.14", @@ -10323,8 +9829,7 @@ "balanced-match": { "version": "0.4.2", "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-0.4.2.tgz", - "integrity": "sha1-yz8+PHMtwPAe5wtAPzAuYddwmDg=", - "dev": true + "integrity": "sha1-yz8+PHMtwPAe5wtAPzAuYddwmDg=" } } }, @@ -10332,7 +9837,6 @@ "version": "1.0.2", "resolved": "https://registry.npmjs.org/reduce-function-call/-/reduce-function-call-1.0.2.tgz", "integrity": "sha1-WiAL+S4ON3UXUv5FsKszD9S2vpk=", - "dev": true, "requires": { "balanced-match": "^0.4.2" }, @@ -10340,8 +9844,7 @@ "balanced-match": { "version": "0.4.2", "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-0.4.2.tgz", - "integrity": "sha1-yz8+PHMtwPAe5wtAPzAuYddwmDg=", - "dev": true + "integrity": "sha1-yz8+PHMtwPAe5wtAPzAuYddwmDg=" } } }, @@ -10353,7 +9856,7 @@ "redux": { "version": "3.7.2", "resolved": "https://registry.npmjs.org/redux/-/redux-3.7.2.tgz", - "integrity": "sha512-pNqnf9q1hI5HHZRBkj3bAngGZW/JMCmexDlOxw4XagXY2o1327nHH54LoTjiPJ0gizoqPDRqWyX/00g0hD6w+A==", + "integrity": "sha1-BrcxIyFZAdJdBlvjQusCa8HIU3s=", "requires": { "lodash": "^4.2.1", "lodash-es": "^4.2.1", @@ -10364,7 +9867,7 @@ "lodash": { "version": "4.17.5", "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.5.tgz", - "integrity": "sha512-svL3uiZf1RwhH+cWrfZn3A4+U58wbP0tGVTLQPbjplZxZ8ROD9VLuNgsRniTlLe7OlSqR79RUehXgpBW/s0IQw==" + "integrity": "sha1-maktZcAnLevoyWtgV7yPv6O+1RE=" } } }, @@ -10389,27 +9892,24 @@ "lodash": { "version": "4.17.10", "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.10.tgz", - "integrity": "sha512-UejweD1pDoXu+AD825lWwp4ZGtSwgnpZxb3JDViD7StjQz+Nb/6l093lx4OQ0foGWNRoc19mWy7BzL+UAK2iVg==" + "integrity": "sha1-G3eTz3JZ6jj7NmHU04syYK+K5Oc=" } } }, "regenerate": { "version": "1.3.3", "resolved": "https://registry.npmjs.org/regenerate/-/regenerate-1.3.3.tgz", - "integrity": "sha512-jVpo1GadrDAK59t/0jRx5VxYWQEDkkEKi6+HjE3joFVLfDOh9Xrdh0dF1eSq+BI/SwvTQ44gSscJ8N5zYL61sg==", - "dev": true + "integrity": "sha1-DDNtOYBVPXVcObWGrjsgqknIK38=" }, "regenerator-runtime": { "version": "0.11.1", "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.11.1.tgz", - "integrity": "sha512-MguG95oij0fC3QV3URf4V2SDYGJhJnJGqvIIgdECeODCT98wSWDAJ94SSuVpYQUoTcGUIL6L4yNB7j1DFFHSBg==", - "dev": true + "integrity": "sha1-vgWtf5v30i4Fb5cmzuUBf78Z4uk=" }, "regenerator-transform": { "version": "0.10.1", "resolved": "https://registry.npmjs.org/regenerator-transform/-/regenerator-transform-0.10.1.tgz", - "integrity": "sha512-PJepbvDbuK1xgIgnau7Y90cwaAmO/LCLMI2mPvaXq2heGMR3aWW5/BQvYrhJ8jgmQjXewXvBjzfqKcVOmhjZ6Q==", - "dev": true, + "integrity": "sha1-HkmWg3Ix2ot/PPQRTXG1aRoGgN0=", "requires": { "babel-runtime": "^6.18.0", "babel-types": "^6.19.0", @@ -10419,7 +9919,7 @@ "regex-cache": { "version": "0.4.4", "resolved": 
"https://registry.npmjs.org/regex-cache/-/regex-cache-0.4.4.tgz", - "integrity": "sha512-nVIZwtCjkC9YgvWkpM55B5rBhBYRZhAaJbgcFYXXsHnbZ9UZI9nnVWYZpBlCqv9ho2eZryPnWrZGsOdPwVWXWQ==", + "integrity": "sha1-db3FiioUls7EihKDW8VMjVYjNt0=", "requires": { "is-equal-shallow": "^0.1.3" } @@ -10427,7 +9927,7 @@ "regex-not": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/regex-not/-/regex-not-1.0.2.tgz", - "integrity": "sha512-J6SDjUgDxQj5NusnOtdFxDwN/+HWykR8GELwctJ7mdqhcyy1xEc4SRFHUXvxTp661YaVKAjfRLZ9cCqS6tn32A==", + "integrity": "sha1-H07OJ+ALC2XgJHpoEOaoXYOldSw=", "requires": { "extend-shallow": "^3.0.2", "safe-regex": "^1.1.0" @@ -10436,14 +9936,13 @@ "regexpp": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/regexpp/-/regexpp-2.0.0.tgz", - "integrity": "sha512-g2FAVtR8Uh8GO1Nv5wpxW7VFVwHcCEr4wyA8/MHiRkO8uHoR5ntAA8Uq3P1vvMTX/BeQiRVSpDGLd+Wn5HNOTA==", + "integrity": "sha1-sqdTSoXKGwM7z1zp/45W1OB1U2U=", "dev": true }, "regexpu-core": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/regexpu-core/-/regexpu-core-1.0.0.tgz", "integrity": "sha1-hqdj9Y7k18L2sQLkdkBQ3n7ZDGs=", - "dev": true, "requires": { "regenerate": "^1.2.1", "regjsgen": "^0.2.0", @@ -10453,14 +9952,12 @@ "regjsgen": { "version": "0.2.0", "resolved": "https://registry.npmjs.org/regjsgen/-/regjsgen-0.2.0.tgz", - "integrity": "sha1-bAFq3qxVT3WCP+N6wFuS1aTtsfc=", - "dev": true + "integrity": "sha1-bAFq3qxVT3WCP+N6wFuS1aTtsfc=" }, "regjsparser": { "version": "0.1.5", "resolved": "https://registry.npmjs.org/regjsparser/-/regjsparser-0.1.5.tgz", "integrity": "sha1-fuj4Tcb6eS0/0K4ijSS9lJ6tIFw=", - "dev": true, "requires": { "jsesc": "~0.5.0" } @@ -10491,7 +9988,7 @@ "request": { "version": "2.87.0", "resolved": "https://registry.npmjs.org/request/-/request-2.87.0.tgz", - "integrity": "sha512-fcogkm7Az5bsS6Sl0sibkbhcKsnyon/jV1kF3ajGmF0c8HrttdKTPRT9hieOaQHA5HEq6r8OyWOo/o781C1tNw==", + "integrity": "sha1-MvACNc0I1IK00NaNuTqCnA7VdW4=", "requires": { "aws-sign2": "~0.7.0", "aws4": "^1.6.0", @@ -10558,7 +10055,7 @@ "resolve": { "version": "1.7.0", "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.7.0.tgz", - "integrity": "sha512-QdgZ5bjR1WAlpLaO5yHepFvC+o3rCr6wpfE2tpJNMkXdulf2jKomQBdNRQITF3ZKHNlT71syG98yQP03gasgnA==", + "integrity": "sha1-K99TdIESByhd8N9lK3jxGKuPPF4=", "requires": { "path-parse": "^1.0.5" } @@ -10578,7 +10075,6 @@ "version": "2.0.0", "resolved": "https://registry.npmjs.org/restore-cursor/-/restore-cursor-2.0.0.tgz", "integrity": "sha1-n37ih/gv0ybU/RYpI9YhKe7g368=", - "dev": true, "requires": { "onetime": "^2.0.0", "signal-exit": "^3.0.2" @@ -10587,24 +10083,22 @@ "ret": { "version": "0.1.15", "resolved": "https://registry.npmjs.org/ret/-/ret-0.1.15.tgz", - "integrity": "sha512-TTlYpa+OL+vMMNG24xSlQGEJ3B/RzEfUlLct7b5G/ytav+wPrplCpVMFuwzXbkecJrb6IYo1iFb0S9v37754mg==" + "integrity": "sha1-uKSCXVvbH8P29Twrwz+BOIaBx7w=" }, "rgb-regex": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/rgb-regex/-/rgb-regex-1.0.1.tgz", - "integrity": "sha1-wODWiC3w4jviVKR16O3UGRX+rrE=", - "dev": true + "integrity": "sha1-wODWiC3w4jviVKR16O3UGRX+rrE=" }, "rgba-regex": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/rgba-regex/-/rgba-regex-1.0.0.tgz", - "integrity": "sha1-QzdOLiyglosO8VI0YLfXMP8i7rM=", - "dev": true + "integrity": "sha1-QzdOLiyglosO8VI0YLfXMP8i7rM=" }, "rijs": { "version": "0.9.1", "resolved": "https://registry.npmjs.org/rijs/-/rijs-0.9.1.tgz", - "integrity": "sha512-Hl5yWFZUdVePXIOHRrFXGxQZ2+fzWucqqx/aQjkE0PxbmNyOY0WA/SWdDA1eKeqb7lh2a0vcchR9mZLiQ9rHFQ==", + 
"integrity": "sha1-agVpqJjMWl+Sv9Wa2GbI7xqfJ4c=", "requires": { "rijs.components": "*", "rijs.core": "*", @@ -10623,7 +10117,7 @@ "rijs.components": { "version": "3.1.16", "resolved": "https://registry.npmjs.org/rijs.components/-/rijs.components-3.1.16.tgz", - "integrity": "sha512-7TneWZIILv20erfzKtU1xnwtFSxiB+/9rwdS/3WGkugUbaXZF811kNfvaOlE4+BEj08CV8o0Dkasih3p4NV/dw==", + "integrity": "sha1-l7QyRvGELkWluXu4zOOrro0OVLU=", "requires": { "@compone/define": "^1.2.4", "utilise": "^2.3.5" @@ -10632,7 +10126,7 @@ "rijs.core": { "version": "1.2.6", "resolved": "https://registry.npmjs.org/rijs.core/-/rijs.core-1.2.6.tgz", - "integrity": "sha512-bB/tay726eZomQe91ciIuSGM1zDNyIuOkKdg6jRvYOGR8N30x5qHoADVgCEJgpgqlsPjmuBq6qPsJ3Pw4Nv6Uw==", + "integrity": "sha1-Y4zVoBuq4Q/M9XaDFygCrfN6dBQ=", "requires": { "colors": "^1.1.0", "utilise": "^2.3.5" @@ -10641,7 +10135,7 @@ "rijs.css": { "version": "1.2.4", "resolved": "https://registry.npmjs.org/rijs.css/-/rijs.css-1.2.4.tgz", - "integrity": "sha512-2VKq0iWFki9gZMntUCoOCJVxn/o7tOZu7L0MzD7srPKiynaTsk8A7PjTwFmds1vdJ995v8adg3ax0eS5i+Jbow==", + "integrity": "sha1-8MInuljk0KUOcmGpBTapYlTdyd8=", "requires": { "djbx": "^1.0.3", "utilise": "^2.3.3" @@ -10650,7 +10144,7 @@ "rijs.data": { "version": "2.2.4", "resolved": "https://registry.npmjs.org/rijs.data/-/rijs.data-2.2.4.tgz", - "integrity": "sha512-zvR1GzRzcqZOeD7K+YVV7MmvLeLFbrLsrkxkEW570WiLQsnjTaZ6hLnftXlHLnPMWMIrGfs8gh6BJLWY3XcjXA==", + "integrity": "sha1-e6A8MK+0Y6bqQMjKi8KzoDOTniY=", "requires": { "utilise": "^2.3.5" } @@ -10658,7 +10152,7 @@ "rijs.fn": { "version": "1.2.6", "resolved": "https://registry.npmjs.org/rijs.fn/-/rijs.fn-1.2.6.tgz", - "integrity": "sha512-v/xM7OOzS8HXqGA0y9ey/D0YOyAjiujKJT17/4U0CqViVhMi+0AsJCSuJqSMS4CkzLpy3CqdgKkRD6hnet3i+w==", + "integrity": "sha1-sqJPLEDrK9xYEZ7e3cY7MPuEis4=", "requires": { "browser-resolve": "^1.11.2", "utilise": "^2.3.5" @@ -10667,7 +10161,7 @@ "rijs.npm": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/rijs.npm/-/rijs.npm-2.0.0.tgz", - "integrity": "sha512-xRg5+MH0H5fDe9aMi68xZHq3E6AH1+fouGIJ+a7uFoirnbYdP9YqMK3QYFnvslF0Is08Rhx/R9P/7vwpI1N4rg==", + "integrity": "sha1-PdHYaB41mOgTN51E5sBVGfojCIk=", "requires": { "browser-resolve": "^1.11.2", "browserify": "^14.5.0", @@ -10678,7 +10172,7 @@ "browserify": { "version": "14.5.0", "resolved": "https://registry.npmjs.org/browserify/-/browserify-14.5.0.tgz", - "integrity": "sha512-gKfOsNQv/toWz+60nSPfYzuwSEdzvV2WdxrVPUbPD/qui44rAkB3t3muNtmmGYHqrG56FGwX9SUEQmzNLAeS7g==", + "integrity": "sha1-C7vOUhrNbk0dVNjpNlAI77hanMU=", "requires": { "JSONStream": "^1.0.3", "assert": "^1.4.0", @@ -10732,7 +10226,7 @@ "browserify-zlib": { "version": "0.2.0", "resolved": "https://registry.npmjs.org/browserify-zlib/-/browserify-zlib-0.2.0.tgz", - "integrity": "sha512-Z942RysHXmJrhqk88FmKBVq/v5tqmSkDz7p54G/MGyjMnCFFnC79XWNbg+Vta8W6Wb2qtSZTSxIGkJrRpCFEiA==", + "integrity": "sha1-KGlFnZqjviRf6P4sofRuLn9U1z8=", "requires": { "pako": "~1.0.5" } @@ -10740,7 +10234,7 @@ "buffer": { "version": "5.2.1", "resolved": "https://registry.npmjs.org/buffer/-/buffer-5.2.1.tgz", - "integrity": "sha512-c+Ko0loDaFfuPWiL02ls9Xd3GO3cPVmUobQ6t3rXNUk304u6hGq+8N/kFi+QEIKhzK3uwolVhLzszmfLmMLnqg==", + "integrity": "sha1-3Vf6DxCaxZxgJHkETcp7iz0LcdY=", "requires": { "base64-js": "^1.0.2", "ieee754": "^1.1.4" @@ -10792,7 +10286,7 @@ "pako": { "version": "1.0.6", "resolved": "https://registry.npmjs.org/pako/-/pako-1.0.6.tgz", - "integrity": "sha512-lQe48YPsMJAig+yngZ87Lus+NF+3mtu7DVOBu6b/gHO1YpKwIj5AWjZ/TOS7i46HD/UixzWb1zeWDZfGZ3iYcg==" + "integrity": 
"sha1-AQEhG6pwxLykoPY/Igbpe3368lg=" }, "process-nextick-args": { "version": "1.0.7", @@ -10802,7 +10296,7 @@ "string_decoder": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.0.3.tgz", - "integrity": "sha512-4AH6Z5fzNNBcH+6XDMfA/BTt87skxqJlO0lAh3Dker5zThcAxG6mKz+iGu308UKoPPQ8Dcqx/4JhujzltRa+hQ==", + "integrity": "sha1-D8Z9fBQYJd6UKC3VNr7GubzoYKs=", "requires": { "safe-buffer": "~5.1.0" } @@ -10820,7 +10314,7 @@ "rijs.pages": { "version": "1.3.0", "resolved": "https://registry.npmjs.org/rijs.pages/-/rijs.pages-1.3.0.tgz", - "integrity": "sha512-230MZ7oyDVPoTBQSHuA3vqawikFgRtvoVRaRq7utWPpbo7NpDO7TaOsJmzM66WPypMXStH5ijyFADXwrqYJvdg==", + "integrity": "sha1-jBSVFY8TbzB0O9mfIj4l0NdAt3s=", "requires": { "compression": "^1.6.2", "express": "^4.16.2", @@ -10831,7 +10325,7 @@ "rijs.resdir": { "version": "1.4.4", "resolved": "https://registry.npmjs.org/rijs.resdir/-/rijs.resdir-1.4.4.tgz", - "integrity": "sha512-l31HSXq13Rqaxr5wfY4d2MBFdTwmgG1A/FxGNrBc31E2WgBe4Hj86LhnRQage4SN8RjVZnf1JWWakbS7BVfoBw==", + "integrity": "sha1-Qe1ALd42ELux3UJVHHkQGUT48C8=", "requires": { "chokidar": "^1.0.5", "glob": "^7.1.2", @@ -10842,7 +10336,7 @@ "anymatch": { "version": "1.3.2", "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-1.3.2.tgz", - "integrity": "sha512-0XNayC8lTHQ2OI8aljNCN3sSx6hsr/1+rlcDAotXJR7C1oZZHCNsfpbKwMjRA3Uqb5tF1Rae2oloTr4xpq+WjA==", + "integrity": "sha1-VT3Lj5HjyImEXf26NMd3IbkLnXo=", "requires": { "micromatch": "^2.1.5", "normalize-path": "^2.0.0" @@ -10869,7 +10363,7 @@ "rijs.serve": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/rijs.serve/-/rijs.serve-1.1.1.tgz", - "integrity": "sha512-BZ4tNnMakHvfv0pLVLm1xtN7fncAnux5n57A1RsFOma1Y2wexM/ww8BHwQrsCkSYP+3ujfljthyb1J3HJGwXpA==", + "integrity": "sha1-N6vPTpPd1p948j8ReDxed5tDRxU=", "requires": { "compression": "^1.7.2", "express": "^4.16.3", @@ -10879,7 +10373,7 @@ "rijs.sessions": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/rijs.sessions/-/rijs.sessions-1.1.2.tgz", - "integrity": "sha512-vj9iV8ov5awAnDy5x28FEezafbMClO/1JhnBIIsQg9DQ5vQBysPlwyiSHtNelXmE9gFbaxnsPD1/mUc3hm3FsQ==", + "integrity": "sha1-WlzJSSnx92j3bfszr1LASOatg3w=", "requires": { "cookie-parser": "^1.3.5", "express-session": "^1.15.3", @@ -10889,7 +10383,7 @@ "rijs.singleton": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/rijs.singleton/-/rijs.singleton-1.0.0.tgz", - "integrity": "sha512-QeVEkimxkU0v06NnMYkKsj7R2AzFewG2FH1wMuUtO88n7gY7C/zdbFkNbYeWxqL+tuK+eLYWGFuoburTNM7rXQ==", + "integrity": "sha1-dOf78Yao5NwM07AO1jd6YSj0tpE=", "requires": { "utilise": "*" } @@ -10897,8 +10391,9 @@ "rijs.sync": { "version": "2.3.5", "resolved": "https://registry.npmjs.org/rijs.sync/-/rijs.sync-2.3.5.tgz", - "integrity": "sha512-tcbhmjLyWb+2s2gdiSmROEoD/OQPFeKC9xBnKgs0H+umY8CaVrVPGFdr1y1qovm7HxUbdk/BKqi94GQDc5XB3A==", + "integrity": "sha1-hyjG19cqgBcvy6MWsn0KQ/MESas=", "requires": { + "buble": "github:pemrouz/buble#4e639aeeb64712ac95dc30a52750d1ee4432c9c8", "express": "^4.14.0", "lru_map": "^0.3.3", "platform": "^1.3.4", @@ -10916,7 +10411,7 @@ }, "buble": { "version": "github:pemrouz/buble#4e639aeeb64712ac95dc30a52750d1ee4432c9c8", - "from": "github:pemrouz/buble#4e639aeeb64712ac95dc30a52750d1ee4432c9c8", + "from": "github:pemrouz/buble", "requires": { "acorn": "^5.1.2", "acorn-jsx": "^3.0.1", @@ -10926,16 +10421,6 @@ "minimist": "^1.2.0", "os-homedir": "^1.0.1", "vlq": "^0.2.2" - }, - "dependencies": { - "acorn5-object-spread": { - "version": "4.0.0", - "resolved": 
"https://registry.npmjs.org/acorn5-object-spread/-/acorn5-object-spread-4.0.0.tgz", - "integrity": "sha1-1XWAge7ZcSGrC+R+Mcqu8qo5lpc=", - "requires": { - "acorn": "^5.1.2" - } - } } }, "chalk": { @@ -10961,7 +10446,7 @@ "rimraf": { "version": "2.6.2", "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-2.6.2.tgz", - "integrity": "sha512-lreewLK/BlghmxtfH36YYVg1i8IAce4TI7oao75I1g245+6BctqTVQiBP3YUJ9C6DQOXJmkYR9X9fCLtCOJc5w==", + "integrity": "sha1-LtgVDSShbqhlHm1u8PR8QVjOejY=", "requires": { "glob": "^7.0.5" } @@ -10978,7 +10463,7 @@ "rxjs": { "version": "6.3.2", "resolved": "https://registry.npmjs.org/rxjs/-/rxjs-6.3.2.tgz", - "integrity": "sha512-hV7criqbR0pe7EeL3O66UYVg92IR0XsA97+9y+BWTePK9SKmEI5Qd3Zj6uPnGkNzXsBywBQWTvujPl+1Kn9Zjw==", + "integrity": "sha1-amiLFsTm6YDmLqgF7DBkjhxgkH8=", "dev": true, "requires": { "tslib": "^1.9.0" @@ -10987,7 +10472,7 @@ "safe-buffer": { "version": "5.1.1", "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.1.tgz", - "integrity": "sha512-kKvNJn6Mm93gAczWVJg7wH+wGYWNrDHdWvpUmHyEsgCtIwwo3bqPtV4tR5tuPaUhTOo/kvhVwd8XwwOllGYkbg==" + "integrity": "sha1-iTMSr2myEj3vcfV4iQAWce6yyFM=" }, "safe-regex": { "version": "1.1.0", @@ -11000,13 +10485,12 @@ "safer-buffer": { "version": "2.1.2", "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", - "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==" + "integrity": "sha1-RPoWGwGHuVSd2Eu5GAL5vYOFzWo=" }, "safer-eval": { "version": "1.2.3", "resolved": "https://registry.npmjs.org/safer-eval/-/safer-eval-1.2.3.tgz", - "integrity": "sha512-nDwXOhiheoaBT6op02n8wzsshjLXHhh4YAeqsDEoVmy1k2+lGv/ENLsGaWqkaKArUkUx48VO12/ZPa3sI/OEqQ==", - "dev": true, + "integrity": "sha1-c7p0o0vIoH1qRBNcgV/Rio7r56A=", "requires": { "clones": "^1.1.0" } @@ -11062,8 +10546,7 @@ "sax": { "version": "1.2.4", "resolved": "https://registry.npmjs.org/sax/-/sax-1.2.4.tgz", - "integrity": "sha512-NqVDv9TpANUjFm0N8uM5GxL36UgKi9/atZw+x7YFnQ8ckwFGKrl4xX4yWtrey3UJm5nP1kUbnYgLopqWNSRhWw==", - "dev": true + "integrity": "sha1-KBYjTiN4vdxOU1T6tcqold9xANk=" }, "scss-tokenizer": { "version": "0.2.3", @@ -11101,12 +10584,12 @@ "semver": { "version": "5.5.0", "resolved": "https://registry.npmjs.org/semver/-/semver-5.5.0.tgz", - "integrity": "sha512-4SJ3dm0WAwWy/NVeioZh5AntkdJoWKxHxcmyP622fOkgHa4z3R0TdBJICINyaSDE6uNwVc8gZr+ZinwZAH4xIA==" + "integrity": "sha1-3Eu8emyp2Rbe5dQ1FvAJK1j3uKs=" }, "send": { "version": "0.16.2", "resolved": "https://registry.npmjs.org/send/-/send-0.16.2.tgz", - "integrity": "sha512-E64YFPUssFHEFBvpbbjr44NCLtI1AohxQ8ZSiJjQLskAdKuriYEP6VyGEsRDH8ScozGpkaX1BGvhanqCwkcEZw==", + "integrity": "sha1-bsyh4PjBVtFBWXVZhI32RzCmu8E=", "requires": { "debug": "2.6.9", "depd": "~1.1.2", @@ -11126,15 +10609,14 @@ "mime": { "version": "1.4.1", "resolved": "https://registry.npmjs.org/mime/-/mime-1.4.1.tgz", - "integrity": "sha512-KI1+qOZu5DcW6wayYHSzR/tXKCDC5Om4s1z2QJjDULzLcmf3DvzS7oluY4HCTrc+9FiKmWUgeNLg7W3uIQvxtQ==" + "integrity": "sha1-Eh+evEnjdm8xGnbh+hyAA8SwOqY=" } } }, "serialize-to-js": { "version": "1.2.1", "resolved": "https://registry.npmjs.org/serialize-to-js/-/serialize-to-js-1.2.1.tgz", - "integrity": "sha512-TK6d30GNkOLeFDPuP6Jfy1Q1V31GxzppYTt2lzr8KWmIUKomFj+260QP5o4AhHLu0pr6urgyS8i/Z1PqurjBoA==", - "dev": true, + "integrity": "sha1-Lof2H5OIJtJMRjp8vQ3Skp7DgAg=", "requires": { "js-beautify": "^1.7.5", "safer-eval": "^1.2.3" @@ -11143,7 +10625,7 @@ "serve-static": { "version": "1.13.2", "resolved": 
"https://registry.npmjs.org/serve-static/-/serve-static-1.13.2.tgz", - "integrity": "sha512-p/tdJrO4U387R9oMjb1oj7qSMaMfmOyd4j9hOFoxZe2baQszgHcSWjuya/CiT5kgZZKRudHNOA0pYXOl8rQ5nw==", + "integrity": "sha1-CV6Ecv1bRiN9tQzkhqQ/S4bGzsE=", "requires": { "encodeurl": "~1.0.2", "escape-html": "~1.0.3", @@ -11164,7 +10646,7 @@ "set-value": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/set-value/-/set-value-2.0.0.tgz", - "integrity": "sha512-hw0yxk9GT/Hr5yJEYnHNKYXkIA8mVJgd9ditYZCe16ZczcaELYYcfvaXesNACk2O8O0nTiPQcQhGUQj8JLzeeg==", + "integrity": "sha1-ca5KiPD+77v1LR6mBPP7MV67YnQ=", "requires": { "extend-shallow": "^2.0.1", "is-extendable": "^0.1.1", @@ -11185,19 +10667,17 @@ "setimmediate": { "version": "1.0.5", "resolved": "https://registry.npmjs.org/setimmediate/-/setimmediate-1.0.5.tgz", - "integrity": "sha1-KQy7Iy4waULX1+qbg3Mqt4VvgoU=", - "dev": true + "integrity": "sha1-KQy7Iy4waULX1+qbg3Mqt4VvgoU=" }, "setprototypeof": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.1.0.tgz", - "integrity": "sha512-BvE/TwpZX4FXExxOxZyRGQQv651MSwmWKZGqvmPcRIjDqWub67kTKuIMx43cZZrS/cBBzwBcNDWoFxt2XEFIpQ==" + "integrity": "sha1-0L2FU2iHtv58DYGMuWLZ2RxU5lY=" }, "shallow-copy": { "version": "0.0.1", "resolved": "https://registry.npmjs.org/shallow-copy/-/shallow-copy-0.0.1.tgz", - "integrity": "sha1-QV9CcC1z2BAzApLMXuhurhoRoXA=", - "dev": true + "integrity": "sha1-QV9CcC1z2BAzApLMXuhurhoRoXA=" }, "shasum": { "version": "1.0.2", @@ -11219,7 +10699,7 @@ "sha.js": { "version": "2.4.11", "resolved": "http://registry.npmjs.org/sha.js/-/sha.js-2.4.11.tgz", - "integrity": "sha512-QMEp5B7cftE7APOjk5Y6xgrbWu+WkLVQwk8JNjZ8nKRciZaByEW6MubieAiToS7+dwvrjGhH8jRXz3MVd0AYqQ==", + "integrity": "sha1-N6XPC4HsvGlD3hCbopYNGyZYSuc=", "requires": { "inherits": "^2.0.1", "safe-buffer": "^5.0.1" @@ -11231,7 +10711,6 @@ "version": "1.2.0", "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-1.2.0.tgz", "integrity": "sha1-RKrGW2lbAzmJaMOfNj/uXer98eo=", - "dev": true, "requires": { "shebang-regex": "^1.0.0" } @@ -11239,8 +10718,7 @@ "shebang-regex": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-1.0.0.tgz", - "integrity": "sha1-2kL0l0DAtC2yypcoVxyxkMmO/qM=", - "dev": true + "integrity": "sha1-2kL0l0DAtC2yypcoVxyxkMmO/qM=" }, "shell-quote": { "version": "1.6.1", @@ -11253,14 +10731,23 @@ "jsonify": "~0.0.0" } }, + "shepherd.js": { + "version": "2.0.0-beta.18", + "resolved": "https://registry.npmjs.org/shepherd.js/-/shepherd.js-2.0.0-beta.18.tgz", + "integrity": "sha512-emi0WfQBQYf/qfZmm7yzvVCrjPCIYVudvbhN0eaTwLmNdrtnZvZFRSc+7aA1BoOVgUpgU0UF7WjyqNG9Da0kow==", + "requires": { + "element-matches": "^0.1.2", + "lodash": "^4.17.10", + "popper.js": "^1.14.3" + } + }, "shuup-static-build-tools": { "version": "file:../../lib/shuup_static_build_tools" }, "sigmund": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/sigmund/-/sigmund-1.0.1.tgz", - "integrity": "sha1-P/IfGYytIXX587eBhT/ZTQ0ZtZA=", - "dev": true + "integrity": "sha1-P/IfGYytIXX587eBhT/ZTQ0ZtZA=" }, "signal-exit": { "version": "3.0.2", @@ -11276,7 +10763,6 @@ "version": "0.2.2", "resolved": "https://registry.npmjs.org/simple-swizzle/-/simple-swizzle-0.2.2.tgz", "integrity": "sha1-pNprY1/8zMoz9w0Xy5JZLeleVXo=", - "dev": true, "requires": { "is-arrayish": "^0.3.1" }, @@ -11284,21 +10770,19 @@ "is-arrayish": { "version": "0.3.2", "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.3.2.tgz", - "integrity": 
"sha512-eVRqCvVlZbuw3GrM63ovNSNAeA1K16kaR/LRY/92w0zxQ5/1YzwblUX652i4Xs9RwAGjW9d9y6X88t8OaAJfWQ==", - "dev": true + "integrity": "sha1-RXSirlb3qyBolvtDHq7tBm/fjwM=" } } }, "slash": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/slash/-/slash-1.0.0.tgz", - "integrity": "sha1-xB8vbDn8FtHNF61LXYlhFK5HDVU=", - "dev": true + "integrity": "sha1-xB8vbDn8FtHNF61LXYlhFK5HDVU=" }, "slice-ansi": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/slice-ansi/-/slice-ansi-1.0.0.tgz", - "integrity": "sha512-POqxBK6Lb3q6s047D/XsDVNPnF9Dl8JSaqe9h9lURl0OdNqy/ujDrOiIHtsqXMGbWWTIomRzAMaTyawAU//Reg==", + "integrity": "sha1-BE8aSdiEL/MHqta1Be0Xi9lQE00=", "dev": true, "requires": { "is-fullwidth-code-point": "^2.0.0" @@ -11315,7 +10799,7 @@ "snapdragon": { "version": "0.8.2", "resolved": "https://registry.npmjs.org/snapdragon/-/snapdragon-0.8.2.tgz", - "integrity": "sha512-FtyOnWN/wCHTVXOMwvSv26d+ko5vWlIDD6zoUJ7LW8vh+ZBC8QdljveRP+crNrtBwioEUWy/4dMtbBjA4ioNlg==", + "integrity": "sha1-ZJIufFZbDhQgS6GqfWlkJ40lGC0=", "requires": { "base": "^0.11.1", "debug": "^2.2.0", @@ -11348,7 +10832,7 @@ "snapdragon-node": { "version": "2.1.1", "resolved": "https://registry.npmjs.org/snapdragon-node/-/snapdragon-node-2.1.1.tgz", - "integrity": "sha512-O27l4xaMYt/RSQ5TR3vpWCAB5Kb/czIcqUFOM/C4fYcLnbZUc1PkjTAMjof2pBWaSTwOUd6qUHcFGVGj7aIwnw==", + "integrity": "sha1-bBdfhv8UvbByRWPo88GwIaKGhTs=", "requires": { "define-property": "^1.0.0", "isobject": "^3.0.0", @@ -11366,7 +10850,7 @@ "is-accessor-descriptor": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz", - "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==", + "integrity": "sha1-FpwvbT3x+ZJhgHI2XJsOofaHhlY=", "requires": { "kind-of": "^6.0.0" } @@ -11374,7 +10858,7 @@ "is-data-descriptor": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz", - "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==", + "integrity": "sha1-2Eh2Mh0Oet0DmQQGq7u9NrqSaMc=", "requires": { "kind-of": "^6.0.0" } @@ -11382,7 +10866,7 @@ "is-descriptor": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz", - "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==", + "integrity": "sha1-OxWXRqZmBLBPjIFSS6NlxfFNhuw=", "requires": { "is-accessor-descriptor": "^1.0.0", "is-data-descriptor": "^1.0.0", @@ -11397,14 +10881,14 @@ "kind-of": { "version": "6.0.2", "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.2.tgz", - "integrity": "sha512-s5kLOcnH0XqDO+FvuaLX8DDjZ18CGFk7VygH40QoKPUQhW4e2rvM0rwUq0t8IQDOwYSeLK01U90OjzBTme2QqA==" + "integrity": "sha1-ARRrNqYhjmTljzqNZt5df8b20FE=" } } }, "snapdragon-util": { "version": "3.0.1", "resolved": "https://registry.npmjs.org/snapdragon-util/-/snapdragon-util-3.0.1.tgz", - "integrity": "sha512-mbKkMdQKsjX4BAL4bRYTj21edOf8cN7XHdYUJEe+Zn99hVEYcMvKPct1IqNe7+AZPirn8BCDOQBHQZknqmKlZQ==", + "integrity": "sha1-+VZHlIbyrNeXAGk/b3uAXkWrVuI=", "requires": { "kind-of": "^3.2.0" } @@ -11412,7 +10896,7 @@ "sntp": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/sntp/-/sntp-2.1.0.tgz", - "integrity": "sha512-FL1b58BDrqS3A11lJ0zEdnJ3UOKqVxawAkF3k7F0CVN7VQ34aZrV+G8BZ1WC9ZL7NyrwsW0oviwsWDgRuVYtJg==", + "integrity": "sha1-LGzsFP7cIiJznK+bXD2F0cxaLMg=", "requires": { "hoek": "4.x.x" } @@ -11421,7 
+10905,6 @@ "version": "1.1.2", "resolved": "https://registry.npmjs.org/sort-keys/-/sort-keys-1.1.2.tgz", "integrity": "sha1-RBttTTRnmPG05J6JIK37oOVD+a0=", - "dev": true, "requires": { "is-plain-obj": "^1.0.0" } @@ -11439,7 +10922,7 @@ "source-map-resolve": { "version": "0.5.1", "resolved": "https://registry.npmjs.org/source-map-resolve/-/source-map-resolve-0.5.1.tgz", - "integrity": "sha512-0KW2wvzfxm8NCTb30z0LMNyPqWCdDGE2viwzUaucqJdkTRXtZiSY3I+2A6nVAjmdOy0I4gU8DwnVVGsk9jvP2A==", + "integrity": "sha1-etD1k/IoFZjoVN+A8ZquS5LXoRo=", "requires": { "atob": "^2.0.0", "decode-uri-component": "^0.2.0", @@ -11451,8 +10934,7 @@ "source-map-support": { "version": "0.4.18", "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.4.18.tgz", - "integrity": "sha512-try0/JqxPLF9nOjvSta7tVondkP5dwgyLDjVoyMDlmjugT2lRZ1OfsrYTkCd2hkDnJTKRbO/Rl3orm8vlsUzbA==", - "dev": true, + "integrity": "sha1-Aoam3ovkJkEzhZTpfM6nXwosWF8=", "requires": { "source-map": "^0.5.6" } @@ -11465,7 +10947,7 @@ "spdx-correct": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/spdx-correct/-/spdx-correct-3.0.0.tgz", - "integrity": "sha512-N19o9z5cEyc8yQQPukRCZ9EUmb4HUpnrmaL/fxS2pBo2jbfcFRVuFZ/oFC+vZz0MNNk0h80iMn5/S6qGZOL5+g==", + "integrity": "sha1-BaW01xU6GVvJLDxCW2nzsqlSTII=", "requires": { "spdx-expression-parse": "^3.0.0", "spdx-license-ids": "^3.0.0" @@ -11474,12 +10956,12 @@ "spdx-exceptions": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/spdx-exceptions/-/spdx-exceptions-2.1.0.tgz", - "integrity": "sha512-4K1NsmrlCU1JJgUrtgEeTVyfx8VaYea9J9LvARxhbHtVtohPs/gFGG5yy49beySjlIMhhXZ4QqujIZEfS4l6Cg==" + "integrity": "sha1-LHrmEFbHFKW5ubKyr30xHvXHj+k=" }, "spdx-expression-parse": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/spdx-expression-parse/-/spdx-expression-parse-3.0.0.tgz", - "integrity": "sha512-Yg6D3XpRD4kkOmTpdgbUiEJFKghJH03fiC1OPll5h/0sO6neh2jqRDVHOQ4o/LMea0tgCkbMgea5ip/e+MkWyg==", + "integrity": "sha1-meEZt6XaAOBUkcn6M4t5BII7QdA=", "requires": { "spdx-exceptions": "^2.1.0", "spdx-license-ids": "^3.0.0" @@ -11488,12 +10970,12 @@ "spdx-license-ids": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/spdx-license-ids/-/spdx-license-ids-3.0.0.tgz", - "integrity": "sha512-2+EPwgbnmOIl8HjGBXXMd9NAu02vLjOO1nWw4kmeRDFyHn+M/ETfHxQUK0oXg8ctgVnl9t3rosNVsZ1jG61nDA==" + "integrity": "sha1-enzShHDMbToc/m1miG9rxDDTrIc=" }, "split-string": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/split-string/-/split-string-3.1.0.tgz", - "integrity": "sha512-NzNVhJDYpwceVVii8/Hu6DKfD2G+NrQHlS/V/qgv763EYudVwEcMQNxd2lh+0VrUByXN/oJkl5grOhYWvQUYiw==", + "integrity": "sha1-fLCd2jqGWFcFxks5pkZgOGguj+I=", "requires": { "extend-shallow": "^3.0.0" } @@ -11522,14 +11004,12 @@ "stable": { "version": "0.1.8", "resolved": "https://registry.npmjs.org/stable/-/stable-0.1.8.tgz", - "integrity": "sha512-ji9qxRnOVfcuLDySj9qzhGSEFVobyt1kIOSkj1qZzYLzq7Tos/oUUWvotUPQLlrsidqsK6tBH89Bc9kL5zHA6w==", - "dev": true + "integrity": "sha1-g26zyDgv4pNv6vVEYxAXzn1Ho88=" }, "static-eval": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/static-eval/-/static-eval-2.0.0.tgz", - "integrity": "sha512-6flshd3F1Gwm+Ksxq463LtFd1liC77N/PX1FVVc3OzL3hAmo2fwHFbuArkcfi7s9rTNsLEhcRmXGFZhlgy40uw==", - "dev": true, + "integrity": "sha1-DoIfiSaEfe97S1DNpdVcBKmxOGQ=", "requires": { "escodegen": "^1.8.1" } @@ -11556,8 +11036,7 @@ "static-module": { "version": "2.2.5", "resolved": "https://registry.npmjs.org/static-module/-/static-module-2.2.5.tgz", - "integrity": 
"sha512-D8vv82E/Kpmz3TXHKG8PPsCPg+RAX6cbCOyvjM6x04qZtQ47EtJFVwRsdov3n5d6/6ynrOY9XB4JkaZwB2xoRQ==", - "dev": true, + "integrity": "sha1-vUCrzq4z2mt6+4Sg5DKf+IUr+78=", "requires": { "concat-stream": "~1.6.0", "convert-source-map": "^1.5.1", @@ -11579,7 +11058,6 @@ "version": "0.1.4", "resolved": "https://registry.npmjs.org/duplexer2/-/duplexer2-0.1.4.tgz", "integrity": "sha1-ixLauHjA1p4+eJEFFmKjL8a93ME=", - "dev": true, "requires": { "readable-stream": "^2.0.2" } @@ -11587,8 +11065,7 @@ "magic-string": { "version": "0.22.5", "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.22.5.tgz", - "integrity": "sha512-oreip9rJZkzvA8Qzk9HFs8fZGF/u7H/gtrE8EN6RjKJ9kh2HlC+yQ2QezifqTZfGyiuAV0dRv5a+y/8gBb1m9w==", - "dev": true, + "integrity": "sha1-jpz1r930Q4XB2lvCpqDb0QsDZX4=", "requires": { "vlq": "^0.2.2" } @@ -11598,12 +11075,12 @@ "statuses": { "version": "1.4.0", "resolved": "https://registry.npmjs.org/statuses/-/statuses-1.4.0.tgz", - "integrity": "sha512-zhSCtt8v2NDrRlPQpCNtw/heZLtfUDqxBM1udqikb/Hbk52LK4nQSwr10u77iopCW5LsyHpuXS0GnEc48mLeew==" + "integrity": "sha1-u3PURtonlhBu/MG2AaJT1sRr0Ic=" }, "stdout-stream": { "version": "1.4.1", "resolved": "https://registry.npmjs.org/stdout-stream/-/stdout-stream-1.4.1.tgz", - "integrity": "sha512-j4emi03KXqJWcIeF8eIXkjMFN1Cmb8gUlDYGeBALLPo5qdyTfA9bOtl8m33lRoC+vFMkP3gl0WsDr6+gzxbbTA==", + "integrity": "sha1-WsF0zdXNcmEEqgwLK9g4FdjVNd4=", "requires": { "readable-stream": "^2.0.1" } @@ -11645,7 +11122,7 @@ "stream-http": { "version": "2.8.1", "resolved": "https://registry.npmjs.org/stream-http/-/stream-http-2.8.1.tgz", - "integrity": "sha512-cQ0jo17BLca2r0GfRdZKYAGLU6JRoIWxqSOakUMuKOT6MOK7AAlE856L33QuDmAy/eeOrhLee3dZKX0Uadu93A==", + "integrity": "sha1-0EQb4aRXpzpzOop7U1cL69nvZqQ=", "requires": { "builtin-status-codes": "^3.0.0", "inherits": "^2.0.1", @@ -11666,14 +11143,12 @@ "strict-uri-encode": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/strict-uri-encode/-/strict-uri-encode-1.1.0.tgz", - "integrity": "sha1-J5siXfHVgrH1TmWt3UNS4Y+qBxM=", - "dev": true + "integrity": "sha1-J5siXfHVgrH1TmWt3UNS4Y+qBxM=" }, "string-hash": { "version": "1.1.3", "resolved": "https://registry.npmjs.org/string-hash/-/string-hash-1.1.3.tgz", - "integrity": "sha1-6Kr8CsGFW0Zmkp7X3RJ1311sgRs=", - "dev": true + "integrity": "sha1-6Kr8CsGFW0Zmkp7X3RJ1311sgRs=" }, "string-width": { "version": "1.0.2", @@ -11688,7 +11163,7 @@ "string_decoder": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", - "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", + "integrity": "sha1-nPFhG6YmhdcDCunkujQUnDrwP8g=", "requires": { "safe-buffer": "~5.1.0" } @@ -11696,7 +11171,7 @@ "stringstream": { "version": "0.0.6", "resolved": "https://registry.npmjs.org/stringstream/-/stringstream-0.0.6.tgz", - "integrity": "sha512-87GEBAkegbBcweToUrdzf3eLhWNg06FJTebl4BVJz/JgWy8CvEr9dRtX5qWphiynMSQlxxi+QqN0z5T32SLlhA==" + "integrity": "sha1-eIAiWw1K0Q4wkn0Weh1vL9OzOnI=" }, "strip-ansi": { "version": "3.0.1", @@ -11726,7 +11201,6 @@ "version": "4.0.0", "resolved": "https://registry.npmjs.org/stylehacks/-/stylehacks-4.0.0.tgz", "integrity": "sha1-ZLMjlRxKJOX8ey7AbBN78y0VXoo=", - "dev": true, "requires": { "browserslist": "^4.0.0", "postcss": "^6.0.0", @@ -11736,8 +11210,7 @@ "browserslist": { "version": "4.1.1", "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.1.1.tgz", - "integrity": 
"sha512-VBorw+tgpOtZ1BYhrVSVTzTt/3+vSE3eFUh0N2GCFK1HffceOaf32YS/bs6WiFhjDAblAFrx85jMy3BG9fBK2Q==", - "dev": true, + "integrity": "sha1-Mo60/xIVsS32WJ6auC+K2qT8jNY=", "requires": { "caniuse-lite": "^1.0.30000884", "electron-to-chromium": "^1.3.62", @@ -11747,14 +11220,12 @@ "caniuse-lite": { "version": "1.0.30000885", "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30000885.tgz", - "integrity": "sha512-cXKbYwpxBLd7qHyej16JazPoUacqoVuDhvR61U7Fr5vSxMUiodzcYa1rQYRYfZ5GexV03vGZHd722vNPLjPJGQ==", - "dev": true + "integrity": "sha1-6Inp+OflDnafKkljTJMriu5iKYQ=" }, "postcss-selector-parser": { "version": "3.1.1", "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-3.1.1.tgz", "integrity": "sha1-T4dfSvsMllc9XPTXQBGu4lCn6GU=", - "dev": true, "requires": { "dot-prop": "^4.1.1", "indexes-of": "^1.0.1", @@ -11774,12 +11245,12 @@ "summernote": { "version": "0.8.10", "resolved": "https://registry.npmjs.org/summernote/-/summernote-0.8.10.tgz", - "integrity": "sha512-1b4ESCiY9HW+12HYXCntjbThVgeYNaYKfKL7pC4Jqjo/WDS4G4mMtd2kPuCw56HxeRT67d+zlehopaE+M4o6aQ==" + "integrity": "sha1-IaXX8Yo7B1ALWLYNWQdBelSJdSA=" }, "supports-color": { "version": "5.5.0", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", - "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", + "integrity": "sha1-4uaaRKyHcveKHsCzW2id9lMO/I8=", "requires": { "has-flag": "^3.0.0" } @@ -11788,7 +11259,6 @@ "version": "0.7.2", "resolved": "https://registry.npmjs.org/svgo/-/svgo-0.7.2.tgz", "integrity": "sha1-n1dyQTlSE1xv779Ar+ak+qiLS7U=", - "dev": true, "requires": { "coa": "~1.0.1", "colors": "~1.1.2", @@ -11802,12 +11272,12 @@ "symbol-observable": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/symbol-observable/-/symbol-observable-1.2.0.tgz", - "integrity": "sha512-e900nM8RRtGhlV36KGEU9k65K3mPb1WV70OdjfxlG2EAuM1noi/E/BaW/uMhL7bPEssK8QV57vN3esixjUvcXQ==" + "integrity": "sha1-wiaIrtTqs83C3+rLtWFmBWCgCAQ=" }, "syntax-error": { "version": "1.4.0", "resolved": "https://registry.npmjs.org/syntax-error/-/syntax-error-1.4.0.tgz", - "integrity": "sha512-YPPlu67mdnHGTup2A8ff7BC2Pjq0e0Yp/IyTFN03zWO0RcK07uLcbi7C2KpGR2FvWbaB0+bfE27a+sBKebSo7w==", + "integrity": "sha1-LZ1P9cBkrLcRWUo+O5UFStUdkHw=", "requires": { "acorn-node": "^1.2.0" } @@ -11815,7 +11285,7 @@ "table": { "version": "4.0.3", "resolved": "http://registry.npmjs.org/table/-/table-4.0.3.tgz", - "integrity": "sha512-S7rnFITmBH1EnyKcvxBh1LjYeQMmnZtCXSEbHcH6S0NoKit24ZuFO/T1vDcLdYsLQkM188PVVhQmzKIuThNkKg==", + "integrity": "sha1-ALXitgLxeUuayvnKkIp2OGp4E7w=", "dev": true, "requires": { "ajv": "^6.0.1", @@ -11829,7 +11299,7 @@ "ajv": { "version": "6.5.3", "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.5.3.tgz", - "integrity": "sha512-LqZ9wY+fx3UMiiPd741yB2pj3hhil+hQc8taf4o2QGRFpWgZ2V5C8HA165DY9sS3fJwsk7uT7ZlFEyC3Ig3lLg==", + "integrity": "sha1-caVp0Yns9PTzISJP7LFm8HHdkPk=", "dev": true, "requires": { "fast-deep-equal": "^2.0.1", @@ -11847,7 +11317,7 @@ "ansi-styles": { "version": "3.2.1", "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "integrity": "sha1-QfuyAkPlCxK+DwS43tvwdSDOhB0=", "dev": true, "requires": { "color-convert": "^1.9.0" @@ -11856,7 +11326,7 @@ "chalk": { "version": "2.4.1", "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.1.tgz", - "integrity": 
"sha512-ObN6h1v2fTJSmUXoS3nMQ92LbDK9be4TV+6G+omQlGJFdcUX5heKi1LZ1YnRMIgwTLEj3E24bT6tYni50rlCfQ==", + "integrity": "sha1-GMSasWoDe26wFSzIPjRxM4IVtm4=", "dev": true, "requires": { "ansi-styles": "^3.2.1", @@ -11879,13 +11349,13 @@ "json-schema-traverse": { "version": "0.4.1", "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", - "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", + "integrity": "sha1-afaofZUTq4u4/mO9sJecRI5oRmA=", "dev": true }, "string-width": { "version": "2.1.1", "resolved": "https://registry.npmjs.org/string-width/-/string-width-2.1.1.tgz", - "integrity": "sha512-nOqH59deCq9SRHlxq1Aw85Jnt4w6KvLKqWVik6oA9ZklXLNIOlqg4F2yrT1MVaTjAqvVwdfeZ7w7aCvJD7ugkw==", + "integrity": "sha1-q5Pyeo3BPSjKyBXEYhQ6bZASrp4=", "dev": true, "requires": { "is-fullwidth-code-point": "^2.0.0", @@ -11916,7 +11386,7 @@ "tar-stream": { "version": "1.6.1", "resolved": "https://registry.npmjs.org/tar-stream/-/tar-stream-1.6.1.tgz", - "integrity": "sha512-IFLM5wp3QrJODQFPm6/to3LJZrONdBY/otxcvDIQzu217zKye6yVR3hhi9lAjrC2Z+m/j5oDxMPb1qcd8cIvpA==", + "integrity": "sha1-+E7xaWJp1iI8pI9uHu7eP36B85U=", "requires": { "bl": "^1.0.0", "buffer-alloc": "^1.1.0", @@ -11930,8 +11400,7 @@ "terser": { "version": "3.8.2", "resolved": "https://registry.npmjs.org/terser/-/terser-3.8.2.tgz", - "integrity": "sha512-FGSBXiBJe2TSXy6pWwXpY0YcEWEK35UKL64BBbxX3aHqM4Nj0RMqXvqBuoSGfyd80t8MKQ5JwYm5jRRGTSEFNg==", - "dev": true, + "integrity": "sha1-SLiA+Un40DispN/QCjfFPZbs+fs=", "requires": { "commander": "~2.17.1", "source-map": "~0.6.1", @@ -11941,20 +11410,17 @@ "commander": { "version": "2.17.1", "resolved": "https://registry.npmjs.org/commander/-/commander-2.17.1.tgz", - "integrity": "sha512-wPMUt6FnH2yzG95SA6mzjQOEKUU3aLaDEmzs1ti+1E9h+CsrZghRlqEM/EJ4KscsQVG8uNN4uVreUeT8+drlgg==", - "dev": true + "integrity": "sha1-vXerfebelCBc6sxy8XFtKfIKd78=" }, "source-map": { "version": "0.6.1", "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "dev": true + "integrity": "sha1-dHIq8y6WFOnCh6jQu95IteLxomM=" }, "source-map-support": { "version": "0.5.9", "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.9.tgz", - "integrity": "sha512-gR6Rw4MvUlYy83vP0vxoVNzM6t8MUXqNuRsuBmBHQDu1Fh6X015FrLdgoDKcNdkwGubozq0P4N0Q37UyFVr1EA==", - "dev": true, + "integrity": "sha1-QbyVOyU0Jn6i1gW8z6e/oxEc7V8=", "requires": { "buffer-from": "^1.0.0", "source-map": "^0.6.0" @@ -11962,19 +11428,6 @@ } } }, - "tether": { - "version": "1.4.4", - "resolved": "https://registry.npmjs.org/tether/-/tether-1.4.4.tgz", - "integrity": "sha512-bagKeRRo3vEynHnO3GB7/jB3Q4YIf0mN7gXM/nR0wZvNHkPrwmZemg1w0C32JZP0prHZUwxGwoX5CdA7tuIDEw==" - }, - "tether-shepherd": { - "version": "1.8.1", - "resolved": "https://registry.npmjs.org/tether-shepherd/-/tether-shepherd-1.8.1.tgz", - "integrity": "sha1-T9my/dkFnwS2xCSUVtWuFFo+kiM=", - "requires": { - "tether": "^1.0.1" - } - }, "text-table": { "version": "0.2.0", "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz", @@ -11998,8 +11451,7 @@ "timers-browserify": { "version": "2.0.10", "resolved": "https://registry.npmjs.org/timers-browserify/-/timers-browserify-2.0.10.tgz", - "integrity": "sha512-YvC1SV1XdOUaL6gx5CoGroT3Gu49pK9+TZ38ErPldOWW4j49GI1HKs9DV+KGq/w6y+LZ72W1c8cKz2vzY+qpzg==", - "dev": true, + "integrity": 
"sha1-HSjj0qrfHVpZlsTp+VYBzQU0gK4=", "requires": { "setimmediate": "^1.0.4" } @@ -12007,19 +11459,17 @@ "timsort": { "version": "0.3.0", "resolved": "https://registry.npmjs.org/timsort/-/timsort-0.3.0.tgz", - "integrity": "sha1-QFQRqOfmM5/mTbmiNN4R3DHgK9Q=", - "dev": true + "integrity": "sha1-QFQRqOfmM5/mTbmiNN4R3DHgK9Q=" }, "tiny-inflate": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/tiny-inflate/-/tiny-inflate-1.0.2.tgz", - "integrity": "sha1-k9nez/yIBb1X6uQxDwt0Xptvs6c=", - "dev": true + "integrity": "sha1-k9nez/yIBb1X6uQxDwt0Xptvs6c=" }, "tmp": { "version": "0.0.33", "resolved": "https://registry.npmjs.org/tmp/-/tmp-0.0.33.tgz", - "integrity": "sha512-jRCJlojKnZ3addtTOjdIqoRuPEKBvNXcGYqzO6zWZX8KfKEpnGY5jfggJQ3EjKuu8D4bJRr0y+cYJFmYbImXGw==", + "integrity": "sha1-bTQzWIl2jSGyvNoKonfO07G/rfk=", "dev": true, "requires": { "os-tmpdir": "~1.0.2" @@ -12033,13 +11483,12 @@ "to-buffer": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/to-buffer/-/to-buffer-1.1.1.tgz", - "integrity": "sha512-lx9B5iv7msuFYE3dytT+KE5tap+rNYw+K4jVkb9R/asAb+pbBSM17jtunHplhBe6RRJdZx3Pn2Jph24O32mOVg==" + "integrity": "sha1-STvUj2LXxD/N7TE6A9ytsuEhOoA=" }, "to-fast-properties": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-1.0.3.tgz", - "integrity": "sha1-uDVx+k2MJbguIxsG46MFXeTKGkc=", - "dev": true + "integrity": "sha1-uDVx+k2MJbguIxsG46MFXeTKGkc=" }, "to-object-path": { "version": "0.3.0", @@ -12052,7 +11501,7 @@ "to-regex": { "version": "3.0.2", "resolved": "https://registry.npmjs.org/to-regex/-/to-regex-3.0.2.tgz", - "integrity": "sha512-FWtleNAtZ/Ki2qtqej2CXTOayOH9bHDQF+Q48VpWyDXjbYxA4Yz8iDB31zXOBUlOHHKidDbqGVrTUvQMPmBGBw==", + "integrity": "sha1-E8/dmzNlUvMLUfM6iuG0Knp1mc4=", "requires": { "define-property": "^2.0.2", "extend-shallow": "^3.0.2", @@ -12082,19 +11531,17 @@ "toml": { "version": "2.3.3", "resolved": "https://registry.npmjs.org/toml/-/toml-2.3.3.tgz", - "integrity": "sha512-O7L5hhSQHxuufWUdcTRPfuTh3phKfAZ/dqfxZFoxPCj2RYmpaSGLEIs016FCXItQwNr08yefUB5TSjzRYnajTA==", - "dev": true + "integrity": "sha1-jWg9cpV3yyhiMd/HqK/+WNMXKPs=" }, "tomlify-j0.4": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/tomlify-j0.4/-/tomlify-j0.4-3.0.0.tgz", - "integrity": "sha512-2Ulkc8T7mXJ2l0W476YC/A209PR38Nw8PuaCNtk9uI3t1zzFdGQeWYGQvmj2PZkVvRC/Yoi4xQKMRnWc/N29tQ==", - "dev": true + "integrity": "sha1-mUFNRSaMOjuL84voIUW3u6NLdHM=" }, "tooltip.js": { "version": "1.3.0", "resolved": "https://registry.npmjs.org/tooltip.js/-/tooltip.js-1.3.0.tgz", - "integrity": "sha512-5aj0jSQ2J8OOKRPTricY45HNUPRVbVRJZpZm2Wy9pd5BknGEc6epHwJ1eFvEmhXc3hpocDyRpZq31IiWcmcLVg==", + "integrity": "sha1-xWEVzp5LVJ7doLMGo+Q5oXJiGB0=", "requires": { "popper.js": "^1.0.2" } @@ -12122,7 +11569,7 @@ "tough-cookie": { "version": "2.3.4", "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-2.3.4.tgz", - "integrity": "sha512-TZ6TTfI5NtZnuyy/Kecv+CnoROnyXn2DN97LontgQpCwsX2XyLYCC0ENhYkehSOwAp8rTQKc/NUIF7BkQ5rKLA==", + "integrity": "sha1-7GDO44rGdQY//JelwYlwV47oNlU=", "requires": { "punycode": "^1.4.1" } @@ -12141,13 +11588,12 @@ "trim-right": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/trim-right/-/trim-right-1.0.1.tgz", - "integrity": "sha1-yy4SAwZ+DI3h9hQJS5/kVwTqYAM=", - "dev": true + "integrity": "sha1-yy4SAwZ+DI3h9hQJS5/kVwTqYAM=" }, "true-case-path": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/true-case-path/-/true-case-path-1.0.3.tgz", - "integrity": 
"sha512-m6s2OdQe5wgpFMC+pAJ+q9djG82O2jcHPOI6RNg1yy9rCYR+WD6Nbpl32fDpfC56nirdRy+opFa/Vk7HYhqaew==", + "integrity": "sha1-+BO1qMhrQNpZYGcisUTjIleZ9H0=", "requires": { "glob": "^7.1.2" } @@ -12155,7 +11601,7 @@ "tslib": { "version": "1.9.3", "resolved": "https://registry.npmjs.org/tslib/-/tslib-1.9.3.tgz", - "integrity": "sha512-4krF8scpejhaOgqzBEcGM7yDIEfi0/8+8zDRZhNZZ2kjmHJ4hv3zCbQWxoJGz1iw5U0Jl0nma13xzHXcncMavQ==", + "integrity": "sha1-1+TdeSRdhUKMTX5IIqeZF5VMooY=", "dev": true }, "tty-browserify": { @@ -12181,7 +11627,6 @@ "version": "0.3.2", "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.3.2.tgz", "integrity": "sha1-WITKtRLPHTVeP7eE8wgEsrUg23I=", - "dev": true, "requires": { "prelude-ls": "~1.1.2" } @@ -12189,12 +11634,12 @@ "type-detect": { "version": "4.0.8", "resolved": "https://registry.npmjs.org/type-detect/-/type-detect-4.0.8.tgz", - "integrity": "sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g==" + "integrity": "sha1-dkb7XxiHHPu3dJ5pvTmmOI63RQw=" }, "type-is": { "version": "1.6.16", "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.16.tgz", - "integrity": "sha512-HRkVv/5qY2G6I8iab9cI7v1bOIdhm94dVjQCPFElW9W+3GeDOSHmy2EBYe4VTApuzolPcmgFTN3ftVJRKR2J9Q==", + "integrity": "sha1-+JzjQVQcZysl7nrjxz3uOyvlAZQ=", "requires": { "media-typer": "0.3.0", "mime-types": "~2.1.18" @@ -12208,7 +11653,7 @@ "uid-safe": { "version": "2.1.5", "resolved": "https://registry.npmjs.org/uid-safe/-/uid-safe-2.1.5.tgz", - "integrity": "sha512-KPHm4VL5dDXKz01UuEd88Df+KzynaohSL9fBh096KWAxSKZQDI2uBrVqtvRM4rwrIrRRKsdLNML/lnaaVSRioA==", + "integrity": "sha1-Kz1cckDo/C5Y+Komnl7knAhXvTo=", "requires": { "random-bytes": "~1.0.0" } @@ -12216,12 +11661,12 @@ "umd": { "version": "3.0.3", "resolved": "https://registry.npmjs.org/umd/-/umd-3.0.3.tgz", - "integrity": "sha512-4IcGSufhFshvLNcMCV80UnQVlZ5pMOC8mvNPForqwA4+lzYQuetTESLDQkeLmihq8bRcnpbQa48Wb8Lh16/xow==" + "integrity": "sha1-qp/mU8QrkJdnhInAEACstp8LJs8=" }, "undeclared-identifiers": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/undeclared-identifiers/-/undeclared-identifiers-1.1.2.tgz", - "integrity": "sha512-13EaeocO4edF/3JKime9rD7oB6QI8llAGhgn5fKOPyfkJbRb6NFv9pYV6dFEmpa4uRjKeBqLZP8GpuzqHlKDMQ==", + "integrity": "sha1-fYUKmIh8/0vQv2SZnAFNCO1tGsw=", "requires": { "acorn-node": "^1.3.0", "get-assigned-identifiers": "^1.2.0", @@ -12233,7 +11678,6 @@ "version": "0.3.1", "resolved": "https://registry.npmjs.org/unicode-trie/-/unicode-trie-0.3.1.tgz", "integrity": "sha1-1nHd3YkQGgi6w3tqUWEBBgIFIIU=", - "dev": true, "requires": { "pako": "^0.2.5", "tiny-inflate": "^1.0.0" @@ -12274,14 +11718,12 @@ "uniq": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/uniq/-/uniq-1.0.1.tgz", - "integrity": "sha1-sxxa6CVIRKOoKBVBzisEuGWnNP8=", - "dev": true + "integrity": "sha1-sxxa6CVIRKOoKBVBzisEuGWnNP8=" }, "uniqs": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/uniqs/-/uniqs-2.0.0.tgz", - "integrity": "sha1-/+3ks2slKQaW5uFl1KWe25mOawI=", - "dev": true + "integrity": "sha1-/+3ks2slKQaW5uFl1KWe25mOawI=" }, "unpipe": { "version": "1.0.0", @@ -12291,8 +11733,7 @@ "unquote": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/unquote/-/unquote-1.1.1.tgz", - "integrity": "sha1-j97XMk7G6IoP+LkF58CYzcCG1UQ=", - "dev": true + "integrity": "sha1-j97XMk7G6IoP+LkF58CYzcCG1UQ=" }, "unset-value": { "version": "1.0.0", @@ -12338,12 +11779,12 @@ "upath": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/upath/-/upath-1.1.0.tgz", - "integrity": 
"sha512-bzpH/oBhoS/QI/YtbkqCg6VEiPYjSZtrHQM6/QnJS6OL9pKUFLqb3aFh4Scvwm45+7iAgiMkLhSbaZxUqmrprw==" + "integrity": "sha1-NSVll+RqWB20eT0M5H+prr/J+r0=" }, "uri-js": { "version": "4.2.2", "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.2.2.tgz", - "integrity": "sha512-KY9Frmirql91X2Qgjry0Wd4Y+YTdrdZheS8TFwvkbLWf/G5KNJDCh6pKL5OZctEW4+0Baa5idK2ZQuELRwPznQ==", + "integrity": "sha1-lMVA4f93KVbiKZUHwBCupsiDjrA=", "dev": true, "requires": { "punycode": "^2.1.0" @@ -12352,7 +11793,7 @@ "punycode": { "version": "2.1.1", "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.1.1.tgz", - "integrity": "sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A==", + "integrity": "sha1-tYsBCsQMIsVldhbI0sLALHv0eew=", "dev": true } } @@ -12381,7 +11822,7 @@ "use": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/use/-/use-3.1.0.tgz", - "integrity": "sha512-6UJEQM/L+mzC3ZJNM56Q4DFGLX/evKGRg15UJHGB9X5j5Z3AFbgZvjUh2yq/UJUY4U5dh7Fal++XbNg1uzpRAw==", + "integrity": "sha1-FHFr8D/f79AwQK71jYtLhfOnxUQ=", "requires": { "kind-of": "^6.0.2" }, @@ -12389,7 +11830,7 @@ "kind-of": { "version": "6.0.2", "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.2.tgz", - "integrity": "sha512-s5kLOcnH0XqDO+FvuaLX8DDjZ18CGFk7VygH40QoKPUQhW4e2rvM0rwUq0t8IQDOwYSeLK01U90OjzBTme2QqA==" + "integrity": "sha1-ARRrNqYhjmTljzqNZt5df8b20FE=" } } }, @@ -12416,8 +11857,7 @@ "util.promisify": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/util.promisify/-/util.promisify-1.0.0.tgz", - "integrity": "sha512-i+6qA2MPhvoKLuxnJNpXAGhg7HphQOSUq2LKMZD0m15EiskXUkMvKdF4Uui0WYeCUGea+o2cw/ZuwehtfsrNkA==", - "dev": true, + "integrity": "sha1-RA9xZaRZyaFtwUXrjnLzVocJcDA=", "requires": { "define-properties": "^1.1.2", "object.getownpropertydescriptors": "^2.0.3" @@ -12426,7 +11866,7 @@ "utilise": { "version": "2.3.7", "resolved": "https://registry.npmjs.org/utilise/-/utilise-2.3.7.tgz", - "integrity": "sha512-IjGNAQE7txhJI8avL0Vfu6sGwwuMyXhNyO73quQQD3U0ofA4fjjovuAaQvxVuJ3e1wvqVV13VLeuEjsjGv+Jmg==", + "integrity": "sha1-ZTTWZ7j9bYNhecXTvcPxMvgg/U0=", "requires": { "colors": "^1.2.3", "through": "^2.3.8" @@ -12435,7 +11875,7 @@ "colors": { "version": "1.3.2", "resolved": "https://registry.npmjs.org/colors/-/colors-1.3.2.tgz", - "integrity": "sha512-rhP0JSBGYvpcNQj4s5AdShMeE5ahMop96cTeDl/v9qQQm2fYClE2QXZRi8wLzc+GmXSxdIqqbOIAhyObEXDbfQ==" + "integrity": "sha1-Lfj/Vz378lWvVi+M5xgda5caNZs=" } } }, @@ -12447,23 +11887,22 @@ "uuid": { "version": "3.2.1", "resolved": "https://registry.npmjs.org/uuid/-/uuid-3.2.1.tgz", - "integrity": "sha512-jZnMwlb9Iku/O3smGWvZhauCf6cvvpKi4BKRiliS3cxnI+Gz9j5MEpTz2UFuXiKPJocb7gnsLHwiS05ige5BEA==" + "integrity": "sha1-EsUou51Y0LkmXZovbw/ovhf/HxQ=" }, "uws": { "version": "9.148.0", "resolved": "https://registry.npmjs.org/uws/-/uws-9.148.0.tgz", - "integrity": "sha512-vWt+e8dOdwLM4neb1xIeZuQ7ZUN3l7n0qTKrOUtU1EZrV4BpmrSnsEL30d062/ocqRMGtLpwzVFsLKFgXomA9g==" + "integrity": "sha1-Vq/zbLlfdZRXPa/yohEF7Et2RmQ=" }, "v8-compile-cache": { "version": "2.0.2", "resolved": "https://registry.npmjs.org/v8-compile-cache/-/v8-compile-cache-2.0.2.tgz", - "integrity": "sha512-1wFuMUIM16MDJRCrpbpuEPTUGmM5QMUg0cr3KFwra2XgOgFcPGDQHDh3CszSCD2Zewc/dh/pamNEW8CbfDebUw==", - "dev": true + "integrity": "sha1-pCiyi7JnkHNMT8i8n6EG/M6/amw=" }, "validate-npm-package-license": { "version": "3.0.3", "resolved": "https://registry.npmjs.org/validate-npm-package-license/-/validate-npm-package-license-3.0.3.tgz", - "integrity": 
"sha512-63ZOUnL4SIXj4L0NixR3L1lcjO38crAbgrTpl28t8jjrfuiOBL5Iygm+60qPs/KsZGzPNg6Smnc/oY16QTjF0g==", + "integrity": "sha1-gWQ7y+8b3+zUYjeT3EZIlIupgzg=", "requires": { "spdx-correct": "^3.0.0", "spdx-expression-parse": "^3.0.0" @@ -12482,8 +11921,7 @@ "vendors": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/vendors/-/vendors-1.0.1.tgz", - "integrity": "sha1-N61zyO5Bf7PVgOeFMSMH0nSEfyI=", - "dev": true + "integrity": "sha1-N61zyO5Bf7PVgOeFMSMH0nSEfyI=" }, "verror": { "version": "1.10.0", @@ -12537,7 +11975,7 @@ "vlq": { "version": "0.2.3", "resolved": "https://registry.npmjs.org/vlq/-/vlq-0.2.3.tgz", - "integrity": "sha512-DRibZL6DsNhIgYQ+wNdWDL2SL3bKPlVrRiBqV5yuMm++op8W4kGFtaQfCs4KEJn0wBZcHVHJ3eoywX8983k1ow==" + "integrity": "sha1-jz5DKM9jsVQMDWfhsneDhviXWyY=" }, "vm-browserify": { "version": "0.0.4", @@ -12551,7 +11989,6 @@ "version": "1.0.1", "resolved": "https://registry.npmjs.org/wcwidth/-/wcwidth-1.0.1.tgz", "integrity": "sha1-8LDc+RW8X/FSivrbLA4XtTLaL+g=", - "dev": true, "requires": { "defaults": "^1.0.3" } @@ -12559,7 +11996,7 @@ "wd": { "version": "1.10.3", "resolved": "https://registry.npmjs.org/wd/-/wd-1.10.3.tgz", - "integrity": "sha512-ffqqZDtFFLeg5u/4pw2vYKECW+z+vW6vc+7rcqF15uu1/rmw3BydV84BONNc9DIcQ5Z7gQFS/hAuMvj53eVtSg==", + "integrity": "sha1-OVrH61ipjlVjafj45fhF2R+xUqM=", "requires": { "archiver": "2.1.1", "async": "2.0.1", @@ -12578,7 +12015,7 @@ "request": { "version": "2.85.0", "resolved": "http://registry.npmjs.org/request/-/request-2.85.0.tgz", - "integrity": "sha512-8H7Ehijd4js+s6wuVPLjwORxD4zeuyjYugprdOXlPSqaApmL/QOy+EB/beICHVCHkGMKNh5rvihb5ov+IDw4mg==", + "integrity": "sha1-WgNhWkfGFCCz65m326IE+DYD4fo=", "requires": { "aws-sign2": "~0.7.0", "aws4": "^1.6.0", @@ -12609,13 +12046,12 @@ "whet.extend": { "version": "0.9.9", "resolved": "https://registry.npmjs.org/whet.extend/-/whet.extend-0.9.9.tgz", - "integrity": "sha1-+HfVv2SMl+WqVC+twW1qJZucEaE=", - "dev": true + "integrity": "sha1-+HfVv2SMl+WqVC+twW1qJZucEaE=" }, "which": { "version": "1.3.1", "resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz", - "integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==", + "integrity": "sha1-pFBD1U9YBTFtqNYvn1CRjT2nCwo=", "requires": { "isexe": "^2.0.0" } @@ -12628,7 +12064,7 @@ "wide-align": { "version": "1.1.3", "resolved": "https://registry.npmjs.org/wide-align/-/wide-align-1.1.3.tgz", - "integrity": "sha512-QGkOQc8XL6Bt5PwnsExKBPuMKBxnGxWWW3fU55Xt4feHozMUhdUMaBCk290qpm/wG5u/RSKzwdAC4i51YigihA==", + "integrity": "sha1-rgdOa9wMFKQx6ATmJFScYzsABFc=", "requires": { "string-width": "^1.0.2 || 2" } @@ -12659,8 +12095,7 @@ "ws": { "version": "5.2.2", "resolved": "https://registry.npmjs.org/ws/-/ws-5.2.2.tgz", - "integrity": "sha512-jaHFD6PFv6UgoIVda6qZllptQsMlDEJkTQcybzzXDYM1XO9Y8em691FGMPmM46WGyLU4z9KMgQN+qrux/nhlHA==", - "dev": true, + "integrity": "sha1-3/7xSGa46NyRM1glFNG++vlumA8=", "requires": { "async-limiter": "~1.0.0" } @@ -12668,7 +12103,7 @@ "xrs": { "version": "1.2.2", "resolved": "https://registry.npmjs.org/xrs/-/xrs-1.2.2.tgz", - "integrity": "sha512-pLmxYQnG3Qm0xtZZMFr7W7ls9DYNtNe9D5KLQpniu3DeoHDMkFXrjo8OjCEyhQ3Pf4Jr/pYFDhuMrQVTfEqEOw==", + "integrity": "sha1-dn7pjNCcl+ikudwRkZc9IU6nt6s=", "requires": { "colors": "^1.2.1", "express": "^4.15.4", @@ -12680,7 +12115,7 @@ "colors": { "version": "1.3.2", "resolved": "https://registry.npmjs.org/colors/-/colors-1.3.2.tgz", - "integrity": "sha512-rhP0JSBGYvpcNQj4s5AdShMeE5ahMop96cTeDl/v9qQQm2fYClE2QXZRi8wLzc+GmXSxdIqqbOIAhyObEXDbfQ==" + "integrity": 
"sha1-Lfj/Vz378lWvVi+M5xgda5caNZs=" } } }, diff --git a/shuup/admin/views/menu.py b/shuup/admin/views/menu.py index e9b34ab127..cc0758d2a7 100644 --- a/shuup/admin/views/menu.py +++ b/shuup/admin/views/menu.py @@ -15,5 +15,5 @@ class MenuView(TemplateView): class MenuToggleView(View): def post(self, request, *args, **kwargs): - request.session["menu_open"] = int(request.POST.get("menu_open", 0)) + request.session["menu_open"] = not bool(request.session.get("menu_open", True)) return JsonResponse({"success": True}) diff --git a/shuup/notify/npm-shrinkwrap.json b/shuup/notify/npm-shrinkwrap.json index 15c2acd802..ddab45719b 100644 --- a/shuup/notify/npm-shrinkwrap.json +++ b/shuup/notify/npm-shrinkwrap.json @@ -7,7 +7,7 @@ "@mrmlnc/readdir-enhanced": { "version": "2.2.1", "resolved": "https://registry.npmjs.org/@mrmlnc/readdir-enhanced/-/readdir-enhanced-2.2.1.tgz", - "integrity": "sha512-bPHp6Ji8b41szTOcaP63VlnbbO5Ny6dwAATtY6JTjh5N2OLrb5Qk/Th5cRkRQhkWCt+EJsYrNB0MiL+Gpn6e3g==", + "integrity": "sha1-UkryQNGjYFJ7cwR17PoTRKpUDd4=", "requires": { "call-me-maybe": "^1.0.1", "glob-to-regexp": "^0.3.0" @@ -16,12 +16,12 @@ "@nodelib/fs.stat": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-1.1.0.tgz", - "integrity": "sha512-LAQ1d4OPfSJ/BMbI2DuizmYrrkD9JMaTdi2hQTlI53lQ4kRQPyZQRS4CYQ7O66bnBBnP/oYdRxbk++X0xuFU6A==" + "integrity": "sha1-UMHiJgrA7ZQ5oYHeNyWgFo1ZxIo=" }, "abbrev": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/abbrev/-/abbrev-1.1.1.tgz", - "integrity": "sha512-nne9/IiQ/hzIhY6pdDnbBtz7DjPTKrY00P/zvPSm5pOFkl6xuGrGnXn/VtTNNfNtAfZ9/1RtehkszU9qcTii0Q==" + "integrity": "sha1-+PLIh60Qv2f2NPAFtph/7TF5qsg=" }, "ajv": { "version": "5.5.2", @@ -53,7 +53,7 @@ "ansi-to-html": { "version": "0.6.6", "resolved": "https://registry.npmjs.org/ansi-to-html/-/ansi-to-html-0.6.6.tgz", - "integrity": "sha512-90M/2sZna3OsoOEbSyXK46poFnlClBC53Rx6etNKQK7iShsX5fI5E/M9Ld6FurtLaxAWLuAPi0Jp8p3y5oAkxg==", + "integrity": "sha1-WKjQS4fsmoXjrSc8EqX7xxR7nEI=", "requires": { "entities": "^1.1.1" } @@ -61,7 +61,7 @@ "argparse": { "version": "1.0.10", "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", - "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", + "integrity": "sha1-vNZ5HqWuCXJeF+WtmIE0zUCz2RE=", "requires": { "sprintf-js": "~1.0.2" } @@ -69,7 +69,7 @@ "arr-flatten": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/arr-flatten/-/arr-flatten-1.1.0.tgz", - "integrity": "sha512-L3hKV5R/p5o81R7O02IGnwpDmkp6E982XhtbuwSe3O4qOtMMMtodicASA1Cny2U+aCXcNpml+m4dPsvsJ3jatg==" + "integrity": "sha1-NgSLv/TntH4TZkQxbJlmnqWukfE=" }, "arr-union": { "version": "3.1.0", @@ -91,7 +91,7 @@ "asn1.js": { "version": "4.10.1", "resolved": "https://registry.npmjs.org/asn1.js/-/asn1.js-4.10.1.tgz", - "integrity": "sha512-p32cOF5q0Zqs9uBiONKYLm6BClCoBCM5O9JfeUSlnQLBTxYdTK+pW+nXflm8UkKd2UYlEbYz5qEi0JuZR9ckSw==", + "integrity": "sha1-ucK/WAXx5kqt7tbfOiv6+1pz9aA=", "requires": { "bn.js": "^4.0.0", "inherits": "^2.0.1", @@ -134,7 +134,7 @@ "async-limiter": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/async-limiter/-/async-limiter-1.0.0.tgz", - "integrity": "sha512-jp/uFnooOiO+L211eZOoSyzpOITMXx1rBITauYykG3BRYPu8h0UcxsPNB04RR5vo4Tyz3+ay17tR6JVf9qzYWg==" + "integrity": "sha1-ePrtjD0HSrgfIrTphdeehzj3IPg=" }, "asynckit": { "version": "0.4.0", @@ -150,7 +150,7 @@ "autoprefixer": { "version": "8.6.5", "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-8.6.5.tgz", - 
"integrity": "sha512-PLWJN3Xo/rycNkx+mp8iBDMTm3FeWe4VmYaZDSqL5QQB9sLsQkG5k8n+LNDFnhh9kdq2K+egL/icpctOmDHwig==", + "integrity": "sha1-ND89GT7VaLMgjgARehuW62kdTuk=", "requires": { "browserslist": "^3.2.8", "caniuse-lite": "^1.0.30000864", @@ -163,7 +163,7 @@ "ansi-styles": { "version": "3.2.1", "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "integrity": "sha1-QfuyAkPlCxK+DwS43tvwdSDOhB0=", "requires": { "color-convert": "^1.9.0" } @@ -171,7 +171,7 @@ "browserslist": { "version": "3.2.8", "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-3.2.8.tgz", - "integrity": "sha512-WHVocJYavUwVgVViC0ORikPHQquXwVh939TaelZ4WDqpWgTX/FsGhl/+P4qBUAGcRvtOgDgC+xftNWWp2RUTAQ==", + "integrity": "sha1-sABTYdZHHw9ZUnl6dvyYXx+Xj8Y=", "requires": { "caniuse-lite": "^1.0.30000844", "electron-to-chromium": "^1.3.47" @@ -180,7 +180,7 @@ "chalk": { "version": "2.4.1", "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.1.tgz", - "integrity": "sha512-ObN6h1v2fTJSmUXoS3nMQ92LbDK9be4TV+6G+omQlGJFdcUX5heKi1LZ1YnRMIgwTLEj3E24bT6tYni50rlCfQ==", + "integrity": "sha1-GMSasWoDe26wFSzIPjRxM4IVtm4=", "requires": { "ansi-styles": "^3.2.1", "escape-string-regexp": "^1.0.5", @@ -195,7 +195,7 @@ "postcss": { "version": "6.0.23", "resolved": "https://registry.npmjs.org/postcss/-/postcss-6.0.23.tgz", - "integrity": "sha512-soOk1h6J3VMTZtVeVpv15/Hpdl2cBLX3CAw4TAbkpTJiNPk9YP/zWcD1ND+xEtvyuuvKzbxliTOIyvkSeSJ6ag==", + "integrity": "sha1-YcgswyisYOZ3ZF+XkFTrmLwOMyQ=", "requires": { "chalk": "^2.4.1", "source-map": "^0.6.1", @@ -205,12 +205,12 @@ "source-map": { "version": "0.6.1", "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==" + "integrity": "sha1-dHIq8y6WFOnCh6jQu95IteLxomM=" }, "supports-color": { "version": "5.4.0", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.4.0.tgz", - "integrity": "sha512-zjaXglF5nnWpsq470jSv6P9DwPvgLkuapYmfDm3JWOm0vkNTVF2tI4UrN2r6jH1qM/uc/WtxYY1hYoA2dOKj5w==", + "integrity": "sha1-HGszdALCE3YF7+GfEP7DkPb6q1Q=", "requires": { "has-flag": "^3.0.0" } @@ -226,7 +226,7 @@ "aws4": { "version": "1.7.0", "resolved": "https://registry.npmjs.org/aws4/-/aws4-1.7.0.tgz", - "integrity": "sha512-32NDda82rhwD9/JBCCkB+MRYDp0oSvlo2IL6rQWA10PQi7tDUM3eqMSltXmY+Oyl/7N3P3qNtAlv7X0d9bI28w==", + "integrity": "sha1-1NDpudv8p3vwjusKikcVUP454ok=", "optional": true }, "babel-code-frame": { @@ -242,7 +242,7 @@ "babel-core": { "version": "6.26.3", "resolved": "https://registry.npmjs.org/babel-core/-/babel-core-6.26.3.tgz", - "integrity": "sha512-6jyFLuDmeidKmUEb3NM+/yawG0M2bDZ9Z1qbZP59cyHLz8kYGKYwpJP0UwUKKUiTRNvxfLesJnTedqczP7cTDA==", + "integrity": "sha1-suLwnjQtDwyI4vAuBneUEl51wgc=", "requires": { "babel-code-frame": "^6.26.0", "babel-generator": "^6.26.0", @@ -268,7 +268,7 @@ "babel-generator": { "version": "6.26.1", "resolved": "https://registry.npmjs.org/babel-generator/-/babel-generator-6.26.1.tgz", - "integrity": "sha512-HyfwY6ApZj7BYTcJURpM5tznulaBvyio7/0d4zFOeMPUmfxkCjHocCuoLa2SAGzBI8AREcH3eP3758F672DppA==", + "integrity": "sha1-GERAjTuPDTWkBOp6wYDwh6YBvZA=", "requires": { "babel-messages": "^6.23.0", "babel-runtime": "^6.26.0", @@ -577,7 +577,7 @@ "babel-plugin-transform-es2015-modules-commonjs": { "version": "6.26.2", "resolved": 
"https://registry.npmjs.org/babel-plugin-transform-es2015-modules-commonjs/-/babel-plugin-transform-es2015-modules-commonjs-6.26.2.tgz", - "integrity": "sha512-CV9ROOHEdrjcwhIaJNBGMBCodN+1cfkwtM1SbUHmvyy35KGT7fohbpOxkE2uLz1o6odKK2Ck/tz47z+VqQfi9Q==", + "integrity": "sha1-WKeThjqefKhwvcWogRF/+sJ9tvM=", "requires": { "babel-plugin-transform-strict-mode": "^6.24.1", "babel-runtime": "^6.26.0", @@ -732,7 +732,7 @@ "babel-preset-env": { "version": "1.7.0", "resolved": "https://registry.npmjs.org/babel-preset-env/-/babel-preset-env-1.7.0.tgz", - "integrity": "sha512-9OR2afuKDneX2/q2EurSftUYM0xGu4O2D9adAhVfADDhrYDaxXV0rBbevVYoY9n6nyX1PmQW/0jtpJvUNr9CHg==", + "integrity": "sha1-3qefpOvriDzTXasH4mDBycBN93o=", "requires": { "babel-plugin-check-es2015-constants": "^6.22.0", "babel-plugin-syntax-trailing-function-commas": "^6.22.0", @@ -769,7 +769,7 @@ "browserslist": { "version": "3.2.8", "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-3.2.8.tgz", - "integrity": "sha512-WHVocJYavUwVgVViC0ORikPHQquXwVh939TaelZ4WDqpWgTX/FsGhl/+P4qBUAGcRvtOgDgC+xftNWWp2RUTAQ==", + "integrity": "sha1-sABTYdZHHw9ZUnl6dvyYXx+Xj8Y=", "requires": { "caniuse-lite": "^1.0.30000844", "electron-to-chromium": "^1.3.47" @@ -842,7 +842,7 @@ "babylon": { "version": "6.18.0", "resolved": "https://registry.npmjs.org/babylon/-/babylon-6.18.0.tgz", - "integrity": "sha512-q/UEjfGJ2Cm3oKV71DJz9d25TPnq5rhBVL2Q4fA5wcC3jcrdn7+SssEybFIxwAvvP+YCsCYNKughoF33GxgycQ==" + "integrity": "sha1-ry87iPpvXB5MY00aD46sT1WzleM=" }, "babylon-walk": { "version": "1.0.2", @@ -862,7 +862,7 @@ "base": { "version": "0.11.2", "resolved": "https://registry.npmjs.org/base/-/base-0.11.2.tgz", - "integrity": "sha512-5T6P4xPgpp0YDFvSWwEZ4NoE3aM4QBQXDzmVbraCkFj8zHM+mba8SyqB5DbZWyR7mYHo6Y7BdQo3MoA4m0TeQg==", + "integrity": "sha1-e95c7RRbbVUakNuH+DxVi060io8=", "requires": { "cache-base": "^1.0.1", "class-utils": "^0.3.5", @@ -884,7 +884,7 @@ "is-accessor-descriptor": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz", - "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==", + "integrity": "sha1-FpwvbT3x+ZJhgHI2XJsOofaHhlY=", "requires": { "kind-of": "^6.0.0" } @@ -892,7 +892,7 @@ "is-data-descriptor": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz", - "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==", + "integrity": "sha1-2Eh2Mh0Oet0DmQQGq7u9NrqSaMc=", "requires": { "kind-of": "^6.0.0" } @@ -900,7 +900,7 @@ "is-descriptor": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz", - "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==", + "integrity": "sha1-OxWXRqZmBLBPjIFSS6NlxfFNhuw=", "requires": { "is-accessor-descriptor": "^1.0.0", "is-data-descriptor": "^1.0.0", @@ -915,14 +915,14 @@ "kind-of": { "version": "6.0.2", "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.2.tgz", - "integrity": "sha512-s5kLOcnH0XqDO+FvuaLX8DDjZ18CGFk7VygH40QoKPUQhW4e2rvM0rwUq0t8IQDOwYSeLK01U90OjzBTme2QqA==" + "integrity": "sha1-ARRrNqYhjmTljzqNZt5df8b20FE=" } } }, "base64-js": { "version": "1.3.0", "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.3.0.tgz", - "integrity": "sha512-ccav/yGvoa80BQDljCxsmmQ3Xvx60/UpBIij5QN21W3wBi/hhIC9OoO+KLpu9IJTS9j4DRVJ3aDDF9cMSoa2lw==" + "integrity": 
"sha1-yrHmEY8FEJXli1KBrqjBzSK/wOM=" }, "bcrypt-pbkdf": { "version": "1.0.1", @@ -936,7 +936,7 @@ "big.js": { "version": "3.2.0", "resolved": "https://registry.npmjs.org/big.js/-/big.js-3.2.0.tgz", - "integrity": "sha512-+hN/Zh2D08Mx65pZ/4g5bsmNiZUuChDiQfTUQ7qJr4/kuopCr88xZsAXv6mBoZEsUI4OuGHlX59qE94K2mMW8Q==" + "integrity": "sha1-pfwpi4G54Nyi5FiCR4S2XFK6WI4=" }, "binary-extensions": { "version": "1.11.0", @@ -951,12 +951,12 @@ "bluebird": { "version": "3.5.1", "resolved": "https://registry.npmjs.org/bluebird/-/bluebird-3.5.1.tgz", - "integrity": "sha512-MKiLiV+I1AA596t9w1sQJ8jkiSr5+ZKi0WKrYGUn6d1Fx+Ij4tIj+m2WMQSGczs5jZVxV339chE8iwk6F64wjA==" + "integrity": "sha1-2VUfnemPH82h5oPRfukaBgLuLrk=" }, "bn.js": { "version": "4.11.8", "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.11.8.tgz", - "integrity": "sha512-ItfYfPLkWHUjckQCk8xC+LwxgK8NYcXywGigJgSwOP8Y2iyWT4f2vsZnoOXTTbo+o5yXmIUJ4gn5538SO5S3gA==" + "integrity": "sha1-LN4J617jQfSEdGuwMJsyU7GxRC8=" }, "boolbase": { "version": "1.0.0", @@ -966,7 +966,7 @@ "brace-expansion": { "version": "1.1.11", "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", - "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "integrity": "sha1-PH/L9SnYcibz0vUrlm/1Jx60Qd0=", "requires": { "balanced-match": "^1.0.0", "concat-map": "0.0.1" @@ -982,7 +982,7 @@ "brfs": { "version": "1.6.1", "resolved": "https://registry.npmjs.org/brfs/-/brfs-1.6.1.tgz", - "integrity": "sha512-OfZpABRQQf+Xsmju8XE9bDjs+uU4vLREGolP7bDgcpsI17QREyZ4Bl+2KLxxx1kCgA0fAIhKQBaBYh+PEcCqYQ==", + "integrity": "sha1-t4ziM22BjiXuoEoJR8um1PuIScM=", "requires": { "quote-stream": "^1.0.1", "resolve": "^1.1.5", @@ -998,7 +998,7 @@ "browserify-aes": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/browserify-aes/-/browserify-aes-1.2.0.tgz", - "integrity": "sha512-+7CHXqGuspUn/Sl5aO7Ea0xWGAtETPXNSAjHo48JfLdPWcMng33Xe4znFvQweqc/uzk5zSOI3H52CYnjCfb5hA==", + "integrity": "sha1-Mmc0ZC9APavDADIJhTu3CtQo70g=", "requires": { "buffer-xor": "^1.0.3", "cipher-base": "^1.0.0", @@ -1011,7 +1011,7 @@ "browserify-cipher": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/browserify-cipher/-/browserify-cipher-1.0.1.tgz", - "integrity": "sha512-sPhkz0ARKbf4rRQt2hTpAHqn47X3llLkUGn+xEJzLjwY8LRs2p0v7ljvI5EyoRO/mexrNunNECisZs+gw2zz1w==", + "integrity": "sha1-jWR0wbhwv9q807z8wZNKEOlPFfA=", "requires": { "browserify-aes": "^1.0.4", "browserify-des": "^1.0.0", @@ -1021,7 +1021,7 @@ "browserify-des": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/browserify-des/-/browserify-des-1.0.1.tgz", - "integrity": "sha512-zy0Cobe3hhgpiOM32Tj7KQ3Vl91m0njwsjzZQK1L+JDf11dzP9qIvjreVinsvXrgfjhStXwUWAEpB9D7Gwmayw==", + "integrity": "sha1-M0MSTbbXrVPiaogmMYcSvchFD5w=", "requires": { "cipher-base": "^1.0.1", "des.js": "^1.0.0", @@ -1054,7 +1054,7 @@ "browserify-zlib": { "version": "0.2.0", "resolved": "https://registry.npmjs.org/browserify-zlib/-/browserify-zlib-0.2.0.tgz", - "integrity": "sha512-Z942RysHXmJrhqk88FmKBVq/v5tqmSkDz7p54G/MGyjMnCFFnC79XWNbg+Vta8W6Wb2qtSZTSxIGkJrRpCFEiA==", + "integrity": "sha1-KGlFnZqjviRf6P4sofRuLn9U1z8=", "requires": { "pako": "~1.0.5" } @@ -1086,7 +1086,7 @@ "buffer-from": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.1.tgz", - "integrity": "sha512-MQcXEUbCKtEo7bhqEs6560Hyd4XaovZlO/k9V3hjVUF/zwW7KBVdSK4gIt/bzwS9MbR5qob+F5jusZsb0YQK2A==" + "integrity": "sha1-MnE7wCj3XAL9txDXx7zsHyxgcO8=" }, "buffer-xor": { "version": "1.0.3", @@ 
-1101,7 +1101,7 @@ "cache-base": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/cache-base/-/cache-base-1.0.1.tgz", - "integrity": "sha512-AKcdTnFSWATd5/GCPRxr2ChwIJ85CeyrEyjRHlKxQ56d4XJMGym0uAiKn0xbLOGOl3+yRpOTi484dVCEc5AUzQ==", + "integrity": "sha1-Cn9GQWgxyLZi7jb+TnxZ129marI=", "requires": { "collection-visit": "^1.0.0", "component-emitter": "^1.2.1", @@ -1145,7 +1145,7 @@ "caniuse-lite": { "version": "1.0.30000874", "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30000874.tgz", - "integrity": "sha512-29nr1EPiHwrJTAHHsEmTt2h+55L8j2GNFdAcYPlRy2NX6iFz7ZZiepVI7kP/QqlnHLq3KvfWpbmGa0d063U09w==" + "integrity": "sha1-pkGx8cQg1Y2bEykg72uoe73NIiM=" }, "caseless": { "version": "0.12.0", @@ -1175,7 +1175,7 @@ "cipher-base": { "version": "1.0.4", "resolved": "https://registry.npmjs.org/cipher-base/-/cipher-base-1.0.4.tgz", - "integrity": "sha512-Kkht5ye6ZGmwv40uUDZztayT2ThLQGfnj/T71N/XzeZeo3nf8foyW7zGTsPYkEya3m5f3cAypH+qe7YOrM1U2Q==", + "integrity": "sha1-h2Dk7MJy9MNjUy+SbYdKriwTl94=", "requires": { "inherits": "^2.0.1", "safe-buffer": "^5.0.1" @@ -1184,7 +1184,7 @@ "clap": { "version": "1.2.3", "resolved": "https://registry.npmjs.org/clap/-/clap-1.2.3.tgz", - "integrity": "sha512-4CoL/A3hf90V3VIEjeuhSvlGFEHKzOz+Wfc2IVZc+FaUgU0ZQafJTP49fvnULipOPcAfqhyI2duwQyns6xqjYA==", + "integrity": "sha1-TzZ0WzIAhJJVf0ZBLWbVDLmbzlE=", "requires": { "chalk": "^1.1.3" } @@ -1192,7 +1192,7 @@ "class-utils": { "version": "0.3.6", "resolved": "https://registry.npmjs.org/class-utils/-/class-utils-0.3.6.tgz", - "integrity": "sha512-qOhPa/Fj7s6TY8H8esGu5QNpMMQxz79h+urzrNYN6mn+9BnxlDGf5QZ+XeCDsxSjPqsSR56XOZOJmpeurnLMeg==", + "integrity": "sha1-+TNprouafOAv1B+q0MqDAzGQxGM=", "requires": { "arr-union": "^3.1.0", "define-property": "^0.2.5", @@ -1226,7 +1226,7 @@ "cli-spinners": { "version": "1.3.1", "resolved": "https://registry.npmjs.org/cli-spinners/-/cli-spinners-1.3.1.tgz", - "integrity": "sha512-1QL4544moEsDVH9T/l6Cemov/37iv1RtoKf7NJ04A60+4MREXNfx/QvavbH6QoGdsD4N4Mwy49cmaINR/o2mdg==" + "integrity": "sha1-ACwZkJEtDVlYDJO9NsBW3pnkJZo=" }, "clone": { "version": "1.0.4", @@ -1274,7 +1274,7 @@ "color-convert": { "version": "1.9.1", "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.1.tgz", - "integrity": "sha512-mjGanIiwQJskCC18rPR6OmrZ6fm2Lc7PeGFYwCmy5J34wC6F1PzdGL6xeMfmgicfYcNLGuVFA3WzXtIDCQSZxQ==", + "integrity": "sha1-wSYRB66y8pTr/+ye2eytUppgl+0=", "requires": { "color-name": "^1.1.1" } @@ -1318,12 +1318,12 @@ "command-exists": { "version": "1.2.7", "resolved": "https://registry.npmjs.org/command-exists/-/command-exists-1.2.7.tgz", - "integrity": "sha512-doWDvhXCcW5LK0cIUWrOQ8oMFXJv3lEQCkJpGVjM8v9SV0uhqYXB943538tEA2CiaWqSyuYUGAm5ezDwEx9xlw==" + "integrity": "sha1-FoKPDD/ysMWIBYYe8hG2T8FWkqg=" }, "commander": { "version": "2.17.0", "resolved": "https://registry.npmjs.org/commander/-/commander-2.17.0.tgz", - "integrity": "sha512-477o1hdVORiFlZxw8wgsXYCef3lh0zl/OV0FTftqiDxJSWw6dPQ2ipS4k20J2qBcsmsmLKSyr2iFrf9e3JGi4w==" + "integrity": "sha1-nQeyXipvGYt22LdWoOipYEpqGmA=" }, "component-emitter": { "version": "1.2.1", @@ -1338,7 +1338,7 @@ "concat-stream": { "version": "1.6.2", "resolved": "https://registry.npmjs.org/concat-stream/-/concat-stream-1.6.2.tgz", - "integrity": "sha512-27HBghJxjiZtIk3Ycvn/4kbJk/1uZuJFfuPEns6LaEvpvG1f0hTea8lilrouyo9mVc2GWdcEZ8OLoGmSADlrCw==", + "integrity": "sha1-kEvfGUzTEi/Gdcd/xKw9T/D9GjQ=", "requires": { "buffer-from": "^1.0.0", "inherits": "^2.0.3", @@ -1388,7 +1388,7 @@ "core-js": { "version": "2.5.7", "resolved": 
"https://registry.npmjs.org/core-js/-/core-js-2.5.7.tgz", - "integrity": "sha512-RszJCAxg/PP6uzXVXL6BsxSXx/B05oJAQ2vkJRjyjrEcNVycaqOmNb5OTxZPE3xa5gwZduqza6L9JOCenh/Ecw==" + "integrity": "sha1-+XJgj/DOrWi4QaFqky0LGDeRgU4=" }, "core-util-is": { "version": "1.0.2", @@ -1398,7 +1398,7 @@ "cosmiconfig": { "version": "5.0.5", "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-5.0.5.tgz", - "integrity": "sha512-94j37OtvxS5w7qr7Ta6dt67tWdnOxigBVN4VnSxNXFez9o18PGQ0D33SchKP17r9LAcWVTYV72G6vDayAUBFIg==", + "integrity": "sha1-qAnjwjBokc4Xq3A1nci99mH+LNA=", "requires": { "is-directory": "^0.3.1", "js-yaml": "^3.9.0", @@ -1408,12 +1408,12 @@ "esprima": { "version": "4.0.1", "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", - "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==" + "integrity": "sha1-E7BM2z5sXRnfkatph6hpVhmwqnE=" }, "js-yaml": { "version": "3.12.0", "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.12.0.tgz", - "integrity": "sha512-PIt2cnwmPfL4hKNwqeiuz4bKfnzHTBv6HyVgjahA6mPLwPDzjDWrplJBMjHUFxku/N3FlmrbyPclad+I+4mJ3A==", + "integrity": "sha1-6u1lbsg0TxD1J8a/obbiJE3hZ9E=", "requires": { "argparse": "^1.0.7", "esprima": "^4.0.0" @@ -1424,7 +1424,7 @@ "create-ecdh": { "version": "4.0.3", "resolved": "https://registry.npmjs.org/create-ecdh/-/create-ecdh-4.0.3.tgz", - "integrity": "sha512-GbEHQPMOswGpKXM9kCWVrremUcBmjteUaQ01T9rkKCPDXfUHX0IoP9LpHYo2NPFampa4e+/pFDc3jQdxrxQLaw==", + "integrity": "sha1-yREbbzMEXEaX8UR4f5JUzcd8Rf8=", "requires": { "bn.js": "^4.1.0", "elliptic": "^6.0.0" @@ -1433,7 +1433,7 @@ "create-hash": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/create-hash/-/create-hash-1.2.0.tgz", - "integrity": "sha512-z00bCGNHDG8mHAkP7CtT1qVu+bFQUPjYq/4Iv3C3kWjTFV10zIjfSoeqXo9Asws8gwSHDGj/hl2u4OGIjapeCg==", + "integrity": "sha1-iJB4rxGmN1a8+1m9IhmWvjqe8ZY=", "requires": { "cipher-base": "^1.0.1", "inherits": "^2.0.1", @@ -1445,7 +1445,7 @@ "create-hmac": { "version": "1.1.7", "resolved": "https://registry.npmjs.org/create-hmac/-/create-hmac-1.1.7.tgz", - "integrity": "sha512-MJG9liiZ+ogc4TzUwuvbER1JRdgvUFSB5+VR/g5h82fGaIRWMWddtKBHi7/sVhfjQZ6SehlyhvQYrcYkaUIpLg==", + "integrity": "sha1-aRcMeLOrlXFHsriwRXLkfq0iQ/8=", "requires": { "cipher-base": "^1.0.3", "create-hash": "^1.1.0", @@ -1458,7 +1458,7 @@ "cross-spawn": { "version": "6.0.5", "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-6.0.5.tgz", - "integrity": "sha512-eTVLrBSt7fjbDygz805pMnstIs2VTBNkRm0qxZd+M7A5XDdxVRWO5MxGBXZhjY4cqLYLdtrGqRf8mBPmzwSpWQ==", + "integrity": "sha1-Sl7Hxk364iw6FBJNus3uhG2Ay8Q=", "requires": { "nice-try": "^1.0.4", "path-key": "^2.0.1", @@ -1470,7 +1470,7 @@ "crypto-browserify": { "version": "3.12.0", "resolved": "https://registry.npmjs.org/crypto-browserify/-/crypto-browserify-3.12.0.tgz", - "integrity": "sha512-fz4spIh+znjO2VjL+IdhEpRJ3YN6sMzITSBijk6FK2UvTqruSQW+/cCZTSNsMiZNvUeq0CqurF+dAbyiGOY6Wg==", + "integrity": "sha1-OWz58xN/A+S45TLFj2mCVOAPgOw=", "requires": { "browserify-cipher": "^1.0.0", "browserify-sign": "^4.0.0", @@ -1493,7 +1493,7 @@ "css-declaration-sorter": { "version": "3.0.1", "resolved": "https://registry.npmjs.org/css-declaration-sorter/-/css-declaration-sorter-3.0.1.tgz", - "integrity": "sha512-jH4024SHZ3e0M7ann9VxpFpH3moplRXNz9ZBqvFMZqi09Yo5ARbs2wdPH8GqN9iRTlQynrbGbraNbBxBLei85Q==", + "integrity": "sha1-0OMFaw/YjcHqnc7/Q1rb6ccCp/g=", "requires": { "postcss": "^6.0.0", "timsort": "^0.3.0" @@ -1502,7 +1502,7 @@ "ansi-styles": { "version": "3.2.1", 
"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "integrity": "sha1-QfuyAkPlCxK+DwS43tvwdSDOhB0=", "requires": { "color-convert": "^1.9.0" } @@ -1510,7 +1510,7 @@ "chalk": { "version": "2.4.1", "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.1.tgz", - "integrity": "sha512-ObN6h1v2fTJSmUXoS3nMQ92LbDK9be4TV+6G+omQlGJFdcUX5heKi1LZ1YnRMIgwTLEj3E24bT6tYni50rlCfQ==", + "integrity": "sha1-GMSasWoDe26wFSzIPjRxM4IVtm4=", "requires": { "ansi-styles": "^3.2.1", "escape-string-regexp": "^1.0.5", @@ -1525,7 +1525,7 @@ "postcss": { "version": "6.0.23", "resolved": "https://registry.npmjs.org/postcss/-/postcss-6.0.23.tgz", - "integrity": "sha512-soOk1h6J3VMTZtVeVpv15/Hpdl2cBLX3CAw4TAbkpTJiNPk9YP/zWcD1ND+xEtvyuuvKzbxliTOIyvkSeSJ6ag==", + "integrity": "sha1-YcgswyisYOZ3ZF+XkFTrmLwOMyQ=", "requires": { "chalk": "^2.4.1", "source-map": "^0.6.1", @@ -1535,12 +1535,12 @@ "source-map": { "version": "0.6.1", "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==" + "integrity": "sha1-dHIq8y6WFOnCh6jQu95IteLxomM=" }, "supports-color": { "version": "5.4.0", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.4.0.tgz", - "integrity": "sha512-zjaXglF5nnWpsq470jSv6P9DwPvgLkuapYmfDm3JWOm0vkNTVF2tI4UrN2r6jH1qM/uc/WtxYY1hYoA2dOKj5w==", + "integrity": "sha1-HGszdALCE3YF7+GfEP7DkPb6q1Q=", "requires": { "has-flag": "^3.0.0" } @@ -1609,7 +1609,7 @@ "css-tree": { "version": "1.0.0-alpha25", "resolved": "https://registry.npmjs.org/css-tree/-/css-tree-1.0.0-alpha25.tgz", - "integrity": "sha512-XC6xLW/JqIGirnZuUWHXCHRaAjje2b3OIB0Vj5RIJo6mIi/AdJo30quQl5LxUl0gkXDIrTrFGbMlcZjyFplz1A==", + "integrity": "sha1-G7+r+/bu708B2RCP8u3Qvi/jVZc=", "requires": { "mdn-data": "^1.0.0", "source-map": "^0.5.3" @@ -1729,7 +1729,7 @@ "ansi-styles": { "version": "3.2.1", "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "integrity": "sha1-QfuyAkPlCxK+DwS43tvwdSDOhB0=", "requires": { "color-convert": "^1.9.0" } @@ -1737,7 +1737,7 @@ "browserslist": { "version": "4.0.1", "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.0.1.tgz", - "integrity": "sha512-QqiiIWchEIkney3wY53/huI7ZErouNAdvOkjorUALAwRcu3tEwOV3Sh6He0DnP38mz1JjBpCBb50jQBmaYuHPw==", + "integrity": "sha1-YcBc4qWEPH2WFmQIvCPVi1QW6Bg=", "requires": { "caniuse-lite": "^1.0.30000865", "electron-to-chromium": "^1.3.52", @@ -1747,7 +1747,7 @@ "caniuse-api": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/caniuse-api/-/caniuse-api-3.0.0.tgz", - "integrity": "sha512-bsTwuIg/BZZK/vreVTYYbSWoe2F+71P7K5QGEX+pT250DZbfU1MQ5prOKpPR+LL6uWKK3KMwMCAS74QB3Um1uw==", + "integrity": "sha1-Xk2Q4idJYdRikZl99Znj7QCO5MA=", "requires": { "browserslist": "^4.0.0", "caniuse-lite": "^1.0.0", @@ -1758,7 +1758,7 @@ "chalk": { "version": "2.4.1", "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.1.tgz", - "integrity": "sha512-ObN6h1v2fTJSmUXoS3nMQ92LbDK9be4TV+6G+omQlGJFdcUX5heKi1LZ1YnRMIgwTLEj3E24bT6tYni50rlCfQ==", + "integrity": "sha1-GMSasWoDe26wFSzIPjRxM4IVtm4=", "requires": { "ansi-styles": "^3.2.1", "escape-string-regexp": "^1.0.5", @@ -1768,7 +1768,7 @@ "coa": { "version": "2.0.1", "resolved": 
"https://registry.npmjs.org/coa/-/coa-2.0.1.tgz", - "integrity": "sha512-5wfTTO8E2/ja4jFSxePXlG5nRu5bBtL/r1HCIpJW/lzT6yDtKl0u0Z4o/Vpz32IpKmBn7HerheEZQgA9N2DarQ==", + "integrity": "sha1-8/iwsVBz411wJj+xBCyywCPbOK8=", "requires": { "q": "^1.1.2" } @@ -1776,7 +1776,7 @@ "color": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/color/-/color-3.0.0.tgz", - "integrity": "sha512-jCpd5+s0s0t7p3pHQKpnJ0TpQKKdleP71LWcA0aqiljpiuAkOSUFN/dyH8ZwF0hRmFlrIuRhufds1QyEP9EB+w==", + "integrity": "sha1-2SC0Mo1TSjrIKV1o971LpsQnvpo=", "requires": { "color-convert": "^1.9.1", "color-string": "^1.5.2" @@ -1785,7 +1785,7 @@ "color-string": { "version": "1.5.3", "resolved": "https://registry.npmjs.org/color-string/-/color-string-1.5.3.tgz", - "integrity": "sha512-dC2C5qeWoYkxki5UAXapdjqO672AM4vZuPGRQfO8b5HKuKGBbKWpITyDYN7TOFKvRW7kOgAn3746clDBMDJyQw==", + "integrity": "sha1-ybvF8BtYtUkvPWhXRZy2WQziBMw=", "requires": { "color-name": "^1.0.0", "simple-swizzle": "^0.2.2" @@ -1794,7 +1794,7 @@ "csso": { "version": "3.5.1", "resolved": "https://registry.npmjs.org/csso/-/csso-3.5.1.tgz", - "integrity": "sha512-vrqULLffYU1Q2tLdJvaCYbONStnfkfimRxXNaGjxMldI0C7JPBC4rB1RyjhfdZ4m1frm8pM9uRPKH3d2knZ8gg==", + "integrity": "sha1-e564vmFiiXPBsmHhadLwJACOdYs=", "requires": { "css-tree": "1.0.0-alpha.29" }, @@ -1802,7 +1802,7 @@ "css-tree": { "version": "1.0.0-alpha.29", "resolved": "https://registry.npmjs.org/css-tree/-/css-tree-1.0.0-alpha.29.tgz", - "integrity": "sha512-sRNb1XydwkW9IOci6iB2xmy8IGCj6r/fr+JWitvJ2JxQRPzN3T4AGGVWCMlVmVwM1gtgALJRmGIlWv5ppnGGkg==", + "integrity": "sha1-P6nU7zFCy9HDAedmTB81K9gvWjk=", "requires": { "mdn-data": "~1.1.0", "source-map": "^0.5.3" @@ -1823,7 +1823,7 @@ "esprima": { "version": "4.0.1", "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", - "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==" + "integrity": "sha1-E7BM2z5sXRnfkatph6hpVhmwqnE=" }, "has-flag": { "version": "3.0.0", @@ -1833,7 +1833,7 @@ "is-svg": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/is-svg/-/is-svg-3.0.0.tgz", - "integrity": "sha512-gi4iHK53LR2ujhLVVj+37Ykh9GLqYHX6JOVXbLAucaG/Cqw9xwdFOjDM2qeifLs1sF1npXXFvDu0r5HNgCMrzQ==", + "integrity": "sha1-kyHb0pwhLlypnE+peUxxS8r6L3U=", "requires": { "html-comment-regex": "^1.1.0" } @@ -1841,7 +1841,7 @@ "js-yaml": { "version": "3.10.0", "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.10.0.tgz", - "integrity": "sha512-O2v52ffjLa9VeM43J4XocZE//WT9N0IiwDa3KSHH7Tu8CtH+1qM8SIZvnsTh6v+4yFy5KUY3BHUVwjpfAWsjIA==", + "integrity": "sha1-LnhEFka9RoLpY/IrbpKCPDCcYtw=", "requires": { "argparse": "^1.0.7", "esprima": "^4.0.0" @@ -1850,12 +1850,12 @@ "normalize-url": { "version": "3.2.0", "resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-3.2.0.tgz", - "integrity": "sha512-WvF3Myk0NhXkG8S9bygFM4IC1KOvnVJGq0QoGeoqOYOBeinBZp5ybW3QuYbTc89lkWBMM9ZBO4QGRoc0353kKA==" + "integrity": "sha1-mNCUivyCgp83QyD0Bf6cpVpfhWc=" }, "postcss": { "version": "6.0.23", "resolved": "https://registry.npmjs.org/postcss/-/postcss-6.0.23.tgz", - "integrity": "sha512-soOk1h6J3VMTZtVeVpv15/Hpdl2cBLX3CAw4TAbkpTJiNPk9YP/zWcD1ND+xEtvyuuvKzbxliTOIyvkSeSJ6ag==", + "integrity": "sha1-YcgswyisYOZ3ZF+XkFTrmLwOMyQ=", "requires": { "chalk": "^2.4.1", "source-map": "^0.6.1", @@ -1929,7 +1929,7 @@ "postcss-merge-longhand": { "version": "4.0.4", "resolved": "https://registry.npmjs.org/postcss-merge-longhand/-/postcss-merge-longhand-4.0.4.tgz", - "integrity": 
"sha512-wLi2u22mSdBDBjLF8pyaPCNppOmqb+B4O0Dlt/4nUwn79EltDUJmCeCDYqo7SB2z9puOHTftnxviY4J9xS+ygQ==", + "integrity": "sha1-v/x8b/oUZZHJk6C7g3PWX5oG1NA=", "requires": { "css-color-names": "0.0.4", "postcss": "^6.0.0", @@ -2092,7 +2092,7 @@ "reduce-css-calc": { "version": "2.1.4", "resolved": "https://registry.npmjs.org/reduce-css-calc/-/reduce-css-calc-2.1.4.tgz", - "integrity": "sha512-i/vWQbyd3aJRmip9OVSN9V6nIjLf/gg/ctxb0CpvHWtcRysFl/ngDBQD+rqavxdw/doScA3GMBXhzkHQ4GCzFQ==", + "integrity": "sha1-wg6c2oRFrXPU/0vqlgxvg1N5Fwg=", "requires": { "css-unit-converter": "^1.1.1", "postcss-value-parser": "^3.3.0" @@ -2101,12 +2101,12 @@ "source-map": { "version": "0.6.1", "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==" + "integrity": "sha1-dHIq8y6WFOnCh6jQu95IteLxomM=" }, "supports-color": { "version": "5.4.0", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.4.0.tgz", - "integrity": "sha512-zjaXglF5nnWpsq470jSv6P9DwPvgLkuapYmfDm3JWOm0vkNTVF2tI4UrN2r6jH1qM/uc/WtxYY1hYoA2dOKj5w==", + "integrity": "sha1-HGszdALCE3YF7+GfEP7DkPb6q1Q=", "requires": { "has-flag": "^3.0.0" } @@ -2114,7 +2114,7 @@ "svgo": { "version": "1.0.5", "resolved": "https://registry.npmjs.org/svgo/-/svgo-1.0.5.tgz", - "integrity": "sha512-nYrifviB77aNKDNKKyuay3M9aYiK6Hv5gJVDdjj2ZXTQmI8WZc8+UPLR5IpVlktJfSu3co/4XcWgrgI6seGBPg==", + "integrity": "sha1-cEA2TAYqBTirrP9EAc6momp6OJo=", "requires": { "coa": "~2.0.1", "colors": "~1.1.2", @@ -2155,7 +2155,7 @@ "ansi-styles": { "version": "3.2.1", "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "integrity": "sha1-QfuyAkPlCxK+DwS43tvwdSDOhB0=", "requires": { "color-convert": "^1.9.0" } @@ -2163,7 +2163,7 @@ "chalk": { "version": "2.4.1", "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.1.tgz", - "integrity": "sha512-ObN6h1v2fTJSmUXoS3nMQ92LbDK9be4TV+6G+omQlGJFdcUX5heKi1LZ1YnRMIgwTLEj3E24bT6tYni50rlCfQ==", + "integrity": "sha1-GMSasWoDe26wFSzIPjRxM4IVtm4=", "requires": { "ansi-styles": "^3.2.1", "escape-string-regexp": "^1.0.5", @@ -2178,7 +2178,7 @@ "postcss": { "version": "6.0.23", "resolved": "https://registry.npmjs.org/postcss/-/postcss-6.0.23.tgz", - "integrity": "sha512-soOk1h6J3VMTZtVeVpv15/Hpdl2cBLX3CAw4TAbkpTJiNPk9YP/zWcD1ND+xEtvyuuvKzbxliTOIyvkSeSJ6ag==", + "integrity": "sha1-YcgswyisYOZ3ZF+XkFTrmLwOMyQ=", "requires": { "chalk": "^2.4.1", "source-map": "^0.6.1", @@ -2188,12 +2188,12 @@ "source-map": { "version": "0.6.1", "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==" + "integrity": "sha1-dHIq8y6WFOnCh6jQu95IteLxomM=" }, "supports-color": { "version": "5.4.0", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.4.0.tgz", - "integrity": "sha512-zjaXglF5nnWpsq470jSv6P9DwPvgLkuapYmfDm3JWOm0vkNTVF2tI4UrN2r6jH1qM/uc/WtxYY1hYoA2dOKj5w==", + "integrity": "sha1-HGszdALCE3YF7+GfEP7DkPb6q1Q=", "requires": { "has-flag": "^3.0.0" } @@ -2231,7 +2231,7 @@ "deasync": { "version": "0.1.13", "resolved": "https://registry.npmjs.org/deasync/-/deasync-0.1.13.tgz", - "integrity": "sha512-/6ngYM7AapueqLtvOzjv9+11N2fHDSrkxeMF1YPE20WIfaaawiBg+HZH1E5lHrcJxlKR42t6XPOEmMmqcAsU1g==", + "integrity": "sha1-gVwrabvREXyuVwFSzYlWYcCfIOo=", 
"requires": { "bindings": "~1.2.1", "nan": "^2.0.7" @@ -2240,7 +2240,7 @@ "debug": { "version": "2.6.9", "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "integrity": "sha1-XRKFFd8TT/Mn6QpMk/Tgd6U2NB8=", "requires": { "ms": "2.0.0" } @@ -2280,7 +2280,7 @@ "define-property": { "version": "2.0.2", "resolved": "https://registry.npmjs.org/define-property/-/define-property-2.0.2.tgz", - "integrity": "sha512-jwK2UV4cnPpbcG7+VRARKTZPUWowwXA8bzH5NP6ud0oeAxyYPuGZUAC7hMugpCdz4BeSZl2Dl9k66CHJ/46ZYQ==", + "integrity": "sha1-1Flono1lS6d+AqgX+HENcCyxbp0=", "requires": { "is-descriptor": "^1.0.2", "isobject": "^3.0.1" @@ -2289,7 +2289,7 @@ "is-accessor-descriptor": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz", - "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==", + "integrity": "sha1-FpwvbT3x+ZJhgHI2XJsOofaHhlY=", "requires": { "kind-of": "^6.0.0" } @@ -2297,7 +2297,7 @@ "is-data-descriptor": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz", - "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==", + "integrity": "sha1-2Eh2Mh0Oet0DmQQGq7u9NrqSaMc=", "requires": { "kind-of": "^6.0.0" } @@ -2305,7 +2305,7 @@ "is-descriptor": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz", - "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==", + "integrity": "sha1-OxWXRqZmBLBPjIFSS6NlxfFNhuw=", "requires": { "is-accessor-descriptor": "^1.0.0", "is-data-descriptor": "^1.0.0", @@ -2320,7 +2320,7 @@ "kind-of": { "version": "6.0.2", "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.2.tgz", - "integrity": "sha512-s5kLOcnH0XqDO+FvuaLX8DDjZ18CGFk7VygH40QoKPUQhW4e2rvM0rwUq0t8IQDOwYSeLK01U90OjzBTme2QqA==" + "integrity": "sha1-ARRrNqYhjmTljzqNZt5df8b20FE=" } } }, @@ -2364,7 +2364,7 @@ "diffie-hellman": { "version": "5.0.3", "resolved": "https://registry.npmjs.org/diffie-hellman/-/diffie-hellman-5.0.3.tgz", - "integrity": "sha512-kqag/Nl+f3GwyK25fhUMYj81BUOrZ9IuJsjIcDE5icNM9FJHAVm3VcUDxdLPoQtTuUylWm6ZIknYJwwaPxsUzg==", + "integrity": "sha1-QOjumPVaIUlgcUaSHGPhrl89KHU=", "requires": { "bn.js": "^4.1.0", "miller-rabin": "^4.0.0", @@ -2390,7 +2390,7 @@ "domain-browser": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/domain-browser/-/domain-browser-1.2.0.tgz", - "integrity": "sha512-jnjyiM6eRyZl2H+W8Q/zLMA481hzi0eszAaBUzIVnmYVDBbnLxVNnfu1HgEBvCbL+71FrxMl3E6lpKH7Ge3OXA==" + "integrity": "sha1-PTH1AZGmdJ3RN1p/Ui6CPULlTto=" }, "domelementtype": { "version": "1.3.0", @@ -2400,7 +2400,7 @@ "domhandler": { "version": "2.4.2", "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-2.4.2.tgz", - "integrity": "sha512-JiK04h0Ht5u/80fdLMCEmV4zkNh2BcoMFBmZ/91WtYZ8qVXSKjiw7fXMgFPnHcSZgOo3XdinHvmnDUeMf5R4wA==", + "integrity": "sha1-iAUJfpM9ZehVRvcm1g9euItE+AM=", "requires": { "domelementtype": "1" } @@ -2417,7 +2417,7 @@ "dot-prop": { "version": "4.2.0", "resolved": "https://registry.npmjs.org/dot-prop/-/dot-prop-4.2.0.tgz", - "integrity": "sha512-tUMXrxlExSW6U2EXiiKGSBVdYgtV8qlHL+C10TsW4PURY/ic+eaysnSkwB4kA/mBlCyy/IKDJ+Lc3wbWeaXtuQ==", + "integrity": "sha1-HxngwuGqDjJ5fEl5nyg3rGr2nFc=", "requires": { "is-obj": "^1.0.0" } 
@@ -2425,7 +2425,7 @@ "dotenv": { "version": "5.0.1", "resolved": "https://registry.npmjs.org/dotenv/-/dotenv-5.0.1.tgz", - "integrity": "sha512-4As8uPrjfwb7VXC+WnLCbXK7y+Ueb2B3zgNCePYfhxS1PYeaO1YTeplffTEcbfLhvFNGLAz90VvJs9yomG7bow==" + "integrity": "sha1-pTF0Wb09eauIz/bkQFemo/ux/O8=" }, "duplexer2": { "version": "0.1.4", @@ -2447,7 +2447,7 @@ "editorconfig": { "version": "0.13.3", "resolved": "https://registry.npmjs.org/editorconfig/-/editorconfig-0.13.3.tgz", - "integrity": "sha512-WkjsUNVCu+ITKDj73QDvi0trvpdDWdkDyHybDGSXPfekLCqwmpD7CP7iPbvBgosNuLcI96XTDwNa75JyFl7tEQ==", + "integrity": "sha1-5SGeWHlR1glY/ZTqmpoAjN7/GzQ=", "requires": { "bluebird": "^3.0.5", "commander": "^2.9.0", @@ -2498,7 +2498,7 @@ "errno": { "version": "0.1.7", "resolved": "https://registry.npmjs.org/errno/-/errno-0.1.7.tgz", - "integrity": "sha512-MfrRBDWzIWifgq6tJj60gkAwtLNb6sQPlcFrSOflcP1aFmmruKQ2wRnze/8V6kgyz7H3FF8Npzv78mZ7XLLflg==", + "integrity": "sha1-RoTXF3mtOa8Xfj8AeZb3xnyFJhg=", "optional": true, "requires": { "prr": "~1.0.1" @@ -2507,7 +2507,7 @@ "error-ex": { "version": "1.3.2", "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.2.tgz", - "integrity": "sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==", + "integrity": "sha1-tKxAZIEH/c3PriQvQovqihTU8b8=", "requires": { "is-arrayish": "^0.2.1" } @@ -2515,7 +2515,7 @@ "es-abstract": { "version": "1.12.0", "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.12.0.tgz", - "integrity": "sha512-C8Fx/0jFmV5IPoMOFPA9P9G5NtqW+4cOPit3MIuvR2t7Ag2K15EJTpxnHAYTzL+aYQJIESYeXZmDBfOBE1HcpA==", + "integrity": "sha1-nbvdJ8aFbwABQhyhh4LXhr+KYWU=", "requires": { "es-to-primitive": "^1.1.1", "function-bind": "^1.1.1", @@ -2547,7 +2547,7 @@ "escodegen": { "version": "1.9.1", "resolved": "https://registry.npmjs.org/escodegen/-/escodegen-1.9.1.tgz", - "integrity": "sha512-6hTjO1NAWkHnDk3OqQ4YrCuwwmGHL9S3nPlzBOUG/R44rda3wLNrfvQ5fkSGjyhHFKM7ALPKcKGrwvCLe0lC7Q==", + "integrity": "sha1-264X75bI5L7bE1b0UE+kzC98t+I=", "requires": { "esprima": "^3.1.3", "estraverse": "^4.2.0", @@ -2564,7 +2564,7 @@ "source-map": { "version": "0.6.1", "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "integrity": "sha1-dHIq8y6WFOnCh6jQu95IteLxomM=", "optional": true } } @@ -2597,7 +2597,7 @@ "evp_bytestokey": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/evp_bytestokey/-/evp_bytestokey-1.0.3.tgz", - "integrity": "sha512-/f2Go4TognH/KvCISP7OUsHn85hT9nUkxxA9BEWxFn+Oj9o8ZNLm/40hdlgSLyuOimsrTKLUMEorQexp/aPQeA==", + "integrity": "sha1-f8vbGY3HGVlDLv4ThCaE4FJaywI=", "requires": { "md5.js": "^1.3.4", "safe-buffer": "^5.1.1" @@ -2621,7 +2621,7 @@ "is-extendable": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz", - "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==", + "integrity": "sha1-p0cPnkJnM9gb2B4RVSZOOjUHyrQ=", "requires": { "is-plain-object": "^2.0.4" } @@ -2647,7 +2647,7 @@ "acorn": { "version": "5.7.1", "resolved": "https://registry.npmjs.org/acorn/-/acorn-5.7.1.tgz", - "integrity": "sha512-d+nbxBUGKg7Arpsvbnlq61mc12ek3EY8EQldM3GPAhWJ1UVxC6TDGbIvUMNU6obBX3i1+ptCIzV4vq0gFPEGVQ==" + "integrity": "sha1-8JWCkpdwanyXdpWMCvyJMKm52dg=" }, "isarray": { "version": "0.0.1", @@ -2665,7 +2665,7 @@ "fast-glob": { "version": "2.2.2", "resolved": 
"https://registry.npmjs.org/fast-glob/-/fast-glob-2.2.2.tgz", - "integrity": "sha512-TR6zxCKftDQnUAPvkrCWdBgDq/gbqx8A3ApnBrR5rMvpp6+KMJI0Igw7fkWPgeVK0uhRXTXdvO3O+YP0CaUX2g==", + "integrity": "sha1-cXIzOKybTg4v/x1nSKKhPV7TUr8=", "requires": { "@mrmlnc/readdir-enhanced": "^2.2.1", "@nodelib/fs.stat": "^1.0.1", @@ -2688,7 +2688,7 @@ "braces": { "version": "2.3.2", "resolved": "https://registry.npmjs.org/braces/-/braces-2.3.2.tgz", - "integrity": "sha512-aNdbnj9P8PjdXU4ybaWLK2IF3jc/EoDYbC7AazW6to3TRsfXxscC9UXOB5iDiEQrkyIbWp2SLQda4+QAa7nc3w==", + "integrity": "sha1-WXn9PxTNUxVl5fot8av/8d+u5yk=", "requires": { "arr-flatten": "^1.1.0", "array-unique": "^0.3.2", @@ -2781,7 +2781,7 @@ "is-descriptor": { "version": "0.1.6", "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-0.1.6.tgz", - "integrity": "sha512-avDYr0SB3DwO9zsMov0gKCESFYqCnE4hq/4z3TdUlukEy5t9C0YRq7HLrsN52NAcqXKaepeCD0n+B0arnVG3Hg==", + "integrity": "sha1-Nm2CQN3kh8pRgjsaufB6EKeCUco=", "requires": { "is-accessor-descriptor": "^0.1.6", "is-data-descriptor": "^0.1.4", @@ -2791,14 +2791,14 @@ "kind-of": { "version": "5.1.0", "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-5.1.0.tgz", - "integrity": "sha512-NGEErnH6F2vUuXDh+OlbcKW7/wOcfdRHaZ7VWtqCztfHri/++YKmP51OdWeGPuqCOba6kk2OTe5d02VmTB80Pw==" + "integrity": "sha1-cpyR4thXt6QZofmqZWhcTDP1hF0=" } } }, "extglob": { "version": "2.0.4", "resolved": "https://registry.npmjs.org/extglob/-/extglob-2.0.4.tgz", - "integrity": "sha512-Nmb6QXkELsuBr24CJSkilo6UHHgbekK5UiZgfE6UHD3Eb27YC6oD+bhcT+tJ6cl8dmsgdQxnWlcry8ksBIBLpw==", + "integrity": "sha1-rQD+TcYSqSMuhxhxHcXLWrAoVUM=", "requires": { "array-unique": "^0.3.2", "define-property": "^1.0.0", @@ -2871,7 +2871,7 @@ "is-accessor-descriptor": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz", - "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==", + "integrity": "sha1-FpwvbT3x+ZJhgHI2XJsOofaHhlY=", "requires": { "kind-of": "^6.0.0" } @@ -2879,7 +2879,7 @@ "is-data-descriptor": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz", - "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==", + "integrity": "sha1-2Eh2Mh0Oet0DmQQGq7u9NrqSaMc=", "requires": { "kind-of": "^6.0.0" } @@ -2887,7 +2887,7 @@ "is-descriptor": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz", - "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==", + "integrity": "sha1-OxWXRqZmBLBPjIFSS6NlxfFNhuw=", "requires": { "is-accessor-descriptor": "^1.0.0", "is-data-descriptor": "^1.0.0", @@ -2933,12 +2933,12 @@ "kind-of": { "version": "6.0.2", "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.2.tgz", - "integrity": "sha512-s5kLOcnH0XqDO+FvuaLX8DDjZ18CGFk7VygH40QoKPUQhW4e2rvM0rwUq0t8IQDOwYSeLK01U90OjzBTme2QqA==" + "integrity": "sha1-ARRrNqYhjmTljzqNZt5df8b20FE=" }, "micromatch": { "version": "3.1.10", "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-3.1.10.tgz", - "integrity": "sha512-MWikgl9n9M3w+bpsY3He8L+w9eF9338xRl8IAO5viDizwSzziFEyUzo2xrrloB64ADbTf8uA8vRqqttDTOmccg==", + "integrity": "sha1-cIWbyVyYQJUvNZoGij/En57PrCM=", "requires": { "arr-diff": "^4.0.0", "array-unique": "^0.3.2", @@ -2976,7 +2976,7 @@ "filesize": { "version": "3.6.1", "resolved": 
"https://registry.npmjs.org/filesize/-/filesize-3.6.1.tgz", - "integrity": "sha512-7KjR1vv6qnicaPMi1iiTcI85CyYwRO/PSFCu6SvqL8jN2Wjt/NIYQTFtFs7fSDCYOstUkEWIQGFUg5YZQfjlcg==" + "integrity": "sha1-CQuz7gG2+AGoqL6Z0xcQs0Irsxc=" }, "flatten": { "version": "1.0.2", @@ -3026,7 +3026,7 @@ "fsevents": { "version": "1.2.4", "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-1.2.4.tgz", - "integrity": "sha512-z8H8/diyk76B7q5wg+Ud0+CqzcAF3mBBI/bA5ne5zrRUUIvNkJY//D3BqyH571KuAC4Nr7Rw7CjWX4r0y9DvNg==", + "integrity": "sha1-9B3LGvJYKvNpLaNvxVy9jhBBxCY=", "optional": true, "requires": { "nan": "^2.9.2", @@ -3488,7 +3488,7 @@ "fswatcher-child": { "version": "1.0.5", "resolved": "https://registry.npmjs.org/fswatcher-child/-/fswatcher-child-1.0.5.tgz", - "integrity": "sha512-T5BsoXc63WcPKLcQh77g3oJOqCHnXPp/QLuLgD9jhRBwDuOiVXL8PL6Dcy3ByfsdZmHKYQuPYN8PXEphyoS4qA==", + "integrity": "sha1-E00BL/p0kYl1YX4A5W5BOfNssUA=", "requires": { "chokidar": "^2.0.3" }, @@ -3496,7 +3496,7 @@ "anymatch": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-2.0.0.tgz", - "integrity": "sha512-5teOsQWABXHHBFP9y3skS5P3d/WfWXpv3FUpy+LorMrNYaT9pI4oLMQX7jzQ2KklNpGpWHzdCXTDT2Y3XGlZBw==", + "integrity": "sha1-vLJLTzeTTZqnrBe0ra+J58du8us=", "requires": { "micromatch": "^3.1.4", "normalize-path": "^2.1.1" @@ -3515,7 +3515,7 @@ "braces": { "version": "2.3.2", "resolved": "https://registry.npmjs.org/braces/-/braces-2.3.2.tgz", - "integrity": "sha512-aNdbnj9P8PjdXU4ybaWLK2IF3jc/EoDYbC7AazW6to3TRsfXxscC9UXOB5iDiEQrkyIbWp2SLQda4+QAa7nc3w==", + "integrity": "sha1-WXn9PxTNUxVl5fot8av/8d+u5yk=", "requires": { "arr-flatten": "^1.1.0", "array-unique": "^0.3.2", @@ -3542,7 +3542,7 @@ "chokidar": { "version": "2.0.4", "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-2.0.4.tgz", - "integrity": "sha512-z9n7yt9rOvIJrMhvDtDictKrkFHeihkNl6uWMmZlmL6tJtX9Cs+87oK+teBx+JIgzvbX3yZHT3eF8vpbDxHJXQ==", + "integrity": "sha1-NW/04rDo5D4yLRijckYLvPOszSY=", "requires": { "anymatch": "^2.0.0", "async-each": "^1.0.0", @@ -3628,7 +3628,7 @@ "is-descriptor": { "version": "0.1.6", "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-0.1.6.tgz", - "integrity": "sha512-avDYr0SB3DwO9zsMov0gKCESFYqCnE4hq/4z3TdUlukEy5t9C0YRq7HLrsN52NAcqXKaepeCD0n+B0arnVG3Hg==", + "integrity": "sha1-Nm2CQN3kh8pRgjsaufB6EKeCUco=", "requires": { "is-accessor-descriptor": "^0.1.6", "is-data-descriptor": "^0.1.4", @@ -3638,14 +3638,14 @@ "kind-of": { "version": "5.1.0", "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-5.1.0.tgz", - "integrity": "sha512-NGEErnH6F2vUuXDh+OlbcKW7/wOcfdRHaZ7VWtqCztfHri/++YKmP51OdWeGPuqCOba6kk2OTe5d02VmTB80Pw==" + "integrity": "sha1-cpyR4thXt6QZofmqZWhcTDP1hF0=" } } }, "extglob": { "version": "2.0.4", "resolved": "https://registry.npmjs.org/extglob/-/extglob-2.0.4.tgz", - "integrity": "sha512-Nmb6QXkELsuBr24CJSkilo6UHHgbekK5UiZgfE6UHD3Eb27YC6oD+bhcT+tJ6cl8dmsgdQxnWlcry8ksBIBLpw==", + "integrity": "sha1-rQD+TcYSqSMuhxhxHcXLWrAoVUM=", "requires": { "array-unique": "^0.3.2", "define-property": "^1.0.0", @@ -3718,7 +3718,7 @@ "is-accessor-descriptor": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz", - "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==", + "integrity": "sha1-FpwvbT3x+ZJhgHI2XJsOofaHhlY=", "requires": { "kind-of": "^6.0.0" } @@ -3726,7 +3726,7 @@ "is-data-descriptor": { "version": "1.0.0", "resolved": 
"https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz", - "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==", + "integrity": "sha1-2Eh2Mh0Oet0DmQQGq7u9NrqSaMc=", "requires": { "kind-of": "^6.0.0" } @@ -3734,7 +3734,7 @@ "is-descriptor": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz", - "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==", + "integrity": "sha1-OxWXRqZmBLBPjIFSS6NlxfFNhuw=", "requires": { "is-accessor-descriptor": "^1.0.0", "is-data-descriptor": "^1.0.0", @@ -3780,12 +3780,12 @@ "kind-of": { "version": "6.0.2", "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.2.tgz", - "integrity": "sha512-s5kLOcnH0XqDO+FvuaLX8DDjZ18CGFk7VygH40QoKPUQhW4e2rvM0rwUq0t8IQDOwYSeLK01U90OjzBTme2QqA==" + "integrity": "sha1-ARRrNqYhjmTljzqNZt5df8b20FE=" }, "micromatch": { "version": "3.1.10", "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-3.1.10.tgz", - "integrity": "sha512-MWikgl9n9M3w+bpsY3He8L+w9eF9338xRl8IAO5viDizwSzziFEyUzo2xrrloB64ADbTf8uA8vRqqttDTOmccg==", + "integrity": "sha1-cIWbyVyYQJUvNZoGij/En57PrCM=", "requires": { "arr-diff": "^4.0.0", "array-unique": "^0.3.2", @@ -3807,7 +3807,7 @@ "function-bind": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz", - "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==" + "integrity": "sha1-pWiZ0+o8m6uHS7l3O3xe3pL0iV0=" }, "generic-names": { "version": "1.0.3", @@ -3844,7 +3844,7 @@ "globals": { "version": "9.18.0", "resolved": "https://registry.npmjs.org/globals/-/globals-9.18.0.tgz", - "integrity": "sha512-S0nG3CLEQiY/ILxqtztTWH/3iRRdyBLw6KMDxnKMchrtbj2OFmehVh0WUCfW3DUrIgx/qFrJPICrq4Z4sTR9UQ==" + "integrity": "sha1-qjiWs+abSH8X4x7SFD1pqOMMLYo=" }, "graceful-fs": { "version": "4.1.11", @@ -3879,7 +3879,7 @@ "has": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/has/-/has-1.0.3.tgz", - "integrity": "sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==", + "integrity": "sha1-ci18v8H2qoJB8W3YFOAR4fQeh5Y=", "requires": { "function-bind": "^1.1.1" } @@ -3963,7 +3963,7 @@ "hash.js": { "version": "1.1.3", "resolved": "https://registry.npmjs.org/hash.js/-/hash.js-1.1.3.tgz", - "integrity": "sha512-/UETyP0W22QILqS+6HowevwhEFJ3MBJnwTf75Qob9Wz9t0DPuisL8kW8YZMK62dHAKE1c1p+gY1TtOLY+USEHA==", + "integrity": "sha1-NA3tvmKQGHFRweodd3o0SJNd+EY=", "requires": { "inherits": "^2.0.3", "minimalistic-assert": "^1.0.0" @@ -3979,7 +3979,7 @@ "hex-color-regex": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/hex-color-regex/-/hex-color-regex-1.1.0.tgz", - "integrity": "sha512-l9sfDFsuqtOqKDsQdqrMRk0U85RZc0RtOR9yPI7mRVOa4FsR/BVnZ0shmQRM96Ji99kYZP/7hn1cedc1+ApsTQ==" + "integrity": "sha1-TAb8y0YC/iYCs8k9+C1+fb8aio4=" }, "hmac-drbg": { "version": "1.0.1", @@ -4018,7 +4018,7 @@ "htmlnano": { "version": "0.1.10", "resolved": "https://registry.npmjs.org/htmlnano/-/htmlnano-0.1.10.tgz", - "integrity": "sha512-eTEUzz8VdWYp+w/KUdb99kwao4reR64epUySyZkQeepcyzPQ2n2EPWzibf6QDxmkGy10Kr+CKxYqI3izSbmhJQ==", + "integrity": "sha1-oKVI60x2rizyQj7HolyIFzTT3qY=", "requires": { "cssnano": "^3.4.0", "object-assign": "^4.0.1", @@ -4031,7 +4031,7 @@ "coa": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/coa/-/coa-2.0.1.tgz", - "integrity": 
"sha512-5wfTTO8E2/ja4jFSxePXlG5nRu5bBtL/r1HCIpJW/lzT6yDtKl0u0Z4o/Vpz32IpKmBn7HerheEZQgA9N2DarQ==", + "integrity": "sha1-8/iwsVBz411wJj+xBCyywCPbOK8=", "requires": { "q": "^1.1.2" } @@ -4039,7 +4039,7 @@ "csso": { "version": "3.5.1", "resolved": "https://registry.npmjs.org/csso/-/csso-3.5.1.tgz", - "integrity": "sha512-vrqULLffYU1Q2tLdJvaCYbONStnfkfimRxXNaGjxMldI0C7JPBC4rB1RyjhfdZ4m1frm8pM9uRPKH3d2knZ8gg==", + "integrity": "sha1-e564vmFiiXPBsmHhadLwJACOdYs=", "requires": { "css-tree": "1.0.0-alpha.29" }, @@ -4047,7 +4047,7 @@ "css-tree": { "version": "1.0.0-alpha.29", "resolved": "https://registry.npmjs.org/css-tree/-/css-tree-1.0.0-alpha.29.tgz", - "integrity": "sha512-sRNb1XydwkW9IOci6iB2xmy8IGCj6r/fr+JWitvJ2JxQRPzN3T4AGGVWCMlVmVwM1gtgALJRmGIlWv5ppnGGkg==", + "integrity": "sha1-P6nU7zFCy9HDAedmTB81K9gvWjk=", "requires": { "mdn-data": "~1.1.0", "source-map": "^0.5.3" @@ -4058,12 +4058,12 @@ "esprima": { "version": "4.0.1", "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", - "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==" + "integrity": "sha1-E7BM2z5sXRnfkatph6hpVhmwqnE=" }, "js-yaml": { "version": "3.10.0", "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.10.0.tgz", - "integrity": "sha512-O2v52ffjLa9VeM43J4XocZE//WT9N0IiwDa3KSHH7Tu8CtH+1qM8SIZvnsTh6v+4yFy5KUY3BHUVwjpfAWsjIA==", + "integrity": "sha1-LnhEFka9RoLpY/IrbpKCPDCcYtw=", "requires": { "argparse": "^1.0.7", "esprima": "^4.0.0" @@ -4072,7 +4072,7 @@ "svgo": { "version": "1.0.5", "resolved": "https://registry.npmjs.org/svgo/-/svgo-1.0.5.tgz", - "integrity": "sha512-nYrifviB77aNKDNKKyuay3M9aYiK6Hv5gJVDdjj2ZXTQmI8WZc8+UPLR5IpVlktJfSu3co/4XcWgrgI6seGBPg==", + "integrity": "sha1-cEA2TAYqBTirrP9EAc6momp6OJo=", "requires": { "coa": "~2.0.1", "colors": "~1.1.2", @@ -4147,7 +4147,7 @@ "ieee754": { "version": "1.1.11", "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.1.11.tgz", - "integrity": "sha512-VhDzCKN7K8ufStx/CLj5/PDTMgph+qwN5Pkd5i0sGnVwk56zJ0lkT8Qzi1xqWLS0Wp29DgDtNeS7v8/wMoZeHg==" + "integrity": "sha1-wWOE/+APW3g1gk5ntvK9RKUilFU=" }, "image-size": { "version": "0.5.5", @@ -4173,12 +4173,12 @@ "ini": { "version": "1.3.5", "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.5.tgz", - "integrity": "sha512-RZY5huIKCMRWDUqZlEi72f/lmXKMvuszcMBduliQ3nnWbx9X/ZBQO7DijMEYS9EhHBb2qacRUMtC7svLwe0lcw==" + "integrity": "sha1-7uJfVtscnsYIXgwid4CD9Zar+Sc=" }, "invariant": { "version": "2.2.4", "resolved": "https://registry.npmjs.org/invariant/-/invariant-2.2.4.tgz", - "integrity": "sha512-phJfQVBuaJM5raOpJjSfkiD6BpbCE4Ns//LaXl6wGYtUBY83nWS6Rf9tXm2e8VaK60JEjYldbPif/A2B1C2gNA==", + "integrity": "sha1-YQ88ksk1nOHbYW5TgAjSP/NRWOY=", "requires": { "loose-envify": "^1.0.0" } @@ -4212,12 +4212,12 @@ "is-buffer": { "version": "1.1.6", "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-1.1.6.tgz", - "integrity": "sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w==" + "integrity": "sha1-76ouqdqg16suoTqXsritUf776L4=" }, "is-callable": { "version": "1.1.4", "resolved": "https://registry.npmjs.org/is-callable/-/is-callable-1.1.4.tgz", - "integrity": "sha512-r5p9sxJjYnArLjObpjA4xu5EKI3CuKHkJXMhT7kwbpUyIFD1n5PMAsoPvWnvtZiNz7LjkYDRZhd7FlI0eMijEA==" + "integrity": "sha1-HhrfIZ4e62hNaR+dagX/DTCiTXU=" }, "is-color-stop": { "version": "1.1.0", @@ -4248,7 +4248,7 @@ "is-descriptor": { "version": "0.1.6", "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-0.1.6.tgz", - "integrity": 
"sha512-avDYr0SB3DwO9zsMov0gKCESFYqCnE4hq/4z3TdUlukEy5t9C0YRq7HLrsN52NAcqXKaepeCD0n+B0arnVG3Hg==", + "integrity": "sha1-Nm2CQN3kh8pRgjsaufB6EKeCUco=", "requires": { "is-accessor-descriptor": "^0.1.6", "is-data-descriptor": "^0.1.4", @@ -4258,7 +4258,7 @@ "kind-of": { "version": "5.1.0", "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-5.1.0.tgz", - "integrity": "sha512-NGEErnH6F2vUuXDh+OlbcKW7/wOcfdRHaZ7VWtqCztfHri/++YKmP51OdWeGPuqCOba6kk2OTe5d02VmTB80Pw==" + "integrity": "sha1-cpyR4thXt6QZofmqZWhcTDP1hF0=" } } }, @@ -4293,7 +4293,7 @@ "is-plain-object": { "version": "2.0.4", "resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-2.0.4.tgz", - "integrity": "sha512-h5PpgXkWitc38BBMYawTYMWJHFZJVnBquFE57xFpjB8pJFiF6gZ+bU+WyI/yqXiFR5mdLsgYNaPe8uao6Uv9Og==", + "integrity": "sha1-LBY7P6+xtgbZ0Xko8FwqHDjgdnc=", "requires": { "isobject": "^3.0.1" }, @@ -4316,7 +4316,7 @@ "is-resolvable": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/is-resolvable/-/is-resolvable-1.1.0.tgz", - "integrity": "sha512-qgDYXFSR5WvEfuS5dMj6oTMEbrrSaM0CrFk2Yiq/gXnBvD9pMa2jGXxyhGLfvhZpuMZe18CJpFxAt3CRs42NMg==" + "integrity": "sha1-+xj4fOH+uSUWnJpAfBkxijIG7Yg=" }, "is-svg": { "version": "2.1.0", @@ -4340,12 +4340,12 @@ "is-url": { "version": "1.2.4", "resolved": "https://registry.npmjs.org/is-url/-/is-url-1.2.4.tgz", - "integrity": "sha512-ITvGim8FhRiYe4IQ5uHSkj7pVaPDrCTkNd3yq3cV7iZAcJdHTUMPMEHcqSOy9xZ9qFenQCvi+2wjH9a1nXqHww==" + "integrity": "sha1-BKTfRtKMTP89c9Af8Gq+sxihqlI=" }, "is-windows": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/is-windows/-/is-windows-1.0.2.tgz", - "integrity": "sha512-eXK1UInq2bPmjyX6e3VHIzMLobc4J94i4AWn+Hpq3OU5KkrRC96OAcR3PRJ/pGu6m8TRnBHP9dkXQVsT/COVIA==" + "integrity": "sha1-0YUOuXkezRjmGCzhKjDzlmNLsZ0=" }, "is-wsl": { "version": "1.1.0", @@ -4379,12 +4379,12 @@ "js-base64": { "version": "2.4.5", "resolved": "https://registry.npmjs.org/js-base64/-/js-base64-2.4.5.tgz", - "integrity": "sha512-aUnNwqMOXw3yvErjMPSQu6qIIzUmT1e5KcU1OZxRDU1g/am6mzBvcrmLAYwzmB59BHPrh5/tKaiF4OPhqRWESQ==" + "integrity": "sha1-4pPNPHyC8HDXAPx6HKCi5p8QH5I=" }, "js-beautify": { "version": "1.7.5", "resolved": "https://registry.npmjs.org/js-beautify/-/js-beautify-1.7.5.tgz", - "integrity": "sha512-9OhfAqGOrD7hoQBLJMTA+BKuKmoEtTJXzZ7WDF/9gvjtey1koVLuZqIY6c51aPDjbNdNtIXAkiWKVhziawE9Og==", + "integrity": "sha1-adllHvYNu2SfZVJ7U2dJUBOKeRk=", "requires": { "config-chain": "~1.1.5", "editorconfig": "^0.13.2", @@ -4420,7 +4420,7 @@ "json-parse-better-errors": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/json-parse-better-errors/-/json-parse-better-errors-1.0.2.tgz", - "integrity": "sha512-mrqyZKfX5EhL7hvqcV6WG1yYjnjeuYDzDhhcAAUrq8Po85NBQBJP+ZDUT75qZQ98IkUoBqdkExkukOU7Ts2wrw==" + "integrity": "sha1-u4Z8+zRQ5pEHwTHRxRS6s9yLyqk=" }, "json-schema": { "version": "0.2.3", @@ -4468,7 +4468,7 @@ "less": { "version": "3.0.4", "resolved": "https://registry.npmjs.org/less/-/less-3.0.4.tgz", - "integrity": "sha512-q3SyEnPKbk9zh4l36PGeW2fgynKu+FpbhiUNx/yaiBUQ3V0CbACCgb9FzYWcRgI2DJlP6eI4jc8XPrCTi55YcQ==", + "integrity": "sha1-0n3O26yWAxyee3bx2h5LfYN2CBQ=", "requires": { "errno": "^0.1.1", "graceful-fs": "^4.1.2", @@ -4483,7 +4483,7 @@ "source-map": { "version": "0.6.1", "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "integrity": "sha1-dHIq8y6WFOnCh6jQu95IteLxomM=", "optional": true } } @@ -4511,7 +4511,7 @@ "lodash": { 
"version": "4.17.10", "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.10.tgz", - "integrity": "sha512-UejweD1pDoXu+AD825lWwp4ZGtSwgnpZxb3JDViD7StjQz+Nb/6l093lx4OQ0foGWNRoc19mWy7BzL+UAK2iVg==" + "integrity": "sha1-G3eTz3JZ6jj7NmHU04syYK+K5Oc=" }, "lodash.camelcase": { "version": "4.3.0", @@ -4541,7 +4541,7 @@ "log-symbols": { "version": "2.2.0", "resolved": "https://registry.npmjs.org/log-symbols/-/log-symbols-2.2.0.tgz", - "integrity": "sha512-VeIAFslyIerEJLXHziedo2basKbMKtTw3vfn5IzG0XTjhAVEJyNHnL2p7vc+wBDSdQuUpNw3M2u6xb9QsAY5Eg==", + "integrity": "sha1-V0Dhxdbw39pK2TI7UzIQfva0xAo=", "requires": { "chalk": "^2.0.1" }, @@ -4549,7 +4549,7 @@ "ansi-styles": { "version": "3.2.1", "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "integrity": "sha1-QfuyAkPlCxK+DwS43tvwdSDOhB0=", "requires": { "color-convert": "^1.9.0" } @@ -4557,7 +4557,7 @@ "chalk": { "version": "2.4.1", "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.1.tgz", - "integrity": "sha512-ObN6h1v2fTJSmUXoS3nMQ92LbDK9be4TV+6G+omQlGJFdcUX5heKi1LZ1YnRMIgwTLEj3E24bT6tYni50rlCfQ==", + "integrity": "sha1-GMSasWoDe26wFSzIPjRxM4IVtm4=", "requires": { "ansi-styles": "^3.2.1", "escape-string-regexp": "^1.0.5", @@ -4572,7 +4572,7 @@ "supports-color": { "version": "5.4.0", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.4.0.tgz", - "integrity": "sha512-zjaXglF5nnWpsq470jSv6P9DwPvgLkuapYmfDm3JWOm0vkNTVF2tI4UrN2r6jH1qM/uc/WtxYY1hYoA2dOKj5w==", + "integrity": "sha1-HGszdALCE3YF7+GfEP7DkPb6q1Q=", "requires": { "has-flag": "^3.0.0" } @@ -4582,7 +4582,7 @@ "loose-envify": { "version": "1.4.0", "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz", - "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==", + "integrity": "sha1-ce5R+nvkyuwaY4OffmgtgTLTDK8=", "requires": { "js-tokens": "^3.0.0 || ^4.0.0" } @@ -4598,7 +4598,7 @@ "magic-string": { "version": "0.22.5", "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.22.5.tgz", - "integrity": "sha512-oreip9rJZkzvA8Qzk9HFs8fZGF/u7H/gtrE8EN6RjKJ9kh2HlC+yQ2QezifqTZfGyiuAV0dRv5a+y/8gBb1m9w==", + "integrity": "sha1-jpz1r930Q4XB2lvCpqDb0QsDZX4=", "requires": { "vlq": "^0.2.2" } @@ -4633,7 +4633,7 @@ "mdn-data": { "version": "1.1.4", "resolved": "https://registry.npmjs.org/mdn-data/-/mdn-data-1.1.4.tgz", - "integrity": "sha512-FSYbp3lyKjyj3E7fMl6rYvUdX0FBXaluGqlFoYESWQlyUTq8R+wp0rkFxoYFqZlHCvsUXGjyJmLQSnXToYhOSA==" + "integrity": "sha1-ULXU/8RXUnZXPE7tuHgIEqhBnwE=" }, "merge-source-map": { "version": "1.0.4", @@ -4646,12 +4646,12 @@ "merge2": { "version": "1.2.2", "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.2.2.tgz", - "integrity": "sha512-bgM8twH86rWni21thii6WCMQMRMmwqqdW3sGWi9IipnVAszdLXRjwDwAnyrVXo6DuP3AjRMMttZKUB48QWIFGg==" + "integrity": "sha1-AyEuPajYbE2FI869YxgZNBT5TjQ=" }, "miller-rabin": { "version": "4.0.1", "resolved": "https://registry.npmjs.org/miller-rabin/-/miller-rabin-4.0.1.tgz", - "integrity": "sha512-115fLhvZVqWwHPbClyntxEVfVDfl9DLLTuJvq3g2O/Oxi8AiNouAHvDSzHS0viUJc+V5vm3eq91Xwqn9dp4jRA==", + "integrity": "sha1-8IA1HIZbDcViqEYpZtqlNUPHik0=", "requires": { "bn.js": "^4.0.0", "brorand": "^1.0.1" @@ -4660,18 +4660,18 @@ "mime": { "version": "1.6.0", "resolved": "https://registry.npmjs.org/mime/-/mime-1.6.0.tgz", - "integrity": 
"sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==", + "integrity": "sha1-Ms2eXGRVO9WNGaVor0Uqz/BJgbE=", "optional": true }, "mime-db": { "version": "1.33.0", "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.33.0.tgz", - "integrity": "sha512-BHJ/EKruNIqJf/QahvxwQZXKygOQ256myeN/Ew+THcAa5q+PjyTTMMeNQC4DZw5AwfvelsUrA6B67NKMqXDbzQ==" + "integrity": "sha1-o0kgUKXLm2NFBUHjnZeI0icng9s=" }, "mime-types": { "version": "2.1.18", "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.18.tgz", - "integrity": "sha512-lc/aahn+t4/SWV/qcmumYjymLsWfN3ELhpmVuUFjgsORruuZPVSwAQryq+HHGvO/SI2KVX26bx+En+zhM8g8hQ==", + "integrity": "sha1-bzI/YKg9ERRvgx/xH9ZuL+VQO7g=", "requires": { "mime-db": "~1.33.0" } @@ -4679,12 +4679,12 @@ "mimic-fn": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-1.2.0.tgz", - "integrity": "sha512-jf84uxzwiuiIVKiOLpfYk7N46TSy8ubTonmneY9vrpHNAnp0QBt2BxWV9dO3/j+BoVAb+a5G6YDPW3M5HOdMWQ==" + "integrity": "sha1-ggyGo5M0ZA6ZUWkovQP8qIBX0CI=" }, "minimalistic-assert": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/minimalistic-assert/-/minimalistic-assert-1.0.1.tgz", - "integrity": "sha512-UtJcAD4yEaGtjPezWuO9wC4nwUnVH/8/Im3yEHQP4b67cXlD/Qr9hdITCU1xDbSEXg2XKNaP8jsReV7vQd00/A==" + "integrity": "sha1-LhlN4ERibUoQ5/f7wAznPoPk1cc=" }, "minimalistic-crypto-utils": { "version": "1.0.1", @@ -4694,7 +4694,7 @@ "minimatch": { "version": "3.0.4", "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz", - "integrity": "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==", + "integrity": "sha1-UWbihkV/AzBgZL5Ul+jbsMPTIIM=", "requires": { "brace-expansion": "^1.1.7" } @@ -4707,7 +4707,7 @@ "mixin-deep": { "version": "1.3.1", "resolved": "https://registry.npmjs.org/mixin-deep/-/mixin-deep-1.3.1.tgz", - "integrity": "sha512-8ZItLHeEgaqEvd5lYBXfm4EZSFCX29Jb9K+lAHhDKzReKBQKj3R+7NOF6tjqYi9t4oI8VUfaWITJQm86wnXGNQ==", + "integrity": "sha1-pJ5yaNzhoNlpjkUybFYm3zVD0P4=", "requires": { "for-in": "^1.0.2", "is-extendable": "^1.0.1" @@ -4716,7 +4716,7 @@ "is-extendable": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz", - "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==", + "integrity": "sha1-p0cPnkJnM9gb2B4RVSZOOjUHyrQ=", "requires": { "is-plain-object": "^2.0.4" } @@ -4739,12 +4739,12 @@ "nan": { "version": "2.10.0", "resolved": "https://registry.npmjs.org/nan/-/nan-2.10.0.tgz", - "integrity": "sha512-bAdJv7fBLhWC+/Bls0Oza+mvTaNQtP+1RyhhhvD95pgUJz6XM5IzgmxOkItJ9tkoCiplvAnXI1tNmmUD/eScyA==" + "integrity": "sha1-ltDNYQ69WNS03pzAxoKM2pnHVI8=" }, "nanomatch": { "version": "1.2.13", "resolved": "https://registry.npmjs.org/nanomatch/-/nanomatch-1.2.13.tgz", - "integrity": "sha512-fpoe2T0RbHwBTBUOftAfBPaDEi06ufaUai0mE6Yn1kacc3SnTErfb/h+X94VXzI64rKFHYImXSvdwGGCmwOqCA==", + "integrity": "sha1-uHqKpPwN6P5r6IiVs4mD/yZb0Rk=", "requires": { "arr-diff": "^4.0.0", "array-unique": "^0.3.2", @@ -4772,24 +4772,24 @@ "kind-of": { "version": "6.0.2", "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.2.tgz", - "integrity": "sha512-s5kLOcnH0XqDO+FvuaLX8DDjZ18CGFk7VygH40QoKPUQhW4e2rvM0rwUq0t8IQDOwYSeLK01U90OjzBTme2QqA==" + "integrity": "sha1-ARRrNqYhjmTljzqNZt5df8b20FE=" } } }, "nice-try": { "version": "1.0.4", "resolved": "https://registry.npmjs.org/nice-try/-/nice-try-1.0.4.tgz", - "integrity": 
"sha512-2NpiFHqC87y/zFke0fC0spBXL3bBsoh/p5H1EFhshxjCR5+0g2d6BiXbUFz9v1sAcxsk2htp2eQnNIci2dIYcA==" + "integrity": "sha1-2Tli9sUvLBVYwPvabVEoGfHv4cQ=" }, "node-forge": { "version": "0.7.5", "resolved": "https://registry.npmjs.org/node-forge/-/node-forge-0.7.5.tgz", - "integrity": "sha512-MmbQJ2MTESTjt3Gi/3yG1wGpIMhUfcIypUCGtTizFR9IiccFwxSpfp0vtIZlkFclEqERemxfnSdZEMR9VqqEFQ==" + "integrity": "sha1-bBUsNFzhHFL0ZcKr2VfoY5zWdN8=" }, "node-libs-browser": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/node-libs-browser/-/node-libs-browser-2.1.0.tgz", - "integrity": "sha512-5AzFzdoIMb89hBGMZglEegffzgRg+ZFoUmisQ8HI4j1KDdpx13J0taNp2y9xPbur6W61gepGDDotGBVQ7mfUCg==", + "integrity": "sha1-X5QmPUBPbkR2fXJpAf/wVHjWAN8=", "requires": { "assert": "^1.1.1", "browserify-zlib": "^0.2.0", @@ -4819,7 +4819,7 @@ "node-releases": { "version": "1.0.0-alpha.10", "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-1.0.0-alpha.10.tgz", - "integrity": "sha512-BSQrRgOfN6L/MoKIa7pRUc7dHvflCXMcqyTBvphixcSsgJTuUd24vAFONuNfVsuwTyz28S1HEc9XN6ZKylk4Hg==", + "integrity": "sha1-YcjV+bWy4F2E66lB0FtvUgL2iio=", "requires": { "semver": "^5.3.0" } @@ -4908,12 +4908,12 @@ "object-inspect": { "version": "1.4.1", "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.4.1.tgz", - "integrity": "sha512-wqdhLpfCUbEsoEwl3FXwGyv8ief1k/1aUdIPCqVnupM6e8l63BEJdiF/0swtn04/8p05tG/T0FrpTlfwvljOdw==" + "integrity": "sha1-N/+xDnGtrzdI0F9xO0yUUvQCy8Q=" }, "object-keys": { "version": "1.0.12", "resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.0.12.tgz", - "integrity": "sha512-FTMyFUm2wBcGHnH2eXmz7tC6IwlqQZ6mVZ+6dm6vZ4IQIHjs6FdNsQBuKGPuUUUY6NfJw2PshC08Tn6LzLDOag==" + "integrity": "sha1-CcU4VTd1dTEMymL1W7M0q/97PtI=" }, "object-visit": { "version": "1.0.1", @@ -4984,7 +4984,7 @@ "opn": { "version": "5.3.0", "resolved": "https://registry.npmjs.org/opn/-/opn-5.3.0.tgz", - "integrity": "sha512-bYJHo/LOmoTd+pfiYhfZDnf9zekVJrY+cnS2a5F2x+w5ppvTqObojTP7WiFG+kVZs9Inw+qQ/lw7TroWwhdd2g==", + "integrity": "sha1-ZIcVZchjh18FLP31PT48ta21Oxw=", "requires": { "is-wsl": "^1.1.0" } @@ -5012,7 +5012,7 @@ "ora": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/ora/-/ora-2.1.0.tgz", - "integrity": "sha512-hNNlAd3gfv/iPmsNxYoAPLvxg7HuPozww7fFonMZvL84tP6Ox5igfk5j/+a9rtJJwqMgKK+JgWsAQik5o0HTLA==", + "integrity": "sha1-bK8oMOuSSUGGHsU6FzeZ4Ai1Hls=", "requires": { "chalk": "^2.3.1", "cli-cursor": "^2.1.0", @@ -5030,7 +5030,7 @@ "ansi-styles": { "version": "3.2.1", "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "integrity": "sha1-QfuyAkPlCxK+DwS43tvwdSDOhB0=", "requires": { "color-convert": "^1.9.0" } @@ -5038,7 +5038,7 @@ "chalk": { "version": "2.4.1", "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.1.tgz", - "integrity": "sha512-ObN6h1v2fTJSmUXoS3nMQ92LbDK9be4TV+6G+omQlGJFdcUX5heKi1LZ1YnRMIgwTLEj3E24bT6tYni50rlCfQ==", + "integrity": "sha1-GMSasWoDe26wFSzIPjRxM4IVtm4=", "requires": { "ansi-styles": "^3.2.1", "escape-string-regexp": "^1.0.5", @@ -5061,7 +5061,7 @@ "supports-color": { "version": "5.4.0", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.4.0.tgz", - "integrity": "sha512-zjaXglF5nnWpsq470jSv6P9DwPvgLkuapYmfDm3JWOm0vkNTVF2tI4UrN2r6jH1qM/uc/WtxYY1hYoA2dOKj5w==", + "integrity": "sha1-HGszdALCE3YF7+GfEP7DkPb6q1Q=", "requires": { "has-flag": "^3.0.0" } @@ -5086,12 +5086,12 @@ "pako": { "version": "1.0.6", 
"resolved": "https://registry.npmjs.org/pako/-/pako-1.0.6.tgz", - "integrity": "sha512-lQe48YPsMJAig+yngZ87Lus+NF+3mtu7DVOBu6b/gHO1YpKwIj5AWjZ/TOS7i46HD/UixzWb1zeWDZfGZ3iYcg==" + "integrity": "sha1-AQEhG6pwxLykoPY/Igbpe3368lg=" }, "parcel-bundler": { "version": "1.9.7", "resolved": "https://registry.npmjs.org/parcel-bundler/-/parcel-bundler-1.9.7.tgz", - "integrity": "sha512-x+RiXe/C+aOoFuw+acH/NKjKmUJ/2zbFWFUS/KE5jBk2ErsN0Dc3OxLpmEaeIMU4oMPWFeNm5mRXcXdeUwf7GA==", + "integrity": "sha1-XNAIUN6gJU03cAXVWrS/YEKborw=", "requires": { "ansi-to-html": "^0.6.4", "babel-code-frame": "^6.26.0", @@ -5157,7 +5157,7 @@ "ansi-styles": { "version": "3.2.1", "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "integrity": "sha1-QfuyAkPlCxK+DwS43tvwdSDOhB0=", "requires": { "color-convert": "^1.9.0" } @@ -5175,7 +5175,7 @@ "braces": { "version": "2.3.2", "resolved": "https://registry.npmjs.org/braces/-/braces-2.3.2.tgz", - "integrity": "sha512-aNdbnj9P8PjdXU4ybaWLK2IF3jc/EoDYbC7AazW6to3TRsfXxscC9UXOB5iDiEQrkyIbWp2SLQda4+QAa7nc3w==", + "integrity": "sha1-WXn9PxTNUxVl5fot8av/8d+u5yk=", "requires": { "arr-flatten": "^1.1.0", "array-unique": "^0.3.2", @@ -5202,7 +5202,7 @@ "browserslist": { "version": "3.2.8", "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-3.2.8.tgz", - "integrity": "sha512-WHVocJYavUwVgVViC0ORikPHQquXwVh939TaelZ4WDqpWgTX/FsGhl/+P4qBUAGcRvtOgDgC+xftNWWp2RUTAQ==", + "integrity": "sha1-sABTYdZHHw9ZUnl6dvyYXx+Xj8Y=", "requires": { "caniuse-lite": "^1.0.30000844", "electron-to-chromium": "^1.3.47" @@ -5211,7 +5211,7 @@ "chalk": { "version": "2.4.1", "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.1.tgz", - "integrity": "sha512-ObN6h1v2fTJSmUXoS3nMQ92LbDK9be4TV+6G+omQlGJFdcUX5heKi1LZ1YnRMIgwTLEj3E24bT6tYni50rlCfQ==", + "integrity": "sha1-GMSasWoDe26wFSzIPjRxM4IVtm4=", "requires": { "ansi-styles": "^3.2.1", "escape-string-regexp": "^1.0.5", @@ -5226,7 +5226,7 @@ "cssnano": { "version": "4.0.5", "resolved": "https://registry.npmjs.org/cssnano/-/cssnano-4.0.5.tgz", - "integrity": "sha512-P2O0sz/YAAzqZVsSWOrbliPCr0c6abwVNQmFZ48AgejN/GbzwEf6IVFGQAj0UKHC+crv60wUAPQocAnDmeWlkg==", + "integrity": "sha1-h4m1/b574F2KD35FxMeJ6+cS9ao=", "requires": { "cosmiconfig": "^5.0.0", "cssnano-preset-default": "^4.0.0", @@ -5237,7 +5237,7 @@ "esprima": { "version": "4.0.1", "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", - "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==" + "integrity": "sha1-E7BM2z5sXRnfkatph6hpVhmwqnE=" }, "expand-brackets": { "version": "2.1.4", @@ -5308,7 +5308,7 @@ "is-descriptor": { "version": "0.1.6", "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-0.1.6.tgz", - "integrity": "sha512-avDYr0SB3DwO9zsMov0gKCESFYqCnE4hq/4z3TdUlukEy5t9C0YRq7HLrsN52NAcqXKaepeCD0n+B0arnVG3Hg==", + "integrity": "sha1-Nm2CQN3kh8pRgjsaufB6EKeCUco=", "requires": { "is-accessor-descriptor": "^0.1.6", "is-data-descriptor": "^0.1.4", @@ -5318,14 +5318,14 @@ "kind-of": { "version": "5.1.0", "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-5.1.0.tgz", - "integrity": "sha512-NGEErnH6F2vUuXDh+OlbcKW7/wOcfdRHaZ7VWtqCztfHri/++YKmP51OdWeGPuqCOba6kk2OTe5d02VmTB80Pw==" + "integrity": "sha1-cpyR4thXt6QZofmqZWhcTDP1hF0=" } } }, "extglob": { "version": "2.0.4", "resolved": "https://registry.npmjs.org/extglob/-/extglob-2.0.4.tgz", - "integrity": 
"sha512-Nmb6QXkELsuBr24CJSkilo6UHHgbekK5UiZgfE6UHD3Eb27YC6oD+bhcT+tJ6cl8dmsgdQxnWlcry8ksBIBLpw==", + "integrity": "sha1-rQD+TcYSqSMuhxhxHcXLWrAoVUM=", "requires": { "array-unique": "^0.3.2", "define-property": "^1.0.0", @@ -5384,7 +5384,7 @@ "is-accessor-descriptor": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz", - "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==", + "integrity": "sha1-FpwvbT3x+ZJhgHI2XJsOofaHhlY=", "requires": { "kind-of": "^6.0.0" } @@ -5392,7 +5392,7 @@ "is-data-descriptor": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz", - "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==", + "integrity": "sha1-2Eh2Mh0Oet0DmQQGq7u9NrqSaMc=", "requires": { "kind-of": "^6.0.0" } @@ -5400,7 +5400,7 @@ "is-descriptor": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz", - "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==", + "integrity": "sha1-OxWXRqZmBLBPjIFSS6NlxfFNhuw=", "requires": { "is-accessor-descriptor": "^1.0.0", "is-data-descriptor": "^1.0.0", @@ -5446,7 +5446,7 @@ "js-yaml": { "version": "3.12.0", "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.12.0.tgz", - "integrity": "sha512-PIt2cnwmPfL4hKNwqeiuz4bKfnzHTBv6HyVgjahA6mPLwPDzjDWrplJBMjHUFxku/N3FlmrbyPclad+I+4mJ3A==", + "integrity": "sha1-6u1lbsg0TxD1J8a/obbiJE3hZ9E=", "requires": { "argparse": "^1.0.7", "esprima": "^4.0.0" @@ -5455,7 +5455,7 @@ "json5": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/json5/-/json5-1.0.1.tgz", - "integrity": "sha512-aKS4WQjPenRxiQsC93MNfjx+nbF4PAdYzmd/1JIj8HYzqfbu86beTuNgXDzPknWk0n0uARlyewZo4s++ES36Ow==", + "integrity": "sha1-d5+wAYYE+oVOrL9iUhgNg1Q+Pb4=", "requires": { "minimist": "^1.2.0" } @@ -5463,12 +5463,12 @@ "kind-of": { "version": "6.0.2", "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.2.tgz", - "integrity": "sha512-s5kLOcnH0XqDO+FvuaLX8DDjZ18CGFk7VygH40QoKPUQhW4e2rvM0rwUq0t8IQDOwYSeLK01U90OjzBTme2QqA==" + "integrity": "sha1-ARRrNqYhjmTljzqNZt5df8b20FE=" }, "micromatch": { "version": "3.1.10", "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-3.1.10.tgz", - "integrity": "sha512-MWikgl9n9M3w+bpsY3He8L+w9eF9338xRl8IAO5viDizwSzziFEyUzo2xrrloB64ADbTf8uA8vRqqttDTOmccg==", + "integrity": "sha1-cIWbyVyYQJUvNZoGij/En57PrCM=", "requires": { "arr-diff": "^4.0.0", "array-unique": "^0.3.2", @@ -5493,7 +5493,7 @@ "postcss": { "version": "6.0.23", "resolved": "https://registry.npmjs.org/postcss/-/postcss-6.0.23.tgz", - "integrity": "sha512-soOk1h6J3VMTZtVeVpv15/Hpdl2cBLX3CAw4TAbkpTJiNPk9YP/zWcD1ND+xEtvyuuvKzbxliTOIyvkSeSJ6ag==", + "integrity": "sha1-YcgswyisYOZ3ZF+XkFTrmLwOMyQ=", "requires": { "chalk": "^2.4.1", "source-map": "^0.6.1", @@ -5503,7 +5503,7 @@ "source-map": { "version": "0.6.1", "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==" + "integrity": "sha1-dHIq8y6WFOnCh6jQu95IteLxomM=" }, "strip-ansi": { "version": "4.0.0", @@ -5516,7 +5516,7 @@ "supports-color": { "version": "5.4.0", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.4.0.tgz", - "integrity": 
"sha512-zjaXglF5nnWpsq470jSv6P9DwPvgLkuapYmfDm3JWOm0vkNTVF2tI4UrN2r6jH1qM/uc/WtxYY1hYoA2dOKj5w==", + "integrity": "sha1-HGszdALCE3YF7+GfEP7DkPb6q1Q=", "requires": { "has-flag": "^3.0.0" } @@ -5526,7 +5526,7 @@ "parse-asn1": { "version": "5.1.1", "resolved": "https://registry.npmjs.org/parse-asn1/-/parse-asn1-5.1.1.tgz", - "integrity": "sha512-KPx7flKXg775zZpnp9SxJlz00gTd4BmJ2yJufSc44gMCRrRQ7NSzAcSJQfifuOLgW6bEi+ftrALtsgALeB2Adw==", + "integrity": "sha1-9r8pOBgzK9DatU77Fgh3JHRebKg=", "requires": { "asn1.js": "^4.0.0", "browserify-aes": "^1.0.0", @@ -5577,12 +5577,12 @@ "path-parse": { "version": "1.0.6", "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.6.tgz", - "integrity": "sha512-GSmOT2EbHrINBf9SR7CDELwlJ8AENk3Qn7OikK4nFYAu3Ote2+JYNVvkpAEQm3/TLNEJFD/xZJjzyxg3KBWOzw==" + "integrity": "sha1-1i27VnlAXXLEc37FhgDp3c8G0kw=" }, "pbkdf2": { "version": "3.0.16", "resolved": "https://registry.npmjs.org/pbkdf2/-/pbkdf2-3.0.16.tgz", - "integrity": "sha512-y4CXP3thSxqf7c0qmOF+9UeOTrifiVTIM+u7NWlq+PRsHbr7r7dpCmvzrZxa96JJUNi0Y5w9VqG5ZNeCVMoDcA==", + "integrity": "sha1-dAQgjsawG2LYW/g4U6gGT42cKlw=", "requires": { "create-hash": "^1.1.2", "create-hmac": "^1.1.4", @@ -5610,7 +5610,7 @@ "postcss": { "version": "5.2.18", "resolved": "https://registry.npmjs.org/postcss/-/postcss-5.2.18.tgz", - "integrity": "sha512-zrUjRRe1bpXKsX1qAJNJjqZViErVuyEkMTRrwu4ud4sbTtIBRmtaYDrHmcGgmrbsW3MHfmtIf+vJumgQn+PrXg==", + "integrity": "sha1-ut+hSX1GJE9jkPWLMZgw2RB4U8U=", "requires": { "chalk": "^1.1.3", "js-base64": "^2.1.9", @@ -5691,7 +5691,7 @@ "postcss-filter-plugins": { "version": "2.0.3", "resolved": "https://registry.npmjs.org/postcss-filter-plugins/-/postcss-filter-plugins-2.0.3.tgz", - "integrity": "sha512-T53GVFsdinJhgwm7rg1BzbeBRomOg9y5MBVhGcsV0CxurUdVj1UlPdKtn7aqYA/c/QVkzKMjq2bSV5dKG5+AwQ==", + "integrity": "sha1-giRf34IzcEFkXkdxFNjlk6oYuOw=", "requires": { "postcss": "^5.0.4" } @@ -5775,7 +5775,7 @@ "postcss-modules": { "version": "1.3.2", "resolved": "https://registry.npmjs.org/postcss-modules/-/postcss-modules-1.3.2.tgz", - "integrity": "sha512-QujH5ZpPtr1fBWTKDa43Hx45gm7p19aEtHaAtkMCBZZiB/D5za2wXSMtAf94tDUZHF3F5KZcTXISUNqgEQRiDw==", + "integrity": "sha1-CmFrhDh/H2DdKKAfWXaH6Ft7hIE=", "requires": { "css-modules-loader-core": "^1.1.0", "generic-names": "^1.0.3", @@ -5787,7 +5787,7 @@ "ansi-styles": { "version": "3.2.1", "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "integrity": "sha1-QfuyAkPlCxK+DwS43tvwdSDOhB0=", "requires": { "color-convert": "^1.9.0" } @@ -5795,7 +5795,7 @@ "chalk": { "version": "2.4.1", "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.1.tgz", - "integrity": "sha512-ObN6h1v2fTJSmUXoS3nMQ92LbDK9be4TV+6G+omQlGJFdcUX5heKi1LZ1YnRMIgwTLEj3E24bT6tYni50rlCfQ==", + "integrity": "sha1-GMSasWoDe26wFSzIPjRxM4IVtm4=", "requires": { "ansi-styles": "^3.2.1", "escape-string-regexp": "^1.0.5", @@ -5810,7 +5810,7 @@ "postcss": { "version": "7.0.2", "resolved": "https://registry.npmjs.org/postcss/-/postcss-7.0.2.tgz", - "integrity": "sha512-fmaUY5370keLUTx+CnwRxtGiuFTcNBLQBqr1oE3WZ/euIYmGAo0OAgOhVJ3ByDnVmOR3PK+0V9VebzfjRIUcqw==", + "integrity": "sha1-e1oQneNWgE4n+VqWC+8OTVvJuxg=", "requires": { "chalk": "^2.4.1", "source-map": "^0.6.1", @@ -5820,12 +5820,12 @@ "source-map": { "version": "0.6.1", "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": 
"sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==" + "integrity": "sha1-dHIq8y6WFOnCh6jQu95IteLxomM=" }, "supports-color": { "version": "5.4.0", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.4.0.tgz", - "integrity": "sha512-zjaXglF5nnWpsq470jSv6P9DwPvgLkuapYmfDm3JWOm0vkNTVF2tI4UrN2r6jH1qM/uc/WtxYY1hYoA2dOKj5w==", + "integrity": "sha1-HGszdALCE3YF7+GfEP7DkPb6q1Q=", "requires": { "has-flag": "^3.0.0" } @@ -5844,7 +5844,7 @@ "ansi-styles": { "version": "3.2.1", "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "integrity": "sha1-QfuyAkPlCxK+DwS43tvwdSDOhB0=", "requires": { "color-convert": "^1.9.0" } @@ -5852,7 +5852,7 @@ "chalk": { "version": "2.4.1", "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.1.tgz", - "integrity": "sha512-ObN6h1v2fTJSmUXoS3nMQ92LbDK9be4TV+6G+omQlGJFdcUX5heKi1LZ1YnRMIgwTLEj3E24bT6tYni50rlCfQ==", + "integrity": "sha1-GMSasWoDe26wFSzIPjRxM4IVtm4=", "requires": { "ansi-styles": "^3.2.1", "escape-string-regexp": "^1.0.5", @@ -5867,7 +5867,7 @@ "postcss": { "version": "6.0.22", "resolved": "https://registry.npmjs.org/postcss/-/postcss-6.0.22.tgz", - "integrity": "sha512-Toc9lLoUASwGqxBSJGTVcOQiDqjK+Z2XlWBg+IgYwQMY9vA2f7iMpXVc1GpPcfTSyM5lkxNo0oDwDRO+wm7XHA==", + "integrity": "sha1-4jt4MUkFw7kMvWFwISHnp4hI8qM=", "requires": { "chalk": "^2.4.1", "source-map": "^0.6.1", @@ -5877,12 +5877,12 @@ "source-map": { "version": "0.6.1", "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==" + "integrity": "sha1-dHIq8y6WFOnCh6jQu95IteLxomM=" }, "supports-color": { "version": "5.4.0", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.4.0.tgz", - "integrity": "sha512-zjaXglF5nnWpsq470jSv6P9DwPvgLkuapYmfDm3JWOm0vkNTVF2tI4UrN2r6jH1qM/uc/WtxYY1hYoA2dOKj5w==", + "integrity": "sha1-HGszdALCE3YF7+GfEP7DkPb6q1Q=", "requires": { "has-flag": "^3.0.0" } @@ -5901,7 +5901,7 @@ "ansi-styles": { "version": "3.2.1", "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "integrity": "sha1-QfuyAkPlCxK+DwS43tvwdSDOhB0=", "requires": { "color-convert": "^1.9.0" } @@ -5909,7 +5909,7 @@ "chalk": { "version": "2.4.1", "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.1.tgz", - "integrity": "sha512-ObN6h1v2fTJSmUXoS3nMQ92LbDK9be4TV+6G+omQlGJFdcUX5heKi1LZ1YnRMIgwTLEj3E24bT6tYni50rlCfQ==", + "integrity": "sha1-GMSasWoDe26wFSzIPjRxM4IVtm4=", "requires": { "ansi-styles": "^3.2.1", "escape-string-regexp": "^1.0.5", @@ -5924,7 +5924,7 @@ "postcss": { "version": "6.0.22", "resolved": "https://registry.npmjs.org/postcss/-/postcss-6.0.22.tgz", - "integrity": "sha512-Toc9lLoUASwGqxBSJGTVcOQiDqjK+Z2XlWBg+IgYwQMY9vA2f7iMpXVc1GpPcfTSyM5lkxNo0oDwDRO+wm7XHA==", + "integrity": "sha1-4jt4MUkFw7kMvWFwISHnp4hI8qM=", "requires": { "chalk": "^2.4.1", "source-map": "^0.6.1", @@ -5934,12 +5934,12 @@ "source-map": { "version": "0.6.1", "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==" + "integrity": "sha1-dHIq8y6WFOnCh6jQu95IteLxomM=" }, "supports-color": { "version": 
"5.4.0", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.4.0.tgz", - "integrity": "sha512-zjaXglF5nnWpsq470jSv6P9DwPvgLkuapYmfDm3JWOm0vkNTVF2tI4UrN2r6jH1qM/uc/WtxYY1hYoA2dOKj5w==", + "integrity": "sha1-HGszdALCE3YF7+GfEP7DkPb6q1Q=", "requires": { "has-flag": "^3.0.0" } @@ -5958,7 +5958,7 @@ "ansi-styles": { "version": "3.2.1", "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "integrity": "sha1-QfuyAkPlCxK+DwS43tvwdSDOhB0=", "requires": { "color-convert": "^1.9.0" } @@ -5966,7 +5966,7 @@ "chalk": { "version": "2.4.1", "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.1.tgz", - "integrity": "sha512-ObN6h1v2fTJSmUXoS3nMQ92LbDK9be4TV+6G+omQlGJFdcUX5heKi1LZ1YnRMIgwTLEj3E24bT6tYni50rlCfQ==", + "integrity": "sha1-GMSasWoDe26wFSzIPjRxM4IVtm4=", "requires": { "ansi-styles": "^3.2.1", "escape-string-regexp": "^1.0.5", @@ -5981,7 +5981,7 @@ "postcss": { "version": "6.0.22", "resolved": "https://registry.npmjs.org/postcss/-/postcss-6.0.22.tgz", - "integrity": "sha512-Toc9lLoUASwGqxBSJGTVcOQiDqjK+Z2XlWBg+IgYwQMY9vA2f7iMpXVc1GpPcfTSyM5lkxNo0oDwDRO+wm7XHA==", + "integrity": "sha1-4jt4MUkFw7kMvWFwISHnp4hI8qM=", "requires": { "chalk": "^2.4.1", "source-map": "^0.6.1", @@ -5991,12 +5991,12 @@ "source-map": { "version": "0.6.1", "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==" + "integrity": "sha1-dHIq8y6WFOnCh6jQu95IteLxomM=" }, "supports-color": { "version": "5.4.0", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.4.0.tgz", - "integrity": "sha512-zjaXglF5nnWpsq470jSv6P9DwPvgLkuapYmfDm3JWOm0vkNTVF2tI4UrN2r6jH1qM/uc/WtxYY1hYoA2dOKj5w==", + "integrity": "sha1-HGszdALCE3YF7+GfEP7DkPb6q1Q=", "requires": { "has-flag": "^3.0.0" } @@ -6024,7 +6024,7 @@ "ansi-styles": { "version": "3.2.1", "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "integrity": "sha1-QfuyAkPlCxK+DwS43tvwdSDOhB0=", "requires": { "color-convert": "^1.9.0" } @@ -6032,7 +6032,7 @@ "chalk": { "version": "2.4.1", "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.1.tgz", - "integrity": "sha512-ObN6h1v2fTJSmUXoS3nMQ92LbDK9be4TV+6G+omQlGJFdcUX5heKi1LZ1YnRMIgwTLEj3E24bT6tYni50rlCfQ==", + "integrity": "sha1-GMSasWoDe26wFSzIPjRxM4IVtm4=", "requires": { "ansi-styles": "^3.2.1", "escape-string-regexp": "^1.0.5", @@ -6047,7 +6047,7 @@ "postcss": { "version": "6.0.23", "resolved": "https://registry.npmjs.org/postcss/-/postcss-6.0.23.tgz", - "integrity": "sha512-soOk1h6J3VMTZtVeVpv15/Hpdl2cBLX3CAw4TAbkpTJiNPk9YP/zWcD1ND+xEtvyuuvKzbxliTOIyvkSeSJ6ag==", + "integrity": "sha1-YcgswyisYOZ3ZF+XkFTrmLwOMyQ=", "requires": { "chalk": "^2.4.1", "source-map": "^0.6.1", @@ -6057,12 +6057,12 @@ "source-map": { "version": "0.6.1", "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==" + "integrity": "sha1-dHIq8y6WFOnCh6jQu95IteLxomM=" }, "supports-color": { "version": "5.4.0", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.4.0.tgz", - "integrity": 
"sha512-zjaXglF5nnWpsq470jSv6P9DwPvgLkuapYmfDm3JWOm0vkNTVF2tI4UrN2r6jH1qM/uc/WtxYY1hYoA2dOKj5w==", + "integrity": "sha1-HGszdALCE3YF7+GfEP7DkPb6q1Q=", "requires": { "has-flag": "^3.0.0" } @@ -6083,7 +6083,7 @@ "ansi-styles": { "version": "3.2.1", "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "integrity": "sha1-QfuyAkPlCxK+DwS43tvwdSDOhB0=", "requires": { "color-convert": "^1.9.0" } @@ -6091,7 +6091,7 @@ "chalk": { "version": "2.4.1", "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.1.tgz", - "integrity": "sha512-ObN6h1v2fTJSmUXoS3nMQ92LbDK9be4TV+6G+omQlGJFdcUX5heKi1LZ1YnRMIgwTLEj3E24bT6tYni50rlCfQ==", + "integrity": "sha1-GMSasWoDe26wFSzIPjRxM4IVtm4=", "requires": { "ansi-styles": "^3.2.1", "escape-string-regexp": "^1.0.5", @@ -6106,7 +6106,7 @@ "postcss": { "version": "6.0.23", "resolved": "https://registry.npmjs.org/postcss/-/postcss-6.0.23.tgz", - "integrity": "sha512-soOk1h6J3VMTZtVeVpv15/Hpdl2cBLX3CAw4TAbkpTJiNPk9YP/zWcD1ND+xEtvyuuvKzbxliTOIyvkSeSJ6ag==", + "integrity": "sha1-YcgswyisYOZ3ZF+XkFTrmLwOMyQ=", "requires": { "chalk": "^2.4.1", "source-map": "^0.6.1", @@ -6116,12 +6116,12 @@ "source-map": { "version": "0.6.1", "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==" + "integrity": "sha1-dHIq8y6WFOnCh6jQu95IteLxomM=" }, "supports-color": { "version": "5.4.0", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.4.0.tgz", - "integrity": "sha512-zjaXglF5nnWpsq470jSv6P9DwPvgLkuapYmfDm3JWOm0vkNTVF2tI4UrN2r6jH1qM/uc/WtxYY1hYoA2dOKj5w==", + "integrity": "sha1-HGszdALCE3YF7+GfEP7DkPb6q1Q=", "requires": { "has-flag": "^3.0.0" } @@ -6142,7 +6142,7 @@ "ansi-styles": { "version": "3.2.1", "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "integrity": "sha1-QfuyAkPlCxK+DwS43tvwdSDOhB0=", "requires": { "color-convert": "^1.9.0" } @@ -6150,7 +6150,7 @@ "chalk": { "version": "2.4.1", "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.1.tgz", - "integrity": "sha512-ObN6h1v2fTJSmUXoS3nMQ92LbDK9be4TV+6G+omQlGJFdcUX5heKi1LZ1YnRMIgwTLEj3E24bT6tYni50rlCfQ==", + "integrity": "sha1-GMSasWoDe26wFSzIPjRxM4IVtm4=", "requires": { "ansi-styles": "^3.2.1", "escape-string-regexp": "^1.0.5", @@ -6165,7 +6165,7 @@ "postcss": { "version": "6.0.23", "resolved": "https://registry.npmjs.org/postcss/-/postcss-6.0.23.tgz", - "integrity": "sha512-soOk1h6J3VMTZtVeVpv15/Hpdl2cBLX3CAw4TAbkpTJiNPk9YP/zWcD1ND+xEtvyuuvKzbxliTOIyvkSeSJ6ag==", + "integrity": "sha1-YcgswyisYOZ3ZF+XkFTrmLwOMyQ=", "requires": { "chalk": "^2.4.1", "source-map": "^0.6.1", @@ -6175,12 +6175,12 @@ "source-map": { "version": "0.6.1", "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==" + "integrity": "sha1-dHIq8y6WFOnCh6jQu95IteLxomM=" }, "supports-color": { "version": "5.4.0", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.4.0.tgz", - "integrity": "sha512-zjaXglF5nnWpsq470jSv6P9DwPvgLkuapYmfDm3JWOm0vkNTVF2tI4UrN2r6jH1qM/uc/WtxYY1hYoA2dOKj5w==", + "integrity": "sha1-HGszdALCE3YF7+GfEP7DkPb6q1Q=", "requires": { "has-flag": "^3.0.0" } 
@@ -6200,7 +6200,7 @@ "ansi-styles": { "version": "3.2.1", "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "integrity": "sha1-QfuyAkPlCxK+DwS43tvwdSDOhB0=", "requires": { "color-convert": "^1.9.0" } @@ -6208,7 +6208,7 @@ "chalk": { "version": "2.4.1", "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.1.tgz", - "integrity": "sha512-ObN6h1v2fTJSmUXoS3nMQ92LbDK9be4TV+6G+omQlGJFdcUX5heKi1LZ1YnRMIgwTLEj3E24bT6tYni50rlCfQ==", + "integrity": "sha1-GMSasWoDe26wFSzIPjRxM4IVtm4=", "requires": { "ansi-styles": "^3.2.1", "escape-string-regexp": "^1.0.5", @@ -6223,7 +6223,7 @@ "postcss": { "version": "6.0.23", "resolved": "https://registry.npmjs.org/postcss/-/postcss-6.0.23.tgz", - "integrity": "sha512-soOk1h6J3VMTZtVeVpv15/Hpdl2cBLX3CAw4TAbkpTJiNPk9YP/zWcD1ND+xEtvyuuvKzbxliTOIyvkSeSJ6ag==", + "integrity": "sha1-YcgswyisYOZ3ZF+XkFTrmLwOMyQ=", "requires": { "chalk": "^2.4.1", "source-map": "^0.6.1", @@ -6233,12 +6233,12 @@ "source-map": { "version": "0.6.1", "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==" + "integrity": "sha1-dHIq8y6WFOnCh6jQu95IteLxomM=" }, "supports-color": { "version": "5.4.0", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.4.0.tgz", - "integrity": "sha512-zjaXglF5nnWpsq470jSv6P9DwPvgLkuapYmfDm3JWOm0vkNTVF2tI4UrN2r6jH1qM/uc/WtxYY1hYoA2dOKj5w==", + "integrity": "sha1-HGszdALCE3YF7+GfEP7DkPb6q1Q=", "requires": { "has-flag": "^3.0.0" } @@ -6258,7 +6258,7 @@ "ansi-styles": { "version": "3.2.1", "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "integrity": "sha1-QfuyAkPlCxK+DwS43tvwdSDOhB0=", "requires": { "color-convert": "^1.9.0" } @@ -6266,7 +6266,7 @@ "chalk": { "version": "2.4.1", "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.1.tgz", - "integrity": "sha512-ObN6h1v2fTJSmUXoS3nMQ92LbDK9be4TV+6G+omQlGJFdcUX5heKi1LZ1YnRMIgwTLEj3E24bT6tYni50rlCfQ==", + "integrity": "sha1-GMSasWoDe26wFSzIPjRxM4IVtm4=", "requires": { "ansi-styles": "^3.2.1", "escape-string-regexp": "^1.0.5", @@ -6281,7 +6281,7 @@ "postcss": { "version": "6.0.23", "resolved": "https://registry.npmjs.org/postcss/-/postcss-6.0.23.tgz", - "integrity": "sha512-soOk1h6J3VMTZtVeVpv15/Hpdl2cBLX3CAw4TAbkpTJiNPk9YP/zWcD1ND+xEtvyuuvKzbxliTOIyvkSeSJ6ag==", + "integrity": "sha1-YcgswyisYOZ3ZF+XkFTrmLwOMyQ=", "requires": { "chalk": "^2.4.1", "source-map": "^0.6.1", @@ -6291,12 +6291,12 @@ "source-map": { "version": "0.6.1", "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==" + "integrity": "sha1-dHIq8y6WFOnCh6jQu95IteLxomM=" }, "supports-color": { "version": "5.4.0", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.4.0.tgz", - "integrity": "sha512-zjaXglF5nnWpsq470jSv6P9DwPvgLkuapYmfDm3JWOm0vkNTVF2tI4UrN2r6jH1qM/uc/WtxYY1hYoA2dOKj5w==", + "integrity": "sha1-HGszdALCE3YF7+GfEP7DkPb6q1Q=", "requires": { "has-flag": "^3.0.0" } @@ -6315,7 +6315,7 @@ "ansi-styles": { "version": "3.2.1", "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": 
"sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "integrity": "sha1-QfuyAkPlCxK+DwS43tvwdSDOhB0=", "requires": { "color-convert": "^1.9.0" } @@ -6323,7 +6323,7 @@ "chalk": { "version": "2.4.1", "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.1.tgz", - "integrity": "sha512-ObN6h1v2fTJSmUXoS3nMQ92LbDK9be4TV+6G+omQlGJFdcUX5heKi1LZ1YnRMIgwTLEj3E24bT6tYni50rlCfQ==", + "integrity": "sha1-GMSasWoDe26wFSzIPjRxM4IVtm4=", "requires": { "ansi-styles": "^3.2.1", "escape-string-regexp": "^1.0.5", @@ -6338,7 +6338,7 @@ "postcss": { "version": "6.0.23", "resolved": "https://registry.npmjs.org/postcss/-/postcss-6.0.23.tgz", - "integrity": "sha512-soOk1h6J3VMTZtVeVpv15/Hpdl2cBLX3CAw4TAbkpTJiNPk9YP/zWcD1ND+xEtvyuuvKzbxliTOIyvkSeSJ6ag==", + "integrity": "sha1-YcgswyisYOZ3ZF+XkFTrmLwOMyQ=", "requires": { "chalk": "^2.4.1", "source-map": "^0.6.1", @@ -6348,12 +6348,12 @@ "source-map": { "version": "0.6.1", "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==" + "integrity": "sha1-dHIq8y6WFOnCh6jQu95IteLxomM=" }, "supports-color": { "version": "5.4.0", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.4.0.tgz", - "integrity": "sha512-zjaXglF5nnWpsq470jSv6P9DwPvgLkuapYmfDm3JWOm0vkNTVF2tI4UrN2r6jH1qM/uc/WtxYY1hYoA2dOKj5w==", + "integrity": "sha1-HGszdALCE3YF7+GfEP7DkPb6q1Q=", "requires": { "has-flag": "^3.0.0" } @@ -6383,7 +6383,7 @@ "ansi-styles": { "version": "3.2.1", "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "integrity": "sha1-QfuyAkPlCxK+DwS43tvwdSDOhB0=", "requires": { "color-convert": "^1.9.0" } @@ -6391,7 +6391,7 @@ "chalk": { "version": "2.4.1", "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.1.tgz", - "integrity": "sha512-ObN6h1v2fTJSmUXoS3nMQ92LbDK9be4TV+6G+omQlGJFdcUX5heKi1LZ1YnRMIgwTLEj3E24bT6tYni50rlCfQ==", + "integrity": "sha1-GMSasWoDe26wFSzIPjRxM4IVtm4=", "requires": { "ansi-styles": "^3.2.1", "escape-string-regexp": "^1.0.5", @@ -6406,7 +6406,7 @@ "postcss": { "version": "6.0.23", "resolved": "https://registry.npmjs.org/postcss/-/postcss-6.0.23.tgz", - "integrity": "sha512-soOk1h6J3VMTZtVeVpv15/Hpdl2cBLX3CAw4TAbkpTJiNPk9YP/zWcD1ND+xEtvyuuvKzbxliTOIyvkSeSJ6ag==", + "integrity": "sha1-YcgswyisYOZ3ZF+XkFTrmLwOMyQ=", "requires": { "chalk": "^2.4.1", "source-map": "^0.6.1", @@ -6416,12 +6416,12 @@ "source-map": { "version": "0.6.1", "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==" + "integrity": "sha1-dHIq8y6WFOnCh6jQu95IteLxomM=" }, "supports-color": { "version": "5.4.0", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.4.0.tgz", - "integrity": "sha512-zjaXglF5nnWpsq470jSv6P9DwPvgLkuapYmfDm3JWOm0vkNTVF2tI4UrN2r6jH1qM/uc/WtxYY1hYoA2dOKj5w==", + "integrity": "sha1-HGszdALCE3YF7+GfEP7DkPb6q1Q=", "requires": { "has-flag": "^3.0.0" } @@ -6513,7 +6513,7 @@ "posthtml": { "version": "0.11.3", "resolved": "https://registry.npmjs.org/posthtml/-/posthtml-0.11.3.tgz", - "integrity": "sha512-quMHnDckt2DQ9lRi6bYLnuyBDnVzK+McHa8+ar4kTdYbWEo/92hREOu3h70ZirudOOp/my2b3r0m5YtxY52yrA==", + "integrity": "sha1-F+opIbBVW3RV8zyXe9Fti4y3Tyc=", "requires": { "object-assign": "^4.1.1", 
"posthtml-parser": "^0.3.3", @@ -6523,7 +6523,7 @@ "posthtml-parser": { "version": "0.3.3", "resolved": "https://registry.npmjs.org/posthtml-parser/-/posthtml-parser-0.3.3.tgz", - "integrity": "sha512-H/Z/yXGwl49A7hYQLV1iQ3h87NE0aZ/PMZhFwhw3lKeCAN+Ti4idrHvVvh4/GX10I7u77aQw+QB4vV5/Lzvv5A==", + "integrity": "sha1-P+mG/KnwDA8QnXMbpZCxkvJud20=", "requires": { "htmlparser2": "^3.9.2", "isobject": "^2.1.0", @@ -6535,7 +6535,7 @@ "posthtml-parser": { "version": "0.4.1", "resolved": "https://registry.npmjs.org/posthtml-parser/-/posthtml-parser-0.4.1.tgz", - "integrity": "sha512-h7vXIQ21Ikz2w5wPClPakNP6mJeJCK6BT0GpqnQrNNABdR7/TchNlFyryL1Bz6Ww53YWCKkr6tdZuHlxY1AVdQ==", + "integrity": "sha1-lbeP73Zvu+Cm+GG26VWCvD0f+TM=", "requires": { "htmlparser2": "^3.9.2", "object-assign": "^4.1.1" @@ -6544,7 +6544,7 @@ "posthtml-render": { "version": "1.1.4", "resolved": "https://registry.npmjs.org/posthtml-render/-/posthtml-render-1.1.4.tgz", - "integrity": "sha512-jL6eFIzoN3xUEvbo33OAkSDE2VIKU4JQ1wENOows1DpfnrdapR/K3Q1/fB43Mq7wQlcSgRm23nFrvoioufM7eA==" + "integrity": "sha1-ldrAmJL08YP61ayCPwj0LAJWVR4=" }, "prelude-ls": { "version": "1.1.2", @@ -6559,7 +6559,7 @@ "private": { "version": "0.1.8", "resolved": "https://registry.npmjs.org/private/-/private-0.1.8.tgz", - "integrity": "sha512-VvivMrbvd2nKkiG38qjULzlc+4Vx4wm/whI9pQD35YrARNnhxeiRktSOhSukRLFNlzg6Br/cJPet5J/u19r/mg==" + "integrity": "sha1-I4Hts2ifelPWUxkAYPz4ItLzaP8=" }, "process": { "version": "0.11.10", @@ -6569,12 +6569,12 @@ "process-nextick-args": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.0.tgz", - "integrity": "sha512-MtEC1TqN0EU5nephaJ4rAtThHtC86dNN9qCuEhtshvpVBkAW5ZO7BASN9REnF9eoXGcRub+pFuKEpOHE+HbEMw==" + "integrity": "sha1-o31zL0JxtKsa0HDTVQjoKQeI/6o=" }, "promise": { "version": "7.3.1", "resolved": "https://registry.npmjs.org/promise/-/promise-7.3.1.tgz", - "integrity": "sha512-nolQXZ/4L+bP/UGlkfaIujX9BKxGwmQ9OT4mOt5yvy8iK1h3wqTEJCijzGANTCCl9nWjY41juyAn2K3Q1hLLTg==", + "integrity": "sha1-BktyYCsY+Q8pGSuLG8QY/9Hr078=", "optional": true, "requires": { "asap": "~2.0.3" @@ -6599,7 +6599,7 @@ "public-encrypt": { "version": "4.0.2", "resolved": "https://registry.npmjs.org/public-encrypt/-/public-encrypt-4.0.2.tgz", - "integrity": "sha512-4kJ5Esocg8X3h8YgJsKAuoesBgB7mqH3eowiDzMUPKiRDDE7E/BqqZD1hnTByIaAFiwAw246YEltSq7tdrOH0Q==", + "integrity": "sha1-RuuRByBr9zSJ+LhbadkTNMZhCZQ=", "requires": { "bn.js": "^4.1.0", "browserify-rsa": "^4.0.0", @@ -6621,7 +6621,7 @@ "qs": { "version": "6.5.2", "resolved": "https://registry.npmjs.org/qs/-/qs-6.5.2.tgz", - "integrity": "sha512-N5ZAX4/LxJmF+7wN74pUD6qAh9/wnvdQcjq9TZjevvXzSUo7bfmw91saqMjzGS2xq91/odN2dW/WOl7qQHNDGA==", + "integrity": "sha1-yzroBuh0BERYTvFUzo7pjUA/PjY=", "optional": true }, "query-string": { @@ -6663,7 +6663,7 @@ "randombytes": { "version": "2.0.6", "resolved": "https://registry.npmjs.org/randombytes/-/randombytes-2.0.6.tgz", - "integrity": "sha512-CIQ5OFxf4Jou6uOKe9t1AOgqpeU5fd70A8NPdHSGeYXqXsPe6peOwI0cUl88RWZ6sP1vPMV3avd/R6cZ5/sP1A==", + "integrity": "sha1-0wLFIpSFiISKjTAMkytEwkIx2oA=", "requires": { "safe-buffer": "^5.1.0" } @@ -6671,7 +6671,7 @@ "randomfill": { "version": "1.0.4", "resolved": "https://registry.npmjs.org/randomfill/-/randomfill-1.0.4.tgz", - "integrity": "sha512-87lcbR8+MhcWcUiQ+9e+Rwx8MyR2P7qnt15ynUlbm3TU/fjbgz4GsvfSUDTemtCCtVCqb4ZcEFlyPNTh9bBTLw==", + "integrity": "sha1-ySGW/IarQr6YPxvzF3giSTHWFFg=", "requires": { "randombytes": "^2.0.5", "safe-buffer": "^5.1.0" @@ -6685,7 +6685,7 @@ "readable-stream": { 
"version": "2.3.6", "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.6.tgz", - "integrity": "sha512-tQtKA9WIAhBF3+VLAseyMqZeBjW0AHJoxOtYqSUZNJxauErmLbVm2FW1y+J/YA9dUrAC39ITejlZWhVIwawkKw==", + "integrity": "sha1-sRwn2IuP8fvgcGQ8+UsMea4bCq8=", "requires": { "core-util-is": "~1.0.0", "inherits": "~2.0.3", @@ -6735,17 +6735,17 @@ "regenerate": { "version": "1.4.0", "resolved": "https://registry.npmjs.org/regenerate/-/regenerate-1.4.0.tgz", - "integrity": "sha512-1G6jJVDWrt0rK99kBjvEtziZNCICAuvIPkSiUFIQxVP06RCVpq3dmDo2oi6ABpYaDYaTRr67BEhL8r1wgEZZKg==" + "integrity": "sha1-SoVuxLVuQHfFV1icroXnpMiGmhE=" }, "regenerator-runtime": { "version": "0.11.1", "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.11.1.tgz", - "integrity": "sha512-MguG95oij0fC3QV3URf4V2SDYGJhJnJGqvIIgdECeODCT98wSWDAJ94SSuVpYQUoTcGUIL6L4yNB7j1DFFHSBg==" + "integrity": "sha1-vgWtf5v30i4Fb5cmzuUBf78Z4uk=" }, "regenerator-transform": { "version": "0.10.1", "resolved": "https://registry.npmjs.org/regenerator-transform/-/regenerator-transform-0.10.1.tgz", - "integrity": "sha512-PJepbvDbuK1xgIgnau7Y90cwaAmO/LCLMI2mPvaXq2heGMR3aWW5/BQvYrhJ8jgmQjXewXvBjzfqKcVOmhjZ6Q==", + "integrity": "sha1-HkmWg3Ix2ot/PPQRTXG1aRoGgN0=", "requires": { "babel-runtime": "^6.18.0", "babel-types": "^6.19.0", @@ -6755,7 +6755,7 @@ "regex-not": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/regex-not/-/regex-not-1.0.2.tgz", - "integrity": "sha512-J6SDjUgDxQj5NusnOtdFxDwN/+HWykR8GELwctJ7mdqhcyy1xEc4SRFHUXvxTp661YaVKAjfRLZ9cCqS6tn32A==", + "integrity": "sha1-H07OJ+ALC2XgJHpoEOaoXYOldSw=", "requires": { "extend-shallow": "^3.0.2", "safe-regex": "^1.1.0" @@ -6810,7 +6810,7 @@ "request": { "version": "2.87.0", "resolved": "https://registry.npmjs.org/request/-/request-2.87.0.tgz", - "integrity": "sha512-fcogkm7Az5bsS6Sl0sibkbhcKsnyon/jV1kF3ajGmF0c8HrttdKTPRT9hieOaQHA5HEq6r8OyWOo/o781C1tNw==", + "integrity": "sha1-MvACNc0I1IK00NaNuTqCnA7VdW4=", "optional": true, "requires": { "aws-sign2": "~0.7.0", @@ -6838,7 +6838,7 @@ "resolve": { "version": "1.8.1", "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.8.1.tgz", - "integrity": "sha512-AicPrAC7Qu1JxPCZ9ZgCZlY35QgFnNqc+0LtbRNxnVw4TXvjQ72wnuL9JQcEBgXkI9JM8MsT9kaQoHcpCRJOYA==", + "integrity": "sha1-gvHsGaQjrB+9CAsLqwa6NuhKeiY=", "requires": { "path-parse": "^1.0.5" } @@ -6860,7 +6860,7 @@ "ret": { "version": "0.1.15", "resolved": "https://registry.npmjs.org/ret/-/ret-0.1.15.tgz", - "integrity": "sha512-TTlYpa+OL+vMMNG24xSlQGEJ3B/RzEfUlLct7b5G/ytav+wPrplCpVMFuwzXbkecJrb6IYo1iFb0S9v37754mg==" + "integrity": "sha1-uKSCXVvbH8P29Twrwz+BOIaBx7w=" }, "rgb-regex": { "version": "1.0.1", @@ -6875,7 +6875,7 @@ "ripemd160": { "version": "2.0.2", "resolved": "https://registry.npmjs.org/ripemd160/-/ripemd160-2.0.2.tgz", - "integrity": "sha512-ii4iagi25WusVoiC4B4lq7pbXfAp3D9v5CwfkY33vffw2+pkDjY1D8GaN7spsxvCSx8dkPqOZCEZyfxcmJG2IA==", + "integrity": "sha1-ocGm9iR1FXe6XQeRTLyShQWFiQw=", "requires": { "hash-base": "^3.0.0", "inherits": "^2.0.1" @@ -6884,7 +6884,7 @@ "safe-buffer": { "version": "5.1.2", "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", - "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" + "integrity": "sha1-mR7GnSluAxN0fVm9/St0XDX4go0=" }, "safe-regex": { "version": "1.1.0", @@ -6897,13 +6897,13 @@ "safer-buffer": { "version": "2.1.2", "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", - "integrity": 
"sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==", + "integrity": "sha1-RPoWGwGHuVSd2Eu5GAL5vYOFzWo=", "optional": true }, "safer-eval": { "version": "1.2.3", "resolved": "https://registry.npmjs.org/safer-eval/-/safer-eval-1.2.3.tgz", - "integrity": "sha512-nDwXOhiheoaBT6op02n8wzsshjLXHhh4YAeqsDEoVmy1k2+lGv/ENLsGaWqkaKArUkUx48VO12/ZPa3sI/OEqQ==", + "integrity": "sha1-c7p0o0vIoH1qRBNcgV/Rio7r56A=", "requires": { "clones": "^1.1.0" } @@ -6911,17 +6911,17 @@ "sax": { "version": "1.2.4", "resolved": "https://registry.npmjs.org/sax/-/sax-1.2.4.tgz", - "integrity": "sha512-NqVDv9TpANUjFm0N8uM5GxL36UgKi9/atZw+x7YFnQ8ckwFGKrl4xX4yWtrey3UJm5nP1kUbnYgLopqWNSRhWw==" + "integrity": "sha1-KBYjTiN4vdxOU1T6tcqold9xANk=" }, "semver": { "version": "5.5.0", "resolved": "https://registry.npmjs.org/semver/-/semver-5.5.0.tgz", - "integrity": "sha512-4SJ3dm0WAwWy/NVeioZh5AntkdJoWKxHxcmyP622fOkgHa4z3R0TdBJICINyaSDE6uNwVc8gZr+ZinwZAH4xIA==" + "integrity": "sha1-3Eu8emyp2Rbe5dQ1FvAJK1j3uKs=" }, "send": { "version": "0.16.2", "resolved": "https://registry.npmjs.org/send/-/send-0.16.2.tgz", - "integrity": "sha512-E64YFPUssFHEFBvpbbjr44NCLtI1AohxQ8ZSiJjQLskAdKuriYEP6VyGEsRDH8ScozGpkaX1BGvhanqCwkcEZw==", + "integrity": "sha1-bsyh4PjBVtFBWXVZhI32RzCmu8E=", "requires": { "debug": "2.6.9", "depd": "~1.1.2", @@ -6941,14 +6941,14 @@ "mime": { "version": "1.4.1", "resolved": "https://registry.npmjs.org/mime/-/mime-1.4.1.tgz", - "integrity": "sha512-KI1+qOZu5DcW6wayYHSzR/tXKCDC5Om4s1z2QJjDULzLcmf3DvzS7oluY4HCTrc+9FiKmWUgeNLg7W3uIQvxtQ==" + "integrity": "sha1-Eh+evEnjdm8xGnbh+hyAA8SwOqY=" } } }, "serialize-to-js": { "version": "1.2.1", "resolved": "https://registry.npmjs.org/serialize-to-js/-/serialize-to-js-1.2.1.tgz", - "integrity": "sha512-TK6d30GNkOLeFDPuP6Jfy1Q1V31GxzppYTt2lzr8KWmIUKomFj+260QP5o4AhHLu0pr6urgyS8i/Z1PqurjBoA==", + "integrity": "sha1-Lof2H5OIJtJMRjp8vQ3Skp7DgAg=", "requires": { "js-beautify": "^1.7.5", "safer-eval": "^1.2.3" @@ -6957,7 +6957,7 @@ "serve-static": { "version": "1.13.2", "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.13.2.tgz", - "integrity": "sha512-p/tdJrO4U387R9oMjb1oj7qSMaMfmOyd4j9hOFoxZe2baQszgHcSWjuya/CiT5kgZZKRudHNOA0pYXOl8rQ5nw==", + "integrity": "sha1-CV6Ecv1bRiN9tQzkhqQ/S4bGzsE=", "requires": { "encodeurl": "~1.0.2", "escape-html": "~1.0.3", @@ -6973,7 +6973,7 @@ "set-value": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/set-value/-/set-value-2.0.0.tgz", - "integrity": "sha512-hw0yxk9GT/Hr5yJEYnHNKYXkIA8mVJgd9ditYZCe16ZczcaELYYcfvaXesNACk2O8O0nTiPQcQhGUQj8JLzeeg==", + "integrity": "sha1-ca5KiPD+77v1LR6mBPP7MV67YnQ=", "requires": { "extend-shallow": "^2.0.1", "is-extendable": "^0.1.1", @@ -6999,12 +6999,12 @@ "setprototypeof": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.1.0.tgz", - "integrity": "sha512-BvE/TwpZX4FXExxOxZyRGQQv651MSwmWKZGqvmPcRIjDqWub67kTKuIMx43cZZrS/cBBzwBcNDWoFxt2XEFIpQ==" + "integrity": "sha1-0L2FU2iHtv58DYGMuWLZ2RxU5lY=" }, "sha.js": { "version": "2.4.11", "resolved": "https://registry.npmjs.org/sha.js/-/sha.js-2.4.11.tgz", - "integrity": "sha512-QMEp5B7cftE7APOjk5Y6xgrbWu+WkLVQwk8JNjZ8nKRciZaByEW6MubieAiToS7+dwvrjGhH8jRXz3MVd0AYqQ==", + "integrity": "sha1-N6XPC4HsvGlD3hCbopYNGyZYSuc=", "requires": { "inherits": "^2.0.1", "safe-buffer": "^5.0.1" @@ -7052,7 +7052,7 @@ "is-arrayish": { "version": "0.3.2", "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.3.2.tgz", - "integrity": 
"sha512-eVRqCvVlZbuw3GrM63ovNSNAeA1K16kaR/LRY/92w0zxQ5/1YzwblUX652i4Xs9RwAGjW9d9y6X88t8OaAJfWQ==" + "integrity": "sha1-RXSirlb3qyBolvtDHq7tBm/fjwM=" } } }, @@ -7064,7 +7064,7 @@ "snapdragon": { "version": "0.8.2", "resolved": "https://registry.npmjs.org/snapdragon/-/snapdragon-0.8.2.tgz", - "integrity": "sha512-FtyOnWN/wCHTVXOMwvSv26d+ko5vWlIDD6zoUJ7LW8vh+ZBC8QdljveRP+crNrtBwioEUWy/4dMtbBjA4ioNlg==", + "integrity": "sha1-ZJIufFZbDhQgS6GqfWlkJ40lGC0=", "requires": { "base": "^0.11.1", "debug": "^2.2.0", @@ -7097,7 +7097,7 @@ "snapdragon-node": { "version": "2.1.1", "resolved": "https://registry.npmjs.org/snapdragon-node/-/snapdragon-node-2.1.1.tgz", - "integrity": "sha512-O27l4xaMYt/RSQ5TR3vpWCAB5Kb/czIcqUFOM/C4fYcLnbZUc1PkjTAMjof2pBWaSTwOUd6qUHcFGVGj7aIwnw==", + "integrity": "sha1-bBdfhv8UvbByRWPo88GwIaKGhTs=", "requires": { "define-property": "^1.0.0", "isobject": "^3.0.0", @@ -7115,7 +7115,7 @@ "is-accessor-descriptor": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz", - "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==", + "integrity": "sha1-FpwvbT3x+ZJhgHI2XJsOofaHhlY=", "requires": { "kind-of": "^6.0.0" } @@ -7123,7 +7123,7 @@ "is-data-descriptor": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz", - "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==", + "integrity": "sha1-2Eh2Mh0Oet0DmQQGq7u9NrqSaMc=", "requires": { "kind-of": "^6.0.0" } @@ -7131,7 +7131,7 @@ "is-descriptor": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz", - "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==", + "integrity": "sha1-OxWXRqZmBLBPjIFSS6NlxfFNhuw=", "requires": { "is-accessor-descriptor": "^1.0.0", "is-data-descriptor": "^1.0.0", @@ -7146,14 +7146,14 @@ "kind-of": { "version": "6.0.2", "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.2.tgz", - "integrity": "sha512-s5kLOcnH0XqDO+FvuaLX8DDjZ18CGFk7VygH40QoKPUQhW4e2rvM0rwUq0t8IQDOwYSeLK01U90OjzBTme2QqA==" + "integrity": "sha1-ARRrNqYhjmTljzqNZt5df8b20FE=" } } }, "snapdragon-util": { "version": "3.0.1", "resolved": "https://registry.npmjs.org/snapdragon-util/-/snapdragon-util-3.0.1.tgz", - "integrity": "sha512-mbKkMdQKsjX4BAL4bRYTj21edOf8cN7XHdYUJEe+Zn99hVEYcMvKPct1IqNe7+AZPirn8BCDOQBHQZknqmKlZQ==", + "integrity": "sha1-+VZHlIbyrNeXAGk/b3uAXkWrVuI=", "requires": { "kind-of": "^3.2.0" } @@ -7174,7 +7174,7 @@ "source-map-resolve": { "version": "0.5.2", "resolved": "https://registry.npmjs.org/source-map-resolve/-/source-map-resolve-0.5.2.tgz", - "integrity": "sha512-MjqsvNwyz1s0k81Goz/9vRBe9SZdB09Bdw+/zYyO+3CuPk6fouTaxscHkgtE8jKvf01kVfl8riHzERQ/kefaSA==", + "integrity": "sha1-cuLMNAlVQ+Q7LGKyxMENSpBU8lk=", "requires": { "atob": "^2.1.1", "decode-uri-component": "^0.2.0", @@ -7186,7 +7186,7 @@ "source-map-support": { "version": "0.4.18", "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.4.18.tgz", - "integrity": "sha512-try0/JqxPLF9nOjvSta7tVondkP5dwgyLDjVoyMDlmjugT2lRZ1OfsrYTkCd2hkDnJTKRbO/Rl3orm8vlsUzbA==", + "integrity": "sha1-Aoam3ovkJkEzhZTpfM6nXwosWF8=", "requires": { "source-map": "^0.5.6" } @@ -7199,7 +7199,7 @@ "split-string": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/split-string/-/split-string-3.1.0.tgz", - "integrity": 
"sha512-NzNVhJDYpwceVVii8/Hu6DKfD2G+NrQHlS/V/qgv763EYudVwEcMQNxd2lh+0VrUByXN/oJkl5grOhYWvQUYiw==", + "integrity": "sha1-fLCd2jqGWFcFxks5pkZgOGguj+I=", "requires": { "extend-shallow": "^3.0.0" } @@ -7229,12 +7229,12 @@ "stable": { "version": "0.1.8", "resolved": "https://registry.npmjs.org/stable/-/stable-0.1.8.tgz", - "integrity": "sha512-ji9qxRnOVfcuLDySj9qzhGSEFVobyt1kIOSkj1qZzYLzq7Tos/oUUWvotUPQLlrsidqsK6tBH89Bc9kL5zHA6w==" + "integrity": "sha1-g26zyDgv4pNv6vVEYxAXzn1Ho88=" }, "static-eval": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/static-eval/-/static-eval-2.0.0.tgz", - "integrity": "sha512-6flshd3F1Gwm+Ksxq463LtFd1liC77N/PX1FVVc3OzL3hAmo2fwHFbuArkcfi7s9rTNsLEhcRmXGFZhlgy40uw==", + "integrity": "sha1-DoIfiSaEfe97S1DNpdVcBKmxOGQ=", "requires": { "escodegen": "^1.8.1" } @@ -7261,7 +7261,7 @@ "static-module": { "version": "2.2.5", "resolved": "https://registry.npmjs.org/static-module/-/static-module-2.2.5.tgz", - "integrity": "sha512-D8vv82E/Kpmz3TXHKG8PPsCPg+RAX6cbCOyvjM6x04qZtQ47EtJFVwRsdov3n5d6/6ynrOY9XB4JkaZwB2xoRQ==", + "integrity": "sha1-vUCrzq4z2mt6+4Sg5DKf+IUr+78=", "requires": { "concat-stream": "~1.6.0", "convert-source-map": "^1.5.1", @@ -7282,7 +7282,7 @@ "statuses": { "version": "1.4.0", "resolved": "https://registry.npmjs.org/statuses/-/statuses-1.4.0.tgz", - "integrity": "sha512-zhSCtt8v2NDrRlPQpCNtw/heZLtfUDqxBM1udqikb/Hbk52LK4nQSwr10u77iopCW5LsyHpuXS0GnEc48mLeew==" + "integrity": "sha1-u3PURtonlhBu/MG2AaJT1sRr0Ic=" }, "stream-browserify": { "version": "2.0.1", @@ -7296,7 +7296,7 @@ "stream-http": { "version": "2.8.3", "resolved": "https://registry.npmjs.org/stream-http/-/stream-http-2.8.3.tgz", - "integrity": "sha512-+TSkfINHDo4J+ZobQLWiMouQYB+UVYFttRA94FpEzzJ7ZdqcL4uUUQ7WkdkI4DSozGmgBUE/a47L+38PenXhUw==", + "integrity": "sha1-stJCRpKIpaJ+xP6JM6z2I95lFPw=", "requires": { "builtin-status-codes": "^3.0.0", "inherits": "^2.0.1", @@ -7318,7 +7318,7 @@ "string_decoder": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", - "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", + "integrity": "sha1-nPFhG6YmhdcDCunkujQUnDrwP8g=", "requires": { "safe-buffer": "~5.1.0" } @@ -7344,7 +7344,7 @@ "ansi-styles": { "version": "3.2.1", "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "integrity": "sha1-QfuyAkPlCxK+DwS43tvwdSDOhB0=", "requires": { "color-convert": "^1.9.0" } @@ -7352,7 +7352,7 @@ "browserslist": { "version": "4.0.1", "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.0.1.tgz", - "integrity": "sha512-QqiiIWchEIkney3wY53/huI7ZErouNAdvOkjorUALAwRcu3tEwOV3Sh6He0DnP38mz1JjBpCBb50jQBmaYuHPw==", + "integrity": "sha1-YcBc4qWEPH2WFmQIvCPVi1QW6Bg=", "requires": { "caniuse-lite": "^1.0.30000865", "electron-to-chromium": "^1.3.52", @@ -7362,7 +7362,7 @@ "chalk": { "version": "2.4.1", "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.1.tgz", - "integrity": "sha512-ObN6h1v2fTJSmUXoS3nMQ92LbDK9be4TV+6G+omQlGJFdcUX5heKi1LZ1YnRMIgwTLEj3E24bT6tYni50rlCfQ==", + "integrity": "sha1-GMSasWoDe26wFSzIPjRxM4IVtm4=", "requires": { "ansi-styles": "^3.2.1", "escape-string-regexp": "^1.0.5", @@ -7382,7 +7382,7 @@ "postcss": { "version": "6.0.23", "resolved": "https://registry.npmjs.org/postcss/-/postcss-6.0.23.tgz", - "integrity": 
"sha512-soOk1h6J3VMTZtVeVpv15/Hpdl2cBLX3CAw4TAbkpTJiNPk9YP/zWcD1ND+xEtvyuuvKzbxliTOIyvkSeSJ6ag==", + "integrity": "sha1-YcgswyisYOZ3ZF+XkFTrmLwOMyQ=", "requires": { "chalk": "^2.4.1", "source-map": "^0.6.1", @@ -7402,12 +7402,12 @@ "source-map": { "version": "0.6.1", "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==" + "integrity": "sha1-dHIq8y6WFOnCh6jQu95IteLxomM=" }, "supports-color": { "version": "5.4.0", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.4.0.tgz", - "integrity": "sha512-zjaXglF5nnWpsq470jSv6P9DwPvgLkuapYmfDm3JWOm0vkNTVF2tI4UrN2r6jH1qM/uc/WtxYY1hYoA2dOKj5w==", + "integrity": "sha1-HGszdALCE3YF7+GfEP7DkPb6q1Q=", "requires": { "has-flag": "^3.0.0" } @@ -7439,7 +7439,7 @@ "terser": { "version": "3.8.1", "resolved": "https://registry.npmjs.org/terser/-/terser-3.8.1.tgz", - "integrity": "sha512-FRin3gKQ0vm0xPPLuxw1FqpVgv1b2pBpYCaFb5qe6A7sD749Fnq1VbDiX3CEFM0BV0fqDzFtBfgmxhxCdzKQIg==", + "integrity": "sha1-y3AHCsngpxrdFp37Y8CmT8onOKw=", "requires": { "commander": "~2.16.0", "source-map": "~0.6.1", @@ -7449,17 +7449,17 @@ "commander": { "version": "2.16.0", "resolved": "https://registry.npmjs.org/commander/-/commander-2.16.0.tgz", - "integrity": "sha512-sVXqklSaotK9at437sFlFpyOcJonxe0yST/AG9DkQKUdIE6IqGIMv4SfAQSKaJbSdVEJYItASCrBiVQHq1HQew==" + "integrity": "sha1-8WOQWTmWzrTz7rAgsx14Uo9/ilA=" }, "source-map": { "version": "0.6.1", "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==" + "integrity": "sha1-dHIq8y6WFOnCh6jQu95IteLxomM=" }, "source-map-support": { "version": "0.5.6", "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.6.tgz", - "integrity": "sha512-N4KXEz7jcKqPf2b2vZF11lQIz9W5ZMuUcIOGj243lduidkf2fjkVKJS9vNxVWn3u/uxX38AcE8U9nnH9FPcq+g==", + "integrity": "sha1-RDXO5Gsaq2K46GEM5g94gJHFHBM=", "requires": { "buffer-from": "^1.0.0", "source-map": "^0.6.0" @@ -7479,7 +7479,7 @@ "timers-browserify": { "version": "2.0.10", "resolved": "https://registry.npmjs.org/timers-browserify/-/timers-browserify-2.0.10.tgz", - "integrity": "sha512-YvC1SV1XdOUaL6gx5CoGroT3Gu49pK9+TZ38ErPldOWW4j49GI1HKs9DV+KGq/w6y+LZ72W1c8cKz2vzY+qpzg==", + "integrity": "sha1-HSjj0qrfHVpZlsTp+VYBzQU0gK4=", "requires": { "setimmediate": "^1.0.4" } @@ -7515,7 +7515,7 @@ "to-regex": { "version": "3.0.2", "resolved": "https://registry.npmjs.org/to-regex/-/to-regex-3.0.2.tgz", - "integrity": "sha512-FWtleNAtZ/Ki2qtqej2CXTOayOH9bHDQF+Q48VpWyDXjbYxA4Yz8iDB31zXOBUlOHHKidDbqGVrTUvQMPmBGBw==", + "integrity": "sha1-E8/dmzNlUvMLUfM6iuG0Knp1mc4=", "requires": { "define-property": "^2.0.2", "extend-shallow": "^3.0.2", @@ -7545,17 +7545,17 @@ "toml": { "version": "2.3.3", "resolved": "https://registry.npmjs.org/toml/-/toml-2.3.3.tgz", - "integrity": "sha512-O7L5hhSQHxuufWUdcTRPfuTh3phKfAZ/dqfxZFoxPCj2RYmpaSGLEIs016FCXItQwNr08yefUB5TSjzRYnajTA==" + "integrity": "sha1-jWg9cpV3yyhiMd/HqK/+WNMXKPs=" }, "tomlify-j0.4": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/tomlify-j0.4/-/tomlify-j0.4-3.0.0.tgz", - "integrity": "sha512-2Ulkc8T7mXJ2l0W476YC/A209PR38Nw8PuaCNtk9uI3t1zzFdGQeWYGQvmj2PZkVvRC/Yoi4xQKMRnWc/N29tQ==" + "integrity": "sha1-mUFNRSaMOjuL84voIUW3u6NLdHM=" }, "tough-cookie": { "version": "2.3.4", "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-2.3.4.tgz", - 
"integrity": "sha512-TZ6TTfI5NtZnuyy/Kecv+CnoROnyXn2DN97LontgQpCwsX2XyLYCC0ENhYkehSOwAp8rTQKc/NUIF7BkQ5rKLA==", + "integrity": "sha1-7GDO44rGdQY//JelwYlwV47oNlU=", "optional": true, "requires": { "punycode": "^1.4.1" @@ -7706,7 +7706,7 @@ "upath": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/upath/-/upath-1.1.0.tgz", - "integrity": "sha512-bzpH/oBhoS/QI/YtbkqCg6VEiPYjSZtrHQM6/QnJS6OL9pKUFLqb3aFh4Scvwm45+7iAgiMkLhSbaZxUqmrprw==" + "integrity": "sha1-NSVll+RqWB20eT0M5H+prr/J+r0=" }, "urix": { "version": "0.1.0", @@ -7732,12 +7732,12 @@ "use": { "version": "3.1.1", "resolved": "https://registry.npmjs.org/use/-/use-3.1.1.tgz", - "integrity": "sha512-cwESVXlO3url9YWlFW/TA9cshCEhtu7IKJ/p5soJ/gGpj7vbvFrAY/eIioQ6Dw23KjZhYgiIo8HOs1nQ2vr/oQ==" + "integrity": "sha1-1QyMrHmhn7wg8pEfVuuXP04QBw8=" }, "util": { "version": "0.10.4", "resolved": "https://registry.npmjs.org/util/-/util-0.10.4.tgz", - "integrity": "sha512-0Pm9hTQ3se5ll1XihRic3FDIku70C+iHUdT/W926rSgHV5QgXsYbKZN8MSC3tJtSkhuROzvsQjAaFENRXr+19A==", + "integrity": "sha1-OqASW/5mikZy3liFfTrOJ+y3aQE=", "requires": { "inherits": "2.0.3" }, @@ -7757,7 +7757,7 @@ "util.promisify": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/util.promisify/-/util.promisify-1.0.0.tgz", - "integrity": "sha512-i+6qA2MPhvoKLuxnJNpXAGhg7HphQOSUq2LKMZD0m15EiskXUkMvKdF4Uui0WYeCUGea+o2cw/ZuwehtfsrNkA==", + "integrity": "sha1-RA9xZaRZyaFtwUXrjnLzVocJcDA=", "requires": { "define-properties": "^1.1.2", "object.getownpropertydescriptors": "^2.0.3" @@ -7766,18 +7766,18 @@ "uuid": { "version": "3.2.1", "resolved": "https://registry.npmjs.org/uuid/-/uuid-3.2.1.tgz", - "integrity": "sha512-jZnMwlb9Iku/O3smGWvZhauCf6cvvpKi4BKRiliS3cxnI+Gz9j5MEpTz2UFuXiKPJocb7gnsLHwiS05ige5BEA==", + "integrity": "sha1-EsUou51Y0LkmXZovbw/ovhf/HxQ=", "optional": true }, "v8-compile-cache": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/v8-compile-cache/-/v8-compile-cache-2.0.0.tgz", - "integrity": "sha512-qNdTUMaCjPs4eEnM3W9H94R3sU70YCuT+/ST7nUf+id1bVOrdjrpUaeZLqPBPRph3hsgn4a4BvwpxhHZx+oSDg==" + "integrity": "sha1-UmSS41/GFoZChHALcEPgG67gnwo=" }, "vendors": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/vendors/-/vendors-1.0.2.tgz", - "integrity": "sha512-w/hry/368nO21AN9QljsaIhb9ZiZtZARoVH5f3CsFbawdLdayCgKRPup7CggujvySMxx0I91NOyxdVENohprLQ==" + "integrity": "sha1-f8te759WI7FWvOqJ7DfWNnbyGAE=" }, "verror": { "version": "1.10.0", @@ -7793,7 +7793,7 @@ "vlq": { "version": "0.2.3", "resolved": "https://registry.npmjs.org/vlq/-/vlq-0.2.3.tgz", - "integrity": "sha512-DRibZL6DsNhIgYQ+wNdWDL2SL3bKPlVrRiBqV5yuMm++op8W4kGFtaQfCs4KEJn0wBZcHVHJ3eoywX8983k1ow==" + "integrity": "sha1-jz5DKM9jsVQMDWfhsneDhviXWyY=" }, "vm-browserify": { "version": "0.0.4", @@ -7819,7 +7819,7 @@ "which": { "version": "1.3.1", "resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz", - "integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==", + "integrity": "sha1-pFBD1U9YBTFtqNYvn1CRjT2nCwo=", "requires": { "isexe": "^2.0.0" } @@ -7827,7 +7827,7 @@ "ws": { "version": "5.2.2", "resolved": "https://registry.npmjs.org/ws/-/ws-5.2.2.tgz", - "integrity": "sha512-jaHFD6PFv6UgoIVda6qZllptQsMlDEJkTQcybzzXDYM1XO9Y8em691FGMPmM46WGyLU4z9KMgQN+qrux/nhlHA==", + "integrity": "sha1-3/7xSGa46NyRM1glFNG++vlumA8=", "requires": { "async-limiter": "~1.0.0" } diff --git a/shuup/regions/npm-shrinkwrap.json b/shuup/regions/npm-shrinkwrap.json index 910dbb0ee0..f3d8b89a08 100644 --- a/shuup/regions/npm-shrinkwrap.json +++ 
b/shuup/regions/npm-shrinkwrap.json @@ -7,7 +7,7 @@ "@mrmlnc/readdir-enhanced": { "version": "2.2.1", "resolved": "https://registry.npmjs.org/@mrmlnc/readdir-enhanced/-/readdir-enhanced-2.2.1.tgz", - "integrity": "sha512-bPHp6Ji8b41szTOcaP63VlnbbO5Ny6dwAATtY6JTjh5N2OLrb5Qk/Th5cRkRQhkWCt+EJsYrNB0MiL+Gpn6e3g==", + "integrity": "sha1-UkryQNGjYFJ7cwR17PoTRKpUDd4=", "requires": { "call-me-maybe": "^1.0.1", "glob-to-regexp": "^0.3.0" @@ -16,17 +16,17 @@ "@nodelib/fs.stat": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-1.1.0.tgz", - "integrity": "sha512-LAQ1d4OPfSJ/BMbI2DuizmYrrkD9JMaTdi2hQTlI53lQ4kRQPyZQRS4CYQ7O66bnBBnP/oYdRxbk++X0xuFU6A==" + "integrity": "sha1-UMHiJgrA7ZQ5oYHeNyWgFo1ZxIo=" }, "abbrev": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/abbrev/-/abbrev-1.1.1.tgz", - "integrity": "sha512-nne9/IiQ/hzIhY6pdDnbBtz7DjPTKrY00P/zvPSm5pOFkl6xuGrGnXn/VtTNNfNtAfZ9/1RtehkszU9qcTii0Q==" + "integrity": "sha1-+PLIh60Qv2f2NPAFtph/7TF5qsg=" }, "acorn": { "version": "5.7.1", "resolved": "https://registry.npmjs.org/acorn/-/acorn-5.7.1.tgz", - "integrity": "sha512-d+nbxBUGKg7Arpsvbnlq61mc12ek3EY8EQldM3GPAhWJ1UVxC6TDGbIvUMNU6obBX3i1+ptCIzV4vq0gFPEGVQ==" + "integrity": "sha1-8JWCkpdwanyXdpWMCvyJMKm52dg=" }, "alphanum-sort": { "version": "1.0.2", @@ -46,7 +46,7 @@ "ansi-to-html": { "version": "0.6.6", "resolved": "https://registry.npmjs.org/ansi-to-html/-/ansi-to-html-0.6.6.tgz", - "integrity": "sha512-90M/2sZna3OsoOEbSyXK46poFnlClBC53Rx6etNKQK7iShsX5fI5E/M9Ld6FurtLaxAWLuAPi0Jp8p3y5oAkxg==", + "integrity": "sha1-WKjQS4fsmoXjrSc8EqX7xxR7nEI=", "requires": { "entities": "^1.1.1" } @@ -54,7 +54,7 @@ "anymatch": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-2.0.0.tgz", - "integrity": "sha512-5teOsQWABXHHBFP9y3skS5P3d/WfWXpv3FUpy+LorMrNYaT9pI4oLMQX7jzQ2KklNpGpWHzdCXTDT2Y3XGlZBw==", + "integrity": "sha1-vLJLTzeTTZqnrBe0ra+J58du8us=", "requires": { "micromatch": "^3.1.4", "normalize-path": "^2.1.1" @@ -63,7 +63,7 @@ "argparse": { "version": "1.0.10", "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", - "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", + "integrity": "sha1-vNZ5HqWuCXJeF+WtmIE0zUCz2RE=", "requires": { "sprintf-js": "~1.0.2" } @@ -76,7 +76,7 @@ "arr-flatten": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/arr-flatten/-/arr-flatten-1.1.0.tgz", - "integrity": "sha512-L3hKV5R/p5o81R7O02IGnwpDmkp6E982XhtbuwSe3O4qOtMMMtodicASA1Cny2U+aCXcNpml+m4dPsvsJ3jatg==" + "integrity": "sha1-NgSLv/TntH4TZkQxbJlmnqWukfE=" }, "arr-union": { "version": "3.1.0", @@ -91,7 +91,7 @@ "asn1.js": { "version": "4.10.1", "resolved": "https://registry.npmjs.org/asn1.js/-/asn1.js-4.10.1.tgz", - "integrity": "sha512-p32cOF5q0Zqs9uBiONKYLm6BClCoBCM5O9JfeUSlnQLBTxYdTK+pW+nXflm8UkKd2UYlEbYz5qEi0JuZR9ckSw==", + "integrity": "sha1-ucK/WAXx5kqt7tbfOiv6+1pz9aA=", "requires": { "bn.js": "^4.0.0", "inherits": "^2.0.1", @@ -134,7 +134,7 @@ "async-limiter": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/async-limiter/-/async-limiter-1.0.0.tgz", - "integrity": "sha512-jp/uFnooOiO+L211eZOoSyzpOITMXx1rBITauYykG3BRYPu8h0UcxsPNB04RR5vo4Tyz3+ay17tR6JVf9qzYWg==" + "integrity": "sha1-ePrtjD0HSrgfIrTphdeehzj3IPg=" }, "atob": { "version": "2.1.1", @@ -171,7 +171,7 @@ "postcss": { "version": "5.2.18", "resolved": "https://registry.npmjs.org/postcss/-/postcss-5.2.18.tgz", - "integrity": 
"sha512-zrUjRRe1bpXKsX1qAJNJjqZViErVuyEkMTRrwu4ud4sbTtIBRmtaYDrHmcGgmrbsW3MHfmtIf+vJumgQn+PrXg==", + "integrity": "sha1-ut+hSX1GJE9jkPWLMZgw2RB4U8U=", "requires": { "chalk": "^1.1.3", "js-base64": "^2.1.9", @@ -202,7 +202,7 @@ "babel-core": { "version": "6.26.3", "resolved": "https://registry.npmjs.org/babel-core/-/babel-core-6.26.3.tgz", - "integrity": "sha512-6jyFLuDmeidKmUEb3NM+/yawG0M2bDZ9Z1qbZP59cyHLz8kYGKYwpJP0UwUKKUiTRNvxfLesJnTedqczP7cTDA==", + "integrity": "sha1-suLwnjQtDwyI4vAuBneUEl51wgc=", "requires": { "babel-code-frame": "^6.26.0", "babel-generator": "^6.26.0", @@ -228,7 +228,7 @@ "babel-generator": { "version": "6.26.1", "resolved": "https://registry.npmjs.org/babel-generator/-/babel-generator-6.26.1.tgz", - "integrity": "sha512-HyfwY6ApZj7BYTcJURpM5tznulaBvyio7/0d4zFOeMPUmfxkCjHocCuoLa2SAGzBI8AREcH3eP3758F672DppA==", + "integrity": "sha1-GERAjTuPDTWkBOp6wYDwh6YBvZA=", "requires": { "babel-messages": "^6.23.0", "babel-runtime": "^6.26.0", @@ -530,7 +530,7 @@ "babel-plugin-transform-es2015-modules-commonjs": { "version": "6.26.2", "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-modules-commonjs/-/babel-plugin-transform-es2015-modules-commonjs-6.26.2.tgz", - "integrity": "sha512-CV9ROOHEdrjcwhIaJNBGMBCodN+1cfkwtM1SbUHmvyy35KGT7fohbpOxkE2uLz1o6odKK2Ck/tz47z+VqQfi9Q==", + "integrity": "sha1-WKeThjqefKhwvcWogRF/+sJ9tvM=", "requires": { "babel-plugin-transform-strict-mode": "^6.24.1", "babel-runtime": "^6.26.0", @@ -673,7 +673,7 @@ "babel-preset-env": { "version": "1.7.0", "resolved": "https://registry.npmjs.org/babel-preset-env/-/babel-preset-env-1.7.0.tgz", - "integrity": "sha512-9OR2afuKDneX2/q2EurSftUYM0xGu4O2D9adAhVfADDhrYDaxXV0rBbevVYoY9n6nyX1PmQW/0jtpJvUNr9CHg==", + "integrity": "sha1-3qefpOvriDzTXasH4mDBycBN93o=", "requires": { "babel-plugin-check-es2015-constants": "^6.22.0", "babel-plugin-syntax-trailing-function-commas": "^6.22.0", @@ -772,7 +772,7 @@ "babylon": { "version": "6.18.0", "resolved": "https://registry.npmjs.org/babylon/-/babylon-6.18.0.tgz", - "integrity": "sha512-q/UEjfGJ2Cm3oKV71DJz9d25TPnq5rhBVL2Q4fA5wcC3jcrdn7+SssEybFIxwAvvP+YCsCYNKughoF33GxgycQ==" + "integrity": "sha1-ry87iPpvXB5MY00aD46sT1WzleM=" }, "babylon-walk": { "version": "1.0.2", @@ -792,7 +792,7 @@ "base": { "version": "0.11.2", "resolved": "https://registry.npmjs.org/base/-/base-0.11.2.tgz", - "integrity": "sha512-5T6P4xPgpp0YDFvSWwEZ4NoE3aM4QBQXDzmVbraCkFj8zHM+mba8SyqB5DbZWyR7mYHo6Y7BdQo3MoA4m0TeQg==", + "integrity": "sha1-e95c7RRbbVUakNuH+DxVi060io8=", "requires": { "cache-base": "^1.0.1", "class-utils": "^0.3.5", @@ -814,7 +814,7 @@ "is-accessor-descriptor": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz", - "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==", + "integrity": "sha1-FpwvbT3x+ZJhgHI2XJsOofaHhlY=", "requires": { "kind-of": "^6.0.0" } @@ -822,7 +822,7 @@ "is-data-descriptor": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz", - "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==", + "integrity": "sha1-2Eh2Mh0Oet0DmQQGq7u9NrqSaMc=", "requires": { "kind-of": "^6.0.0" } @@ -830,7 +830,7 @@ "is-descriptor": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz", - "integrity": 
"sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==", + "integrity": "sha1-OxWXRqZmBLBPjIFSS6NlxfFNhuw=", "requires": { "is-accessor-descriptor": "^1.0.0", "is-data-descriptor": "^1.0.0", @@ -840,14 +840,14 @@ "kind-of": { "version": "6.0.2", "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.2.tgz", - "integrity": "sha512-s5kLOcnH0XqDO+FvuaLX8DDjZ18CGFk7VygH40QoKPUQhW4e2rvM0rwUq0t8IQDOwYSeLK01U90OjzBTme2QqA==" + "integrity": "sha1-ARRrNqYhjmTljzqNZt5df8b20FE=" } } }, "base64-js": { "version": "1.3.0", "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.3.0.tgz", - "integrity": "sha512-ccav/yGvoa80BQDljCxsmmQ3Xvx60/UpBIij5QN21W3wBi/hhIC9OoO+KLpu9IJTS9j4DRVJ3aDDF9cMSoa2lw==" + "integrity": "sha1-yrHmEY8FEJXli1KBrqjBzSK/wOM=" }, "binary-extensions": { "version": "1.11.0", @@ -862,12 +862,12 @@ "bluebird": { "version": "3.5.1", "resolved": "https://registry.npmjs.org/bluebird/-/bluebird-3.5.1.tgz", - "integrity": "sha512-MKiLiV+I1AA596t9w1sQJ8jkiSr5+ZKi0WKrYGUn6d1Fx+Ij4tIj+m2WMQSGczs5jZVxV339chE8iwk6F64wjA==" + "integrity": "sha1-2VUfnemPH82h5oPRfukaBgLuLrk=" }, "bn.js": { "version": "4.11.8", "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.11.8.tgz", - "integrity": "sha512-ItfYfPLkWHUjckQCk8xC+LwxgK8NYcXywGigJgSwOP8Y2iyWT4f2vsZnoOXTTbo+o5yXmIUJ4gn5538SO5S3gA==" + "integrity": "sha1-LN4J617jQfSEdGuwMJsyU7GxRC8=" }, "boolbase": { "version": "1.0.0", @@ -877,7 +877,7 @@ "brace-expansion": { "version": "1.1.11", "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", - "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "integrity": "sha1-PH/L9SnYcibz0vUrlm/1Jx60Qd0=", "requires": { "balanced-match": "^1.0.0", "concat-map": "0.0.1" @@ -886,7 +886,7 @@ "braces": { "version": "2.3.2", "resolved": "https://registry.npmjs.org/braces/-/braces-2.3.2.tgz", - "integrity": "sha512-aNdbnj9P8PjdXU4ybaWLK2IF3jc/EoDYbC7AazW6to3TRsfXxscC9UXOB5iDiEQrkyIbWp2SLQda4+QAa7nc3w==", + "integrity": "sha1-WXn9PxTNUxVl5fot8av/8d+u5yk=", "requires": { "arr-flatten": "^1.1.0", "array-unique": "^0.3.2", @@ -913,7 +913,7 @@ "brfs": { "version": "1.6.1", "resolved": "https://registry.npmjs.org/brfs/-/brfs-1.6.1.tgz", - "integrity": "sha512-OfZpABRQQf+Xsmju8XE9bDjs+uU4vLREGolP7bDgcpsI17QREyZ4Bl+2KLxxx1kCgA0fAIhKQBaBYh+PEcCqYQ==", + "integrity": "sha1-t4ziM22BjiXuoEoJR8um1PuIScM=", "requires": { "quote-stream": "^1.0.1", "resolve": "^1.1.5", @@ -929,7 +929,7 @@ "browserify-aes": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/browserify-aes/-/browserify-aes-1.2.0.tgz", - "integrity": "sha512-+7CHXqGuspUn/Sl5aO7Ea0xWGAtETPXNSAjHo48JfLdPWcMng33Xe4znFvQweqc/uzk5zSOI3H52CYnjCfb5hA==", + "integrity": "sha1-Mmc0ZC9APavDADIJhTu3CtQo70g=", "requires": { "buffer-xor": "^1.0.3", "cipher-base": "^1.0.0", @@ -942,7 +942,7 @@ "browserify-cipher": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/browserify-cipher/-/browserify-cipher-1.0.1.tgz", - "integrity": "sha512-sPhkz0ARKbf4rRQt2hTpAHqn47X3llLkUGn+xEJzLjwY8LRs2p0v7ljvI5EyoRO/mexrNunNECisZs+gw2zz1w==", + "integrity": "sha1-jWR0wbhwv9q807z8wZNKEOlPFfA=", "requires": { "browserify-aes": "^1.0.4", "browserify-des": "^1.0.0", @@ -952,7 +952,7 @@ "browserify-des": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/browserify-des/-/browserify-des-1.0.1.tgz", - "integrity": "sha512-zy0Cobe3hhgpiOM32Tj7KQ3Vl91m0njwsjzZQK1L+JDf11dzP9qIvjreVinsvXrgfjhStXwUWAEpB9D7Gwmayw==", + "integrity": 
"sha1-M0MSTbbXrVPiaogmMYcSvchFD5w=", "requires": { "cipher-base": "^1.0.1", "des.js": "^1.0.0", @@ -985,7 +985,7 @@ "browserify-zlib": { "version": "0.2.0", "resolved": "https://registry.npmjs.org/browserify-zlib/-/browserify-zlib-0.2.0.tgz", - "integrity": "sha512-Z942RysHXmJrhqk88FmKBVq/v5tqmSkDz7p54G/MGyjMnCFFnC79XWNbg+Vta8W6Wb2qtSZTSxIGkJrRpCFEiA==", + "integrity": "sha1-KGlFnZqjviRf6P4sofRuLn9U1z8=", "requires": { "pako": "~1.0.5" } @@ -993,7 +993,7 @@ "browserslist": { "version": "3.2.8", "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-3.2.8.tgz", - "integrity": "sha512-WHVocJYavUwVgVViC0ORikPHQquXwVh939TaelZ4WDqpWgTX/FsGhl/+P4qBUAGcRvtOgDgC+xftNWWp2RUTAQ==", + "integrity": "sha1-sABTYdZHHw9ZUnl6dvyYXx+Xj8Y=", "requires": { "caniuse-lite": "^1.0.30000844", "electron-to-chromium": "^1.3.47" @@ -1017,7 +1017,7 @@ "buffer-from": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.0.tgz", - "integrity": "sha512-c5mRlguI/Pe2dSZmpER62rSCu0ryKmWddzRYsuXc50U2/g8jMOulc31VZMa4mYx31U5xsmSOpDCgH88Vl9cDGQ==" + "integrity": "sha1-h/yqOimDWOCt5uRCz86EB0DRrQQ=" }, "buffer-xor": { "version": "1.0.3", @@ -1032,7 +1032,7 @@ "cache-base": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/cache-base/-/cache-base-1.0.1.tgz", - "integrity": "sha512-AKcdTnFSWATd5/GCPRxr2ChwIJ85CeyrEyjRHlKxQ56d4XJMGym0uAiKn0xbLOGOl3+yRpOTi484dVCEc5AUzQ==", + "integrity": "sha1-Cn9GQWgxyLZi7jb+TnxZ129marI=", "requires": { "collection-visit": "^1.0.0", "component-emitter": "^1.2.1", @@ -1053,7 +1053,7 @@ "caniuse-api": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/caniuse-api/-/caniuse-api-3.0.0.tgz", - "integrity": "sha512-bsTwuIg/BZZK/vreVTYYbSWoe2F+71P7K5QGEX+pT250DZbfU1MQ5prOKpPR+LL6uWKK3KMwMCAS74QB3Um1uw==", + "integrity": "sha1-Xk2Q4idJYdRikZl99Znj7QCO5MA=", "requires": { "browserslist": "^4.0.0", "caniuse-lite": "^1.0.0", @@ -1064,7 +1064,7 @@ "browserslist": { "version": "4.0.1", "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.0.1.tgz", - "integrity": "sha512-QqiiIWchEIkney3wY53/huI7ZErouNAdvOkjorUALAwRcu3tEwOV3Sh6He0DnP38mz1JjBpCBb50jQBmaYuHPw==", + "integrity": "sha1-YcBc4qWEPH2WFmQIvCPVi1QW6Bg=", "requires": { "caniuse-lite": "^1.0.30000865", "electron-to-chromium": "^1.3.52", @@ -1074,7 +1074,7 @@ "caniuse-lite": { "version": "1.0.30000874", "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30000874.tgz", - "integrity": "sha512-29nr1EPiHwrJTAHHsEmTt2h+55L8j2GNFdAcYPlRy2NX6iFz7ZZiepVI7kP/QqlnHLq3KvfWpbmGa0d063U09w==" + "integrity": "sha1-pkGx8cQg1Y2bEykg72uoe73NIiM=" } } }, @@ -1093,7 +1093,7 @@ "caniuse-lite": { "version": "1.0.30000860", "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30000860.tgz", - "integrity": "sha512-6HCqcu+cCwWCY+WLL+rtAsAFt1ufvqMhA8dTfhMQhCJHYhJDhRRrh105DfjqRlTrDK3vvbEq8K0drNsJbymDtQ==" + "integrity": "sha1-yQLj3zzEMD9jGrx6FXmjzgphU6M=" }, "chalk": { "version": "1.1.3", @@ -1110,7 +1110,7 @@ "chokidar": { "version": "2.0.4", "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-2.0.4.tgz", - "integrity": "sha512-z9n7yt9rOvIJrMhvDtDictKrkFHeihkNl6uWMmZlmL6tJtX9Cs+87oK+teBx+JIgzvbX3yZHT3eF8vpbDxHJXQ==", + "integrity": "sha1-NW/04rDo5D4yLRijckYLvPOszSY=", "requires": { "anymatch": "^2.0.0", "async-each": "^1.0.0", @@ -1130,7 +1130,7 @@ "cipher-base": { "version": "1.0.4", "resolved": "https://registry.npmjs.org/cipher-base/-/cipher-base-1.0.4.tgz", - "integrity": 
"sha512-Kkht5ye6ZGmwv40uUDZztayT2ThLQGfnj/T71N/XzeZeo3nf8foyW7zGTsPYkEya3m5f3cAypH+qe7YOrM1U2Q==", + "integrity": "sha1-h2Dk7MJy9MNjUy+SbYdKriwTl94=", "requires": { "inherits": "^2.0.1", "safe-buffer": "^5.0.1" @@ -1139,7 +1139,7 @@ "clap": { "version": "1.2.3", "resolved": "https://registry.npmjs.org/clap/-/clap-1.2.3.tgz", - "integrity": "sha512-4CoL/A3hf90V3VIEjeuhSvlGFEHKzOz+Wfc2IVZc+FaUgU0ZQafJTP49fvnULipOPcAfqhyI2duwQyns6xqjYA==", + "integrity": "sha1-TzZ0WzIAhJJVf0ZBLWbVDLmbzlE=", "requires": { "chalk": "^1.1.3" } @@ -1147,7 +1147,7 @@ "class-utils": { "version": "0.3.6", "resolved": "https://registry.npmjs.org/class-utils/-/class-utils-0.3.6.tgz", - "integrity": "sha512-qOhPa/Fj7s6TY8H8esGu5QNpMMQxz79h+urzrNYN6mn+9BnxlDGf5QZ+XeCDsxSjPqsSR56XOZOJmpeurnLMeg==", + "integrity": "sha1-+TNprouafOAv1B+q0MqDAzGQxGM=", "requires": { "arr-union": "^3.1.0", "define-property": "^0.2.5", @@ -1176,7 +1176,7 @@ "cli-spinners": { "version": "1.3.1", "resolved": "https://registry.npmjs.org/cli-spinners/-/cli-spinners-1.3.1.tgz", - "integrity": "sha512-1QL4544moEsDVH9T/l6Cemov/37iv1RtoKf7NJ04A60+4MREXNfx/QvavbH6QoGdsD4N4Mwy49cmaINR/o2mdg==" + "integrity": "sha1-ACwZkJEtDVlYDJO9NsBW3pnkJZo=" }, "clone": { "version": "2.1.2", @@ -1191,7 +1191,7 @@ "coa": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/coa/-/coa-2.0.1.tgz", - "integrity": "sha512-5wfTTO8E2/ja4jFSxePXlG5nRu5bBtL/r1HCIpJW/lzT6yDtKl0u0Z4o/Vpz32IpKmBn7HerheEZQgA9N2DarQ==", + "integrity": "sha1-8/iwsVBz411wJj+xBCyywCPbOK8=", "requires": { "q": "^1.1.2" } @@ -1208,7 +1208,7 @@ "color": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/color/-/color-3.0.0.tgz", - "integrity": "sha512-jCpd5+s0s0t7p3pHQKpnJ0TpQKKdleP71LWcA0aqiljpiuAkOSUFN/dyH8ZwF0hRmFlrIuRhufds1QyEP9EB+w==", + "integrity": "sha1-2SC0Mo1TSjrIKV1o971LpsQnvpo=", "requires": { "color-convert": "^1.9.1", "color-string": "^1.5.2" @@ -1217,7 +1217,7 @@ "color-convert": { "version": "1.9.2", "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.2.tgz", - "integrity": "sha512-3NUJZdhMhcdPn8vJ9v2UQJoH0qqoGUkYTgFEPZaPjEtwmmKUfNV46zZmgB2M5M4DCEQHMaCfWHCxiBflLm04Tg==", + "integrity": "sha1-SYgbj7pn3xKpa98/VsCqueeRMUc=", "requires": { "color-name": "1.1.1" } @@ -1230,7 +1230,7 @@ "color-string": { "version": "1.5.3", "resolved": "https://registry.npmjs.org/color-string/-/color-string-1.5.3.tgz", - "integrity": "sha512-dC2C5qeWoYkxki5UAXapdjqO672AM4vZuPGRQfO8b5HKuKGBbKWpITyDYN7TOFKvRW7kOgAn3746clDBMDJyQw==", + "integrity": "sha1-ybvF8BtYtUkvPWhXRZy2WQziBMw=", "requires": { "color-name": "^1.0.0", "simple-swizzle": "^0.2.2" @@ -1279,12 +1279,12 @@ "command-exists": { "version": "1.2.7", "resolved": "https://registry.npmjs.org/command-exists/-/command-exists-1.2.7.tgz", - "integrity": "sha512-doWDvhXCcW5LK0cIUWrOQ8oMFXJv3lEQCkJpGVjM8v9SV0uhqYXB943538tEA2CiaWqSyuYUGAm5ezDwEx9xlw==" + "integrity": "sha1-FoKPDD/ysMWIBYYe8hG2T8FWkqg=" }, "commander": { "version": "2.13.0", "resolved": "https://registry.npmjs.org/commander/-/commander-2.13.0.tgz", - "integrity": "sha512-MVuS359B+YzaWqjCL/c+22gfryv+mCBPHAv3zyVI2GN8EY6IRP8VwtasXn8jyyhvvq84R4ImN1OKRtcbIasjYA==" + "integrity": "sha1-aWS8pnaF33wfFDDFhPB9dZeIW5w=" }, "component-emitter": { "version": "1.2.1", @@ -1299,7 +1299,7 @@ "concat-stream": { "version": "1.6.2", "resolved": "https://registry.npmjs.org/concat-stream/-/concat-stream-1.6.2.tgz", - "integrity": "sha512-27HBghJxjiZtIk3Ycvn/4kbJk/1uZuJFfuPEns6LaEvpvG1f0hTea8lilrouyo9mVc2GWdcEZ8OLoGmSADlrCw==", + "integrity": 
"sha1-kEvfGUzTEi/Gdcd/xKw9T/D9GjQ=", "requires": { "buffer-from": "^1.0.0", "inherits": "^2.0.3", @@ -1342,7 +1342,7 @@ "core-js": { "version": "2.5.7", "resolved": "https://registry.npmjs.org/core-js/-/core-js-2.5.7.tgz", - "integrity": "sha512-RszJCAxg/PP6uzXVXL6BsxSXx/B05oJAQ2vkJRjyjrEcNVycaqOmNb5OTxZPE3xa5gwZduqza6L9JOCenh/Ecw==" + "integrity": "sha1-+XJgj/DOrWi4QaFqky0LGDeRgU4=" }, "core-util-is": { "version": "1.0.2", @@ -1352,7 +1352,7 @@ "cosmiconfig": { "version": "5.0.5", "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-5.0.5.tgz", - "integrity": "sha512-94j37OtvxS5w7qr7Ta6dt67tWdnOxigBVN4VnSxNXFez9o18PGQ0D33SchKP17r9LAcWVTYV72G6vDayAUBFIg==", + "integrity": "sha1-qAnjwjBokc4Xq3A1nci99mH+LNA=", "requires": { "is-directory": "^0.3.1", "js-yaml": "^3.9.0", @@ -1373,7 +1373,7 @@ "create-ecdh": { "version": "4.0.3", "resolved": "https://registry.npmjs.org/create-ecdh/-/create-ecdh-4.0.3.tgz", - "integrity": "sha512-GbEHQPMOswGpKXM9kCWVrremUcBmjteUaQ01T9rkKCPDXfUHX0IoP9LpHYo2NPFampa4e+/pFDc3jQdxrxQLaw==", + "integrity": "sha1-yREbbzMEXEaX8UR4f5JUzcd8Rf8=", "requires": { "bn.js": "^4.1.0", "elliptic": "^6.0.0" @@ -1382,7 +1382,7 @@ "create-hash": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/create-hash/-/create-hash-1.2.0.tgz", - "integrity": "sha512-z00bCGNHDG8mHAkP7CtT1qVu+bFQUPjYq/4Iv3C3kWjTFV10zIjfSoeqXo9Asws8gwSHDGj/hl2u4OGIjapeCg==", + "integrity": "sha1-iJB4rxGmN1a8+1m9IhmWvjqe8ZY=", "requires": { "cipher-base": "^1.0.1", "inherits": "^2.0.1", @@ -1394,7 +1394,7 @@ "create-hmac": { "version": "1.1.7", "resolved": "https://registry.npmjs.org/create-hmac/-/create-hmac-1.1.7.tgz", - "integrity": "sha512-MJG9liiZ+ogc4TzUwuvbER1JRdgvUFSB5+VR/g5h82fGaIRWMWddtKBHi7/sVhfjQZ6SehlyhvQYrcYkaUIpLg==", + "integrity": "sha1-aRcMeLOrlXFHsriwRXLkfq0iQ/8=", "requires": { "cipher-base": "^1.0.3", "create-hash": "^1.1.0", @@ -1407,7 +1407,7 @@ "crypto-browserify": { "version": "3.12.0", "resolved": "https://registry.npmjs.org/crypto-browserify/-/crypto-browserify-3.12.0.tgz", - "integrity": "sha512-fz4spIh+znjO2VjL+IdhEpRJ3YN6sMzITSBijk6FK2UvTqruSQW+/cCZTSNsMiZNvUeq0CqurF+dAbyiGOY6Wg==", + "integrity": "sha1-OWz58xN/A+S45TLFj2mCVOAPgOw=", "requires": { "browserify-cipher": "^1.0.0", "browserify-sign": "^4.0.0", @@ -1430,7 +1430,7 @@ "css-declaration-sorter": { "version": "3.0.1", "resolved": "https://registry.npmjs.org/css-declaration-sorter/-/css-declaration-sorter-3.0.1.tgz", - "integrity": "sha512-jH4024SHZ3e0M7ann9VxpFpH3moplRXNz9ZBqvFMZqi09Yo5ARbs2wdPH8GqN9iRTlQynrbGbraNbBxBLei85Q==", + "integrity": "sha1-0OMFaw/YjcHqnc7/Q1rb6ccCp/g=", "requires": { "postcss": "^6.0.0", "timsort": "^0.3.0" @@ -1455,7 +1455,7 @@ "css-tree": { "version": "1.0.0-alpha25", "resolved": "https://registry.npmjs.org/css-tree/-/css-tree-1.0.0-alpha25.tgz", - "integrity": "sha512-XC6xLW/JqIGirnZuUWHXCHRaAjje2b3OIB0Vj5RIJo6mIi/AdJo30quQl5LxUl0gkXDIrTrFGbMlcZjyFplz1A==", + "integrity": "sha1-G7+r+/bu708B2RCP8u3Qvi/jVZc=", "requires": { "mdn-data": "^1.0.0", "source-map": "^0.5.3" @@ -1479,7 +1479,7 @@ "cssnano": { "version": "4.0.5", "resolved": "https://registry.npmjs.org/cssnano/-/cssnano-4.0.5.tgz", - "integrity": "sha512-P2O0sz/YAAzqZVsSWOrbliPCr0c6abwVNQmFZ48AgejN/GbzwEf6IVFGQAj0UKHC+crv60wUAPQocAnDmeWlkg==", + "integrity": "sha1-h4m1/b574F2KD35FxMeJ6+cS9ao=", "requires": { "cosmiconfig": "^5.0.0", "cssnano-preset-default": "^4.0.0", @@ -1550,7 +1550,7 @@ "csso": { "version": "3.5.1", "resolved": "https://registry.npmjs.org/csso/-/csso-3.5.1.tgz", - "integrity": 
"sha512-vrqULLffYU1Q2tLdJvaCYbONStnfkfimRxXNaGjxMldI0C7JPBC4rB1RyjhfdZ4m1frm8pM9uRPKH3d2knZ8gg==", + "integrity": "sha1-e564vmFiiXPBsmHhadLwJACOdYs=", "requires": { "css-tree": "1.0.0-alpha.29" }, @@ -1558,7 +1558,7 @@ "css-tree": { "version": "1.0.0-alpha.29", "resolved": "https://registry.npmjs.org/css-tree/-/css-tree-1.0.0-alpha.29.tgz", - "integrity": "sha512-sRNb1XydwkW9IOci6iB2xmy8IGCj6r/fr+JWitvJ2JxQRPzN3T4AGGVWCMlVmVwM1gtgALJRmGIlWv5ppnGGkg==", + "integrity": "sha1-P6nU7zFCy9HDAedmTB81K9gvWjk=", "requires": { "mdn-data": "~1.1.0", "source-map": "^0.5.3" @@ -1574,7 +1574,7 @@ "deasync": { "version": "0.1.13", "resolved": "https://registry.npmjs.org/deasync/-/deasync-0.1.13.tgz", - "integrity": "sha512-/6ngYM7AapueqLtvOzjv9+11N2fHDSrkxeMF1YPE20WIfaaawiBg+HZH1E5lHrcJxlKR42t6XPOEmMmqcAsU1g==", + "integrity": "sha1-gVwrabvREXyuVwFSzYlWYcCfIOo=", "requires": { "bindings": "~1.2.1", "nan": "^2.0.7" @@ -1583,7 +1583,7 @@ "debug": { "version": "2.6.9", "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "integrity": "sha1-XRKFFd8TT/Mn6QpMk/Tgd6U2NB8=", "requires": { "ms": "2.0.0" } @@ -1630,7 +1630,7 @@ "define-property": { "version": "2.0.2", "resolved": "https://registry.npmjs.org/define-property/-/define-property-2.0.2.tgz", - "integrity": "sha512-jwK2UV4cnPpbcG7+VRARKTZPUWowwXA8bzH5NP6ud0oeAxyYPuGZUAC7hMugpCdz4BeSZl2Dl9k66CHJ/46ZYQ==", + "integrity": "sha1-1Flono1lS6d+AqgX+HENcCyxbp0=", "requires": { "is-descriptor": "^1.0.2", "isobject": "^3.0.1" @@ -1639,7 +1639,7 @@ "is-accessor-descriptor": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz", - "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==", + "integrity": "sha1-FpwvbT3x+ZJhgHI2XJsOofaHhlY=", "requires": { "kind-of": "^6.0.0" } @@ -1647,7 +1647,7 @@ "is-data-descriptor": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz", - "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==", + "integrity": "sha1-2Eh2Mh0Oet0DmQQGq7u9NrqSaMc=", "requires": { "kind-of": "^6.0.0" } @@ -1655,7 +1655,7 @@ "is-descriptor": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz", - "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==", + "integrity": "sha1-OxWXRqZmBLBPjIFSS6NlxfFNhuw=", "requires": { "is-accessor-descriptor": "^1.0.0", "is-data-descriptor": "^1.0.0", @@ -1665,7 +1665,7 @@ "kind-of": { "version": "6.0.2", "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.2.tgz", - "integrity": "sha512-s5kLOcnH0XqDO+FvuaLX8DDjZ18CGFk7VygH40QoKPUQhW4e2rvM0rwUq0t8IQDOwYSeLK01U90OjzBTme2QqA==" + "integrity": "sha1-ARRrNqYhjmTljzqNZt5df8b20FE=" } } }, @@ -1704,7 +1704,7 @@ "diffie-hellman": { "version": "5.0.3", "resolved": "https://registry.npmjs.org/diffie-hellman/-/diffie-hellman-5.0.3.tgz", - "integrity": "sha512-kqag/Nl+f3GwyK25fhUMYj81BUOrZ9IuJsjIcDE5icNM9FJHAVm3VcUDxdLPoQtTuUylWm6ZIknYJwwaPxsUzg==", + "integrity": "sha1-QOjumPVaIUlgcUaSHGPhrl89KHU=", "requires": { "bn.js": "^4.1.0", "miller-rabin": "^4.0.0", @@ -1730,7 +1730,7 @@ "domain-browser": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/domain-browser/-/domain-browser-1.2.0.tgz", - 
"integrity": "sha512-jnjyiM6eRyZl2H+W8Q/zLMA481hzi0eszAaBUzIVnmYVDBbnLxVNnfu1HgEBvCbL+71FrxMl3E6lpKH7Ge3OXA==" + "integrity": "sha1-PTH1AZGmdJ3RN1p/Ui6CPULlTto=" }, "domelementtype": { "version": "1.3.0", @@ -1740,7 +1740,7 @@ "domhandler": { "version": "2.4.2", "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-2.4.2.tgz", - "integrity": "sha512-JiK04h0Ht5u/80fdLMCEmV4zkNh2BcoMFBmZ/91WtYZ8qVXSKjiw7fXMgFPnHcSZgOo3XdinHvmnDUeMf5R4wA==", + "integrity": "sha1-iAUJfpM9ZehVRvcm1g9euItE+AM=", "requires": { "domelementtype": "1" } @@ -1757,7 +1757,7 @@ "dot-prop": { "version": "4.2.0", "resolved": "https://registry.npmjs.org/dot-prop/-/dot-prop-4.2.0.tgz", - "integrity": "sha512-tUMXrxlExSW6U2EXiiKGSBVdYgtV8qlHL+C10TsW4PURY/ic+eaysnSkwB4kA/mBlCyy/IKDJ+Lc3wbWeaXtuQ==", + "integrity": "sha1-HxngwuGqDjJ5fEl5nyg3rGr2nFc=", "requires": { "is-obj": "^1.0.0" } @@ -1765,7 +1765,7 @@ "dotenv": { "version": "5.0.1", "resolved": "https://registry.npmjs.org/dotenv/-/dotenv-5.0.1.tgz", - "integrity": "sha512-4As8uPrjfwb7VXC+WnLCbXK7y+Ueb2B3zgNCePYfhxS1PYeaO1YTeplffTEcbfLhvFNGLAz90VvJs9yomG7bow==" + "integrity": "sha1-pTF0Wb09eauIz/bkQFemo/ux/O8=" }, "duplexer2": { "version": "0.1.4", @@ -1778,7 +1778,7 @@ "editorconfig": { "version": "0.13.3", "resolved": "https://registry.npmjs.org/editorconfig/-/editorconfig-0.13.3.tgz", - "integrity": "sha512-WkjsUNVCu+ITKDj73QDvi0trvpdDWdkDyHybDGSXPfekLCqwmpD7CP7iPbvBgosNuLcI96XTDwNa75JyFl7tEQ==", + "integrity": "sha1-5SGeWHlR1glY/ZTqmpoAjN7/GzQ=", "requires": { "bluebird": "^3.0.5", "commander": "^2.9.0", @@ -1834,7 +1834,7 @@ "error-ex": { "version": "1.3.2", "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.2.tgz", - "integrity": "sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==", + "integrity": "sha1-tKxAZIEH/c3PriQvQovqihTU8b8=", "requires": { "is-arrayish": "^0.2.1" } @@ -1842,7 +1842,7 @@ "es-abstract": { "version": "1.12.0", "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.12.0.tgz", - "integrity": "sha512-C8Fx/0jFmV5IPoMOFPA9P9G5NtqW+4cOPit3MIuvR2t7Ag2K15EJTpxnHAYTzL+aYQJIESYeXZmDBfOBE1HcpA==", + "integrity": "sha1-nbvdJ8aFbwABQhyhh4LXhr+KYWU=", "requires": { "es-to-primitive": "^1.1.1", "function-bind": "^1.1.1", @@ -1874,7 +1874,7 @@ "escodegen": { "version": "1.9.1", "resolved": "https://registry.npmjs.org/escodegen/-/escodegen-1.9.1.tgz", - "integrity": "sha512-6hTjO1NAWkHnDk3OqQ4YrCuwwmGHL9S3nPlzBOUG/R44rda3wLNrfvQ5fkSGjyhHFKM7ALPKcKGrwvCLe0lC7Q==", + "integrity": "sha1-264X75bI5L7bE1b0UE+kzC98t+I=", "requires": { "esprima": "^3.1.3", "estraverse": "^4.2.0", @@ -1891,7 +1891,7 @@ "source-map": { "version": "0.6.1", "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "integrity": "sha1-dHIq8y6WFOnCh6jQu95IteLxomM=", "optional": true } } @@ -1899,7 +1899,7 @@ "esprima": { "version": "4.0.1", "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", - "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==" + "integrity": "sha1-E7BM2z5sXRnfkatph6hpVhmwqnE=" }, "estraverse": { "version": "4.2.0", @@ -1924,7 +1924,7 @@ "evp_bytestokey": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/evp_bytestokey/-/evp_bytestokey-1.0.3.tgz", - "integrity": "sha512-/f2Go4TognH/KvCISP7OUsHn85hT9nUkxxA9BEWxFn+Oj9o8ZNLm/40hdlgSLyuOimsrTKLUMEorQexp/aPQeA==", + "integrity": 
"sha1-f8vbGY3HGVlDLv4ThCaE4FJaywI=", "requires": { "md5.js": "^1.3.4", "safe-buffer": "^5.1.1" @@ -1974,7 +1974,7 @@ "is-extendable": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz", - "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==", + "integrity": "sha1-p0cPnkJnM9gb2B4RVSZOOjUHyrQ=", "requires": { "is-plain-object": "^2.0.4" } @@ -1984,7 +1984,7 @@ "extglob": { "version": "2.0.4", "resolved": "https://registry.npmjs.org/extglob/-/extglob-2.0.4.tgz", - "integrity": "sha512-Nmb6QXkELsuBr24CJSkilo6UHHgbekK5UiZgfE6UHD3Eb27YC6oD+bhcT+tJ6cl8dmsgdQxnWlcry8ksBIBLpw==", + "integrity": "sha1-rQD+TcYSqSMuhxhxHcXLWrAoVUM=", "requires": { "array-unique": "^0.3.2", "define-property": "^1.0.0", @@ -2015,7 +2015,7 @@ "is-accessor-descriptor": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz", - "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==", + "integrity": "sha1-FpwvbT3x+ZJhgHI2XJsOofaHhlY=", "requires": { "kind-of": "^6.0.0" } @@ -2023,7 +2023,7 @@ "is-data-descriptor": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz", - "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==", + "integrity": "sha1-2Eh2Mh0Oet0DmQQGq7u9NrqSaMc=", "requires": { "kind-of": "^6.0.0" } @@ -2031,7 +2031,7 @@ "is-descriptor": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz", - "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==", + "integrity": "sha1-OxWXRqZmBLBPjIFSS6NlxfFNhuw=", "requires": { "is-accessor-descriptor": "^1.0.0", "is-data-descriptor": "^1.0.0", @@ -2041,7 +2041,7 @@ "kind-of": { "version": "6.0.2", "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.2.tgz", - "integrity": "sha512-s5kLOcnH0XqDO+FvuaLX8DDjZ18CGFk7VygH40QoKPUQhW4e2rvM0rwUq0t8IQDOwYSeLK01U90OjzBTme2QqA==" + "integrity": "sha1-ARRrNqYhjmTljzqNZt5df8b20FE=" } } }, @@ -2066,7 +2066,7 @@ "fast-glob": { "version": "2.2.2", "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-2.2.2.tgz", - "integrity": "sha512-TR6zxCKftDQnUAPvkrCWdBgDq/gbqx8A3ApnBrR5rMvpp6+KMJI0Igw7fkWPgeVK0uhRXTXdvO3O+YP0CaUX2g==", + "integrity": "sha1-cXIzOKybTg4v/x1nSKKhPV7TUr8=", "requires": { "@mrmlnc/readdir-enhanced": "^2.2.1", "@nodelib/fs.stat": "^1.0.1", @@ -2084,7 +2084,7 @@ "filesize": { "version": "3.6.1", "resolved": "https://registry.npmjs.org/filesize/-/filesize-3.6.1.tgz", - "integrity": "sha512-7KjR1vv6qnicaPMi1iiTcI85CyYwRO/PSFCu6SvqL8jN2Wjt/NIYQTFtFs7fSDCYOstUkEWIQGFUg5YZQfjlcg==" + "integrity": "sha1-CQuz7gG2+AGoqL6Z0xcQs0Irsxc=" }, "fill-range": { "version": "4.0.0", @@ -2138,7 +2138,7 @@ "fsevents": { "version": "1.2.4", "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-1.2.4.tgz", - "integrity": "sha512-z8H8/diyk76B7q5wg+Ud0+CqzcAF3mBBI/bA5ne5zrRUUIvNkJY//D3BqyH571KuAC4Nr7Rw7CjWX4r0y9DvNg==", + "integrity": "sha1-9B3LGvJYKvNpLaNvxVy9jhBBxCY=", "optional": true, "requires": { "nan": "^2.9.2", @@ -2600,7 +2600,7 @@ "fswatcher-child": { "version": "1.0.5", "resolved": "https://registry.npmjs.org/fswatcher-child/-/fswatcher-child-1.0.5.tgz", - "integrity": "sha512-T5BsoXc63WcPKLcQh77g3oJOqCHnXPp/QLuLgD9jhRBwDuOiVXL8PL6Dcy3ByfsdZmHKYQuPYN8PXEphyoS4qA==", + 
"integrity": "sha1-E00BL/p0kYl1YX4A5W5BOfNssUA=", "requires": { "chokidar": "^2.0.3" } @@ -2608,7 +2608,7 @@ "function-bind": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz", - "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==" + "integrity": "sha1-pWiZ0+o8m6uHS7l3O3xe3pL0iV0=" }, "get-port": { "version": "3.2.0", @@ -2647,7 +2647,7 @@ "globals": { "version": "9.18.0", "resolved": "https://registry.npmjs.org/globals/-/globals-9.18.0.tgz", - "integrity": "sha512-S0nG3CLEQiY/ILxqtztTWH/3iRRdyBLw6KMDxnKMchrtbj2OFmehVh0WUCfW3DUrIgx/qFrJPICrq4Z4sTR9UQ==" + "integrity": "sha1-qjiWs+abSH8X4x7SFD1pqOMMLYo=" }, "graceful-fs": { "version": "4.1.11", @@ -2666,7 +2666,7 @@ "has": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/has/-/has-1.0.3.tgz", - "integrity": "sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==", + "integrity": "sha1-ci18v8H2qoJB8W3YFOAR4fQeh5Y=", "requires": { "function-bind": "^1.1.1" } @@ -2720,7 +2720,7 @@ "hash.js": { "version": "1.1.4", "resolved": "https://registry.npmjs.org/hash.js/-/hash.js-1.1.4.tgz", - "integrity": "sha512-A6RlQvvZEtFS5fLU43IDu0QUmBy+fDO9VMdTXvufKwIkt/rFfvICAViCax5fbDO4zdNzaC3/27ZhKUok5bAJyw==", + "integrity": "sha1-i1Dh811RvQHl7Z7OTb41Scz6Cjw=", "requires": { "inherits": "^2.0.3", "minimalistic-assert": "^1.0.0" @@ -2729,7 +2729,7 @@ "hex-color-regex": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/hex-color-regex/-/hex-color-regex-1.1.0.tgz", - "integrity": "sha512-l9sfDFsuqtOqKDsQdqrMRk0U85RZc0RtOR9yPI7mRVOa4FsR/BVnZ0shmQRM96Ji99kYZP/7hn1cedc1+ApsTQ==" + "integrity": "sha1-TAb8y0YC/iYCs8k9+C1+fb8aio4=" }, "hmac-drbg": { "version": "1.0.1", @@ -2768,7 +2768,7 @@ "htmlnano": { "version": "0.1.10", "resolved": "https://registry.npmjs.org/htmlnano/-/htmlnano-0.1.10.tgz", - "integrity": "sha512-eTEUzz8VdWYp+w/KUdb99kwao4reR64epUySyZkQeepcyzPQ2n2EPWzibf6QDxmkGy10Kr+CKxYqI3izSbmhJQ==", + "integrity": "sha1-oKVI60x2rizyQj7HolyIFzTT3qY=", "requires": { "cssnano": "^3.4.0", "object-assign": "^4.0.1", @@ -2900,7 +2900,7 @@ "postcss": { "version": "5.2.18", "resolved": "https://registry.npmjs.org/postcss/-/postcss-5.2.18.tgz", - "integrity": "sha512-zrUjRRe1bpXKsX1qAJNJjqZViErVuyEkMTRrwu4ud4sbTtIBRmtaYDrHmcGgmrbsW3MHfmtIf+vJumgQn+PrXg==", + "integrity": "sha1-ut+hSX1GJE9jkPWLMZgw2RB4U8U=", "requires": { "chalk": "^1.1.3", "js-base64": "^2.1.9", @@ -3165,7 +3165,7 @@ "ieee754": { "version": "1.1.12", "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.1.12.tgz", - "integrity": "sha512-GguP+DRY+pJ3soyIiGPTvdiVXjZ+DbXOxGpXn3eMvNW4x4irjqXm4wHKscC+TfxSJ0yw/S1F24tqdMNsMZTiLA==" + "integrity": "sha1-UL8k5bnIu5ivSWTJQc2wkY2ntgs=" }, "indexes-of": { "version": "1.0.1", @@ -3185,12 +3185,12 @@ "ini": { "version": "1.3.5", "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.5.tgz", - "integrity": "sha512-RZY5huIKCMRWDUqZlEi72f/lmXKMvuszcMBduliQ3nnWbx9X/ZBQO7DijMEYS9EhHBb2qacRUMtC7svLwe0lcw==" + "integrity": "sha1-7uJfVtscnsYIXgwid4CD9Zar+Sc=" }, "invariant": { "version": "2.2.4", "resolved": "https://registry.npmjs.org/invariant/-/invariant-2.2.4.tgz", - "integrity": "sha512-phJfQVBuaJM5raOpJjSfkiD6BpbCE4Ns//LaXl6wGYtUBY83nWS6Rf9tXm2e8VaK60JEjYldbPif/A2B1C2gNA==", + "integrity": "sha1-YQ88ksk1nOHbYW5TgAjSP/NRWOY=", "requires": { "loose-envify": "^1.0.0" } @@ -3224,12 +3224,12 @@ "is-buffer": { "version": "1.1.6", "resolved": 
"https://registry.npmjs.org/is-buffer/-/is-buffer-1.1.6.tgz", - "integrity": "sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w==" + "integrity": "sha1-76ouqdqg16suoTqXsritUf776L4=" }, "is-callable": { "version": "1.1.4", "resolved": "https://registry.npmjs.org/is-callable/-/is-callable-1.1.4.tgz", - "integrity": "sha512-r5p9sxJjYnArLjObpjA4xu5EKI3CuKHkJXMhT7kwbpUyIFD1n5PMAsoPvWnvtZiNz7LjkYDRZhd7FlI0eMijEA==" + "integrity": "sha1-HhrfIZ4e62hNaR+dagX/DTCiTXU=" }, "is-color-stop": { "version": "1.1.0", @@ -3260,7 +3260,7 @@ "is-descriptor": { "version": "0.1.6", "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-0.1.6.tgz", - "integrity": "sha512-avDYr0SB3DwO9zsMov0gKCESFYqCnE4hq/4z3TdUlukEy5t9C0YRq7HLrsN52NAcqXKaepeCD0n+B0arnVG3Hg==", + "integrity": "sha1-Nm2CQN3kh8pRgjsaufB6EKeCUco=", "requires": { "is-accessor-descriptor": "^0.1.6", "is-data-descriptor": "^0.1.4", @@ -3270,7 +3270,7 @@ "kind-of": { "version": "5.1.0", "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-5.1.0.tgz", - "integrity": "sha512-NGEErnH6F2vUuXDh+OlbcKW7/wOcfdRHaZ7VWtqCztfHri/++YKmP51OdWeGPuqCOba6kk2OTe5d02VmTB80Pw==" + "integrity": "sha1-cpyR4thXt6QZofmqZWhcTDP1hF0=" } } }, @@ -3326,7 +3326,7 @@ "is-plain-object": { "version": "2.0.4", "resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-2.0.4.tgz", - "integrity": "sha512-h5PpgXkWitc38BBMYawTYMWJHFZJVnBquFE57xFpjB8pJFiF6gZ+bU+WyI/yqXiFR5mdLsgYNaPe8uao6Uv9Og==", + "integrity": "sha1-LBY7P6+xtgbZ0Xko8FwqHDjgdnc=", "requires": { "isobject": "^3.0.1" } @@ -3342,12 +3342,12 @@ "is-resolvable": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/is-resolvable/-/is-resolvable-1.1.0.tgz", - "integrity": "sha512-qgDYXFSR5WvEfuS5dMj6oTMEbrrSaM0CrFk2Yiq/gXnBvD9pMa2jGXxyhGLfvhZpuMZe18CJpFxAt3CRs42NMg==" + "integrity": "sha1-+xj4fOH+uSUWnJpAfBkxijIG7Yg=" }, "is-svg": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/is-svg/-/is-svg-3.0.0.tgz", - "integrity": "sha512-gi4iHK53LR2ujhLVVj+37Ykh9GLqYHX6JOVXbLAucaG/Cqw9xwdFOjDM2qeifLs1sF1npXXFvDu0r5HNgCMrzQ==", + "integrity": "sha1-kyHb0pwhLlypnE+peUxxS8r6L3U=", "requires": { "html-comment-regex": "^1.1.0" } @@ -3360,12 +3360,12 @@ "is-url": { "version": "1.2.4", "resolved": "https://registry.npmjs.org/is-url/-/is-url-1.2.4.tgz", - "integrity": "sha512-ITvGim8FhRiYe4IQ5uHSkj7pVaPDrCTkNd3yq3cV7iZAcJdHTUMPMEHcqSOy9xZ9qFenQCvi+2wjH9a1nXqHww==" + "integrity": "sha1-BKTfRtKMTP89c9Af8Gq+sxihqlI=" }, "is-windows": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/is-windows/-/is-windows-1.0.2.tgz", - "integrity": "sha512-eXK1UInq2bPmjyX6e3VHIzMLobc4J94i4AWn+Hpq3OU5KkrRC96OAcR3PRJ/pGu6m8TRnBHP9dkXQVsT/COVIA==" + "integrity": "sha1-0YUOuXkezRjmGCzhKjDzlmNLsZ0=" }, "is-wsl": { "version": "1.1.0", @@ -3390,12 +3390,12 @@ "js-base64": { "version": "2.4.8", "resolved": "https://registry.npmjs.org/js-base64/-/js-base64-2.4.8.tgz", - "integrity": "sha512-hm2nYpDrwoO/OzBhdcqs/XGT6XjSuSSCVEpia+Kl2J6x4CYt5hISlVL/AYU1khoDXv0AQVgxtdJySb9gjAn56Q==" + "integrity": "sha1-V6mxMIiPlWg0qkDFsWW6WcdY8DM=" }, "js-beautify": { "version": "1.7.5", "resolved": "https://registry.npmjs.org/js-beautify/-/js-beautify-1.7.5.tgz", - "integrity": "sha512-9OhfAqGOrD7hoQBLJMTA+BKuKmoEtTJXzZ7WDF/9gvjtey1koVLuZqIY6c51aPDjbNdNtIXAkiWKVhziawE9Og==", + "integrity": "sha1-adllHvYNu2SfZVJ7U2dJUBOKeRk=", "requires": { "config-chain": "~1.1.5", "editorconfig": "^0.13.2", @@ -3411,7 +3411,7 @@ "js-yaml": { "version": "3.12.0", "resolved": 
"https://registry.npmjs.org/js-yaml/-/js-yaml-3.12.0.tgz", - "integrity": "sha512-PIt2cnwmPfL4hKNwqeiuz4bKfnzHTBv6HyVgjahA6mPLwPDzjDWrplJBMjHUFxku/N3FlmrbyPclad+I+4mJ3A==", + "integrity": "sha1-6u1lbsg0TxD1J8a/obbiJE3hZ9E=", "requires": { "argparse": "^1.0.7", "esprima": "^4.0.0" @@ -3425,7 +3425,7 @@ "json-parse-better-errors": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/json-parse-better-errors/-/json-parse-better-errors-1.0.2.tgz", - "integrity": "sha512-mrqyZKfX5EhL7hvqcV6WG1yYjnjeuYDzDhhcAAUrq8Po85NBQBJP+ZDUT75qZQ98IkUoBqdkExkukOU7Ts2wrw==" + "integrity": "sha1-u4Z8+zRQ5pEHwTHRxRS6s9yLyqk=" }, "json5": { "version": "0.5.1", @@ -3452,7 +3452,7 @@ "lodash": { "version": "4.17.10", "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.10.tgz", - "integrity": "sha512-UejweD1pDoXu+AD825lWwp4ZGtSwgnpZxb3JDViD7StjQz+Nb/6l093lx4OQ0foGWNRoc19mWy7BzL+UAK2iVg==" + "integrity": "sha1-G3eTz3JZ6jj7NmHU04syYK+K5Oc=" }, "lodash.clone": { "version": "4.5.0", @@ -3477,7 +3477,7 @@ "log-symbols": { "version": "2.2.0", "resolved": "https://registry.npmjs.org/log-symbols/-/log-symbols-2.2.0.tgz", - "integrity": "sha512-VeIAFslyIerEJLXHziedo2basKbMKtTw3vfn5IzG0XTjhAVEJyNHnL2p7vc+wBDSdQuUpNw3M2u6xb9QsAY5Eg==", + "integrity": "sha1-V0Dhxdbw39pK2TI7UzIQfva0xAo=", "requires": { "chalk": "^2.0.1" }, @@ -3485,7 +3485,7 @@ "ansi-styles": { "version": "3.2.1", "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "integrity": "sha1-QfuyAkPlCxK+DwS43tvwdSDOhB0=", "requires": { "color-convert": "^1.9.0" } @@ -3493,7 +3493,7 @@ "chalk": { "version": "2.4.1", "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.1.tgz", - "integrity": "sha512-ObN6h1v2fTJSmUXoS3nMQ92LbDK9be4TV+6G+omQlGJFdcUX5heKi1LZ1YnRMIgwTLEj3E24bT6tYni50rlCfQ==", + "integrity": "sha1-GMSasWoDe26wFSzIPjRxM4IVtm4=", "requires": { "ansi-styles": "^3.2.1", "escape-string-regexp": "^1.0.5", @@ -3508,7 +3508,7 @@ "supports-color": { "version": "5.4.0", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.4.0.tgz", - "integrity": "sha512-zjaXglF5nnWpsq470jSv6P9DwPvgLkuapYmfDm3JWOm0vkNTVF2tI4UrN2r6jH1qM/uc/WtxYY1hYoA2dOKj5w==", + "integrity": "sha1-HGszdALCE3YF7+GfEP7DkPb6q1Q=", "requires": { "has-flag": "^3.0.0" } @@ -3526,7 +3526,7 @@ "magic-string": { "version": "0.22.5", "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.22.5.tgz", - "integrity": "sha512-oreip9rJZkzvA8Qzk9HFs8fZGF/u7H/gtrE8EN6RjKJ9kh2HlC+yQ2QezifqTZfGyiuAV0dRv5a+y/8gBb1m9w==", + "integrity": "sha1-jpz1r930Q4XB2lvCpqDb0QsDZX4=", "requires": { "vlq": "^0.2.2" } @@ -3561,7 +3561,7 @@ "mdn-data": { "version": "1.1.4", "resolved": "https://registry.npmjs.org/mdn-data/-/mdn-data-1.1.4.tgz", - "integrity": "sha512-FSYbp3lyKjyj3E7fMl6rYvUdX0FBXaluGqlFoYESWQlyUTq8R+wp0rkFxoYFqZlHCvsUXGjyJmLQSnXToYhOSA==" + "integrity": "sha1-ULXU/8RXUnZXPE7tuHgIEqhBnwE=" }, "merge-source-map": { "version": "1.0.4", @@ -3574,12 +3574,12 @@ "merge2": { "version": "1.2.2", "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.2.2.tgz", - "integrity": "sha512-bgM8twH86rWni21thii6WCMQMRMmwqqdW3sGWi9IipnVAszdLXRjwDwAnyrVXo6DuP3AjRMMttZKUB48QWIFGg==" + "integrity": "sha1-AyEuPajYbE2FI869YxgZNBT5TjQ=" }, "micromatch": { "version": "3.1.10", "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-3.1.10.tgz", - "integrity": 
"sha512-MWikgl9n9M3w+bpsY3He8L+w9eF9338xRl8IAO5viDizwSzziFEyUzo2xrrloB64ADbTf8uA8vRqqttDTOmccg==", + "integrity": "sha1-cIWbyVyYQJUvNZoGij/En57PrCM=", "requires": { "arr-diff": "^4.0.0", "array-unique": "^0.3.2", @@ -3599,14 +3599,14 @@ "kind-of": { "version": "6.0.2", "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.2.tgz", - "integrity": "sha512-s5kLOcnH0XqDO+FvuaLX8DDjZ18CGFk7VygH40QoKPUQhW4e2rvM0rwUq0t8IQDOwYSeLK01U90OjzBTme2QqA==" + "integrity": "sha1-ARRrNqYhjmTljzqNZt5df8b20FE=" } } }, "miller-rabin": { "version": "4.0.1", "resolved": "https://registry.npmjs.org/miller-rabin/-/miller-rabin-4.0.1.tgz", - "integrity": "sha512-115fLhvZVqWwHPbClyntxEVfVDfl9DLLTuJvq3g2O/Oxi8AiNouAHvDSzHS0viUJc+V5vm3eq91Xwqn9dp4jRA==", + "integrity": "sha1-8IA1HIZbDcViqEYpZtqlNUPHik0=", "requires": { "bn.js": "^4.0.0", "brorand": "^1.0.1" @@ -3615,17 +3615,17 @@ "mime": { "version": "1.4.1", "resolved": "https://registry.npmjs.org/mime/-/mime-1.4.1.tgz", - "integrity": "sha512-KI1+qOZu5DcW6wayYHSzR/tXKCDC5Om4s1z2QJjDULzLcmf3DvzS7oluY4HCTrc+9FiKmWUgeNLg7W3uIQvxtQ==" + "integrity": "sha1-Eh+evEnjdm8xGnbh+hyAA8SwOqY=" }, "mimic-fn": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-1.2.0.tgz", - "integrity": "sha512-jf84uxzwiuiIVKiOLpfYk7N46TSy8ubTonmneY9vrpHNAnp0QBt2BxWV9dO3/j+BoVAb+a5G6YDPW3M5HOdMWQ==" + "integrity": "sha1-ggyGo5M0ZA6ZUWkovQP8qIBX0CI=" }, "minimalistic-assert": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/minimalistic-assert/-/minimalistic-assert-1.0.1.tgz", - "integrity": "sha512-UtJcAD4yEaGtjPezWuO9wC4nwUnVH/8/Im3yEHQP4b67cXlD/Qr9hdITCU1xDbSEXg2XKNaP8jsReV7vQd00/A==" + "integrity": "sha1-LhlN4ERibUoQ5/f7wAznPoPk1cc=" }, "minimalistic-crypto-utils": { "version": "1.0.1", @@ -3635,7 +3635,7 @@ "minimatch": { "version": "3.0.4", "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz", - "integrity": "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==", + "integrity": "sha1-UWbihkV/AzBgZL5Ul+jbsMPTIIM=", "requires": { "brace-expansion": "^1.1.7" } @@ -3648,7 +3648,7 @@ "mixin-deep": { "version": "1.3.1", "resolved": "https://registry.npmjs.org/mixin-deep/-/mixin-deep-1.3.1.tgz", - "integrity": "sha512-8ZItLHeEgaqEvd5lYBXfm4EZSFCX29Jb9K+lAHhDKzReKBQKj3R+7NOF6tjqYi9t4oI8VUfaWITJQm86wnXGNQ==", + "integrity": "sha1-pJ5yaNzhoNlpjkUybFYm3zVD0P4=", "requires": { "for-in": "^1.0.2", "is-extendable": "^1.0.1" @@ -3657,7 +3657,7 @@ "is-extendable": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz", - "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==", + "integrity": "sha1-p0cPnkJnM9gb2B4RVSZOOjUHyrQ=", "requires": { "is-plain-object": "^2.0.4" } @@ -3680,12 +3680,12 @@ "nan": { "version": "2.10.0", "resolved": "https://registry.npmjs.org/nan/-/nan-2.10.0.tgz", - "integrity": "sha512-bAdJv7fBLhWC+/Bls0Oza+mvTaNQtP+1RyhhhvD95pgUJz6XM5IzgmxOkItJ9tkoCiplvAnXI1tNmmUD/eScyA==" + "integrity": "sha1-ltDNYQ69WNS03pzAxoKM2pnHVI8=" }, "nanomatch": { "version": "1.2.13", "resolved": "https://registry.npmjs.org/nanomatch/-/nanomatch-1.2.13.tgz", - "integrity": "sha512-fpoe2T0RbHwBTBUOftAfBPaDEi06ufaUai0mE6Yn1kacc3SnTErfb/h+X94VXzI64rKFHYImXSvdwGGCmwOqCA==", + "integrity": "sha1-uHqKpPwN6P5r6IiVs4mD/yZb0Rk=", "requires": { "arr-diff": "^4.0.0", "array-unique": "^0.3.2", @@ -3703,24 +3703,24 @@ "kind-of": { "version": "6.0.2", "resolved": 
"https://registry.npmjs.org/kind-of/-/kind-of-6.0.2.tgz", - "integrity": "sha512-s5kLOcnH0XqDO+FvuaLX8DDjZ18CGFk7VygH40QoKPUQhW4e2rvM0rwUq0t8IQDOwYSeLK01U90OjzBTme2QqA==" + "integrity": "sha1-ARRrNqYhjmTljzqNZt5df8b20FE=" } } }, "nice-try": { "version": "1.0.4", "resolved": "https://registry.npmjs.org/nice-try/-/nice-try-1.0.4.tgz", - "integrity": "sha512-2NpiFHqC87y/zFke0fC0spBXL3bBsoh/p5H1EFhshxjCR5+0g2d6BiXbUFz9v1sAcxsk2htp2eQnNIci2dIYcA==" + "integrity": "sha1-2Tli9sUvLBVYwPvabVEoGfHv4cQ=" }, "node-forge": { "version": "0.7.5", "resolved": "https://registry.npmjs.org/node-forge/-/node-forge-0.7.5.tgz", - "integrity": "sha512-MmbQJ2MTESTjt3Gi/3yG1wGpIMhUfcIypUCGtTizFR9IiccFwxSpfp0vtIZlkFclEqERemxfnSdZEMR9VqqEFQ==" + "integrity": "sha1-bBUsNFzhHFL0ZcKr2VfoY5zWdN8=" }, "node-libs-browser": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/node-libs-browser/-/node-libs-browser-2.1.0.tgz", - "integrity": "sha512-5AzFzdoIMb89hBGMZglEegffzgRg+ZFoUmisQ8HI4j1KDdpx13J0taNp2y9xPbur6W61gepGDDotGBVQ7mfUCg==", + "integrity": "sha1-X5QmPUBPbkR2fXJpAf/wVHjWAN8=", "requires": { "assert": "^1.1.1", "browserify-zlib": "^0.2.0", @@ -3757,7 +3757,7 @@ "node-releases": { "version": "1.0.0-alpha.10", "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-1.0.0-alpha.10.tgz", - "integrity": "sha512-BSQrRgOfN6L/MoKIa7pRUc7dHvflCXMcqyTBvphixcSsgJTuUd24vAFONuNfVsuwTyz28S1HEc9XN6ZKylk4Hg==", + "integrity": "sha1-YcjV+bWy4F2E66lB0FtvUgL2iio=", "requires": { "semver": "^5.3.0" } @@ -3786,7 +3786,7 @@ "normalize-url": { "version": "3.2.0", "resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-3.2.0.tgz", - "integrity": "sha512-WvF3Myk0NhXkG8S9bygFM4IC1KOvnVJGq0QoGeoqOYOBeinBZp5ybW3QuYbTc89lkWBMM9ZBO4QGRoc0353kKA==" + "integrity": "sha1-mNCUivyCgp83QyD0Bf6cpVpfhWc=" }, "nth-check": { "version": "1.0.1", @@ -3834,12 +3834,12 @@ "object-inspect": { "version": "1.4.1", "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.4.1.tgz", - "integrity": "sha512-wqdhLpfCUbEsoEwl3FXwGyv8ief1k/1aUdIPCqVnupM6e8l63BEJdiF/0swtn04/8p05tG/T0FrpTlfwvljOdw==" + "integrity": "sha1-N/+xDnGtrzdI0F9xO0yUUvQCy8Q=" }, "object-keys": { "version": "1.0.12", "resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.0.12.tgz", - "integrity": "sha512-FTMyFUm2wBcGHnH2eXmz7tC6IwlqQZ6mVZ+6dm6vZ4IQIHjs6FdNsQBuKGPuUUUY6NfJw2PshC08Tn6LzLDOag==" + "integrity": "sha1-CcU4VTd1dTEMymL1W7M0q/97PtI=" }, "object-visit": { "version": "1.0.1", @@ -3896,7 +3896,7 @@ "opn": { "version": "5.3.0", "resolved": "https://registry.npmjs.org/opn/-/opn-5.3.0.tgz", - "integrity": "sha512-bYJHo/LOmoTd+pfiYhfZDnf9zekVJrY+cnS2a5F2x+w5ppvTqObojTP7WiFG+kVZs9Inw+qQ/lw7TroWwhdd2g==", + "integrity": "sha1-ZIcVZchjh18FLP31PT48ta21Oxw=", "requires": { "is-wsl": "^1.1.0" } @@ -3924,7 +3924,7 @@ "ora": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/ora/-/ora-2.1.0.tgz", - "integrity": "sha512-hNNlAd3gfv/iPmsNxYoAPLvxg7HuPozww7fFonMZvL84tP6Ox5igfk5j/+a9rtJJwqMgKK+JgWsAQik5o0HTLA==", + "integrity": "sha1-bK8oMOuSSUGGHsU6FzeZ4Ai1Hls=", "requires": { "chalk": "^2.3.1", "cli-cursor": "^2.1.0", @@ -3942,7 +3942,7 @@ "ansi-styles": { "version": "3.2.1", "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "integrity": "sha1-QfuyAkPlCxK+DwS43tvwdSDOhB0=", "requires": { "color-convert": "^1.9.0" } @@ -3950,7 +3950,7 @@ "chalk": { "version": "2.4.1", "resolved": 
"https://registry.npmjs.org/chalk/-/chalk-2.4.1.tgz", - "integrity": "sha512-ObN6h1v2fTJSmUXoS3nMQ92LbDK9be4TV+6G+omQlGJFdcUX5heKi1LZ1YnRMIgwTLEj3E24bT6tYni50rlCfQ==", + "integrity": "sha1-GMSasWoDe26wFSzIPjRxM4IVtm4=", "requires": { "ansi-styles": "^3.2.1", "escape-string-regexp": "^1.0.5", @@ -3973,7 +3973,7 @@ "supports-color": { "version": "5.4.0", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.4.0.tgz", - "integrity": "sha512-zjaXglF5nnWpsq470jSv6P9DwPvgLkuapYmfDm3JWOm0vkNTVF2tI4UrN2r6jH1qM/uc/WtxYY1hYoA2dOKj5w==", + "integrity": "sha1-HGszdALCE3YF7+GfEP7DkPb6q1Q=", "requires": { "has-flag": "^3.0.0" } @@ -3998,12 +3998,12 @@ "pako": { "version": "1.0.6", "resolved": "https://registry.npmjs.org/pako/-/pako-1.0.6.tgz", - "integrity": "sha512-lQe48YPsMJAig+yngZ87Lus+NF+3mtu7DVOBu6b/gHO1YpKwIj5AWjZ/TOS7i46HD/UixzWb1zeWDZfGZ3iYcg==" + "integrity": "sha1-AQEhG6pwxLykoPY/Igbpe3368lg=" }, "parcel-bundler": { "version": "1.9.7", "resolved": "https://registry.npmjs.org/parcel-bundler/-/parcel-bundler-1.9.7.tgz", - "integrity": "sha512-x+RiXe/C+aOoFuw+acH/NKjKmUJ/2zbFWFUS/KE5jBk2ErsN0Dc3OxLpmEaeIMU4oMPWFeNm5mRXcXdeUwf7GA==", + "integrity": "sha1-XNAIUN6gJU03cAXVWrS/YEKborw=", "requires": { "ansi-to-html": "^0.6.4", "babel-code-frame": "^6.26.0", @@ -4069,7 +4069,7 @@ "ansi-styles": { "version": "3.2.1", "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "integrity": "sha1-QfuyAkPlCxK+DwS43tvwdSDOhB0=", "requires": { "color-convert": "^1.9.0" } @@ -4077,7 +4077,7 @@ "chalk": { "version": "2.4.1", "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.1.tgz", - "integrity": "sha512-ObN6h1v2fTJSmUXoS3nMQ92LbDK9be4TV+6G+omQlGJFdcUX5heKi1LZ1YnRMIgwTLEj3E24bT6tYni50rlCfQ==", + "integrity": "sha1-GMSasWoDe26wFSzIPjRxM4IVtm4=", "requires": { "ansi-styles": "^3.2.1", "escape-string-regexp": "^1.0.5", @@ -4087,7 +4087,7 @@ "cross-spawn": { "version": "6.0.5", "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-6.0.5.tgz", - "integrity": "sha512-eTVLrBSt7fjbDygz805pMnstIs2VTBNkRm0qxZd+M7A5XDdxVRWO5MxGBXZhjY4cqLYLdtrGqRf8mBPmzwSpWQ==", + "integrity": "sha1-Sl7Hxk364iw6FBJNus3uhG2Ay8Q=", "requires": { "nice-try": "^1.0.4", "path-key": "^2.0.1", @@ -4104,7 +4104,7 @@ "json5": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/json5/-/json5-1.0.1.tgz", - "integrity": "sha512-aKS4WQjPenRxiQsC93MNfjx+nbF4PAdYzmd/1JIj8HYzqfbu86beTuNgXDzPknWk0n0uARlyewZo4s++ES36Ow==", + "integrity": "sha1-d5+wAYYE+oVOrL9iUhgNg1Q+Pb4=", "requires": { "minimist": "^1.2.0" } @@ -4117,7 +4117,7 @@ "source-map": { "version": "0.6.1", "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==" + "integrity": "sha1-dHIq8y6WFOnCh6jQu95IteLxomM=" }, "strip-ansi": { "version": "4.0.0", @@ -4130,7 +4130,7 @@ "supports-color": { "version": "5.4.0", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.4.0.tgz", - "integrity": "sha512-zjaXglF5nnWpsq470jSv6P9DwPvgLkuapYmfDm3JWOm0vkNTVF2tI4UrN2r6jH1qM/uc/WtxYY1hYoA2dOKj5w==", + "integrity": "sha1-HGszdALCE3YF7+GfEP7DkPb6q1Q=", "requires": { "has-flag": "^3.0.0" } @@ -4140,7 +4140,7 @@ "parse-asn1": { "version": "5.1.1", "resolved": "https://registry.npmjs.org/parse-asn1/-/parse-asn1-5.1.1.tgz", - "integrity": 
"sha512-KPx7flKXg775zZpnp9SxJlz00gTd4BmJ2yJufSc44gMCRrRQ7NSzAcSJQfifuOLgW6bEi+ftrALtsgALeB2Adw==", + "integrity": "sha1-9r8pOBgzK9DatU77Fgh3JHRebKg=", "requires": { "asn1.js": "^4.0.0", "browserify-aes": "^1.0.0", @@ -4182,12 +4182,12 @@ "path-parse": { "version": "1.0.6", "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.6.tgz", - "integrity": "sha512-GSmOT2EbHrINBf9SR7CDELwlJ8AENk3Qn7OikK4nFYAu3Ote2+JYNVvkpAEQm3/TLNEJFD/xZJjzyxg3KBWOzw==" + "integrity": "sha1-1i27VnlAXXLEc37FhgDp3c8G0kw=" }, "pbkdf2": { "version": "3.0.16", "resolved": "https://registry.npmjs.org/pbkdf2/-/pbkdf2-3.0.16.tgz", - "integrity": "sha512-y4CXP3thSxqf7c0qmOF+9UeOTrifiVTIM+u7NWlq+PRsHbr7r7dpCmvzrZxa96JJUNi0Y5w9VqG5ZNeCVMoDcA==", + "integrity": "sha1-dAQgjsawG2LYW/g4U6gGT42cKlw=", "requires": { "create-hash": "^1.1.2", "create-hmac": "^1.1.4", @@ -4209,7 +4209,7 @@ "postcss": { "version": "6.0.23", "resolved": "https://registry.npmjs.org/postcss/-/postcss-6.0.23.tgz", - "integrity": "sha512-soOk1h6J3VMTZtVeVpv15/Hpdl2cBLX3CAw4TAbkpTJiNPk9YP/zWcD1ND+xEtvyuuvKzbxliTOIyvkSeSJ6ag==", + "integrity": "sha1-YcgswyisYOZ3ZF+XkFTrmLwOMyQ=", "requires": { "chalk": "^2.4.1", "source-map": "^0.6.1", @@ -4219,7 +4219,7 @@ "ansi-styles": { "version": "3.2.1", "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "integrity": "sha1-QfuyAkPlCxK+DwS43tvwdSDOhB0=", "requires": { "color-convert": "^1.9.0" } @@ -4227,7 +4227,7 @@ "chalk": { "version": "2.4.1", "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.1.tgz", - "integrity": "sha512-ObN6h1v2fTJSmUXoS3nMQ92LbDK9be4TV+6G+omQlGJFdcUX5heKi1LZ1YnRMIgwTLEj3E24bT6tYni50rlCfQ==", + "integrity": "sha1-GMSasWoDe26wFSzIPjRxM4IVtm4=", "requires": { "ansi-styles": "^3.2.1", "escape-string-regexp": "^1.0.5", @@ -4242,12 +4242,12 @@ "source-map": { "version": "0.6.1", "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==" + "integrity": "sha1-dHIq8y6WFOnCh6jQu95IteLxomM=" }, "supports-color": { "version": "5.4.0", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.4.0.tgz", - "integrity": "sha512-zjaXglF5nnWpsq470jSv6P9DwPvgLkuapYmfDm3JWOm0vkNTVF2tI4UrN2r6jH1qM/uc/WtxYY1hYoA2dOKj5w==", + "integrity": "sha1-HGszdALCE3YF7+GfEP7DkPb6q1Q=", "requires": { "has-flag": "^3.0.0" } @@ -4280,7 +4280,7 @@ "browserslist": { "version": "4.0.1", "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.0.1.tgz", - "integrity": "sha512-QqiiIWchEIkney3wY53/huI7ZErouNAdvOkjorUALAwRcu3tEwOV3Sh6He0DnP38mz1JjBpCBb50jQBmaYuHPw==", + "integrity": "sha1-YcBc4qWEPH2WFmQIvCPVi1QW6Bg=", "requires": { "caniuse-lite": "^1.0.30000865", "electron-to-chromium": "^1.3.52", @@ -4290,7 +4290,7 @@ "caniuse-lite": { "version": "1.0.30000874", "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30000874.tgz", - "integrity": "sha512-29nr1EPiHwrJTAHHsEmTt2h+55L8j2GNFdAcYPlRy2NX6iFz7ZZiepVI7kP/QqlnHLq3KvfWpbmGa0d063U09w==" + "integrity": "sha1-pkGx8cQg1Y2bEykg72uoe73NIiM=" }, "electron-to-chromium": { "version": "1.3.55", @@ -4357,7 +4357,7 @@ "postcss": { "version": "5.2.18", "resolved": "https://registry.npmjs.org/postcss/-/postcss-5.2.18.tgz", - "integrity": "sha512-zrUjRRe1bpXKsX1qAJNJjqZViErVuyEkMTRrwu4ud4sbTtIBRmtaYDrHmcGgmrbsW3MHfmtIf+vJumgQn+PrXg==", + "integrity": 
"sha1-ut+hSX1GJE9jkPWLMZgw2RB4U8U=", "requires": { "chalk": "^1.1.3", "js-base64": "^2.1.9", @@ -4378,7 +4378,7 @@ "postcss-filter-plugins": { "version": "2.0.3", "resolved": "https://registry.npmjs.org/postcss-filter-plugins/-/postcss-filter-plugins-2.0.3.tgz", - "integrity": "sha512-T53GVFsdinJhgwm7rg1BzbeBRomOg9y5MBVhGcsV0CxurUdVj1UlPdKtn7aqYA/c/QVkzKMjq2bSV5dKG5+AwQ==", + "integrity": "sha1-giRf34IzcEFkXkdxFNjlk6oYuOw=", "requires": { "postcss": "^5.0.4" }, @@ -4391,7 +4391,7 @@ "postcss": { "version": "5.2.18", "resolved": "https://registry.npmjs.org/postcss/-/postcss-5.2.18.tgz", - "integrity": "sha512-zrUjRRe1bpXKsX1qAJNJjqZViErVuyEkMTRrwu4ud4sbTtIBRmtaYDrHmcGgmrbsW3MHfmtIf+vJumgQn+PrXg==", + "integrity": "sha1-ut+hSX1GJE9jkPWLMZgw2RB4U8U=", "requires": { "chalk": "^1.1.3", "js-base64": "^2.1.9", @@ -4427,7 +4427,7 @@ "postcss": { "version": "5.2.18", "resolved": "https://registry.npmjs.org/postcss/-/postcss-5.2.18.tgz", - "integrity": "sha512-zrUjRRe1bpXKsX1qAJNJjqZViErVuyEkMTRrwu4ud4sbTtIBRmtaYDrHmcGgmrbsW3MHfmtIf+vJumgQn+PrXg==", + "integrity": "sha1-ut+hSX1GJE9jkPWLMZgw2RB4U8U=", "requires": { "chalk": "^1.1.3", "js-base64": "^2.1.9", @@ -4448,7 +4448,7 @@ "postcss-merge-longhand": { "version": "4.0.4", "resolved": "https://registry.npmjs.org/postcss-merge-longhand/-/postcss-merge-longhand-4.0.4.tgz", - "integrity": "sha512-wLi2u22mSdBDBjLF8pyaPCNppOmqb+B4O0Dlt/4nUwn79EltDUJmCeCDYqo7SB2z9puOHTftnxviY4J9xS+ygQ==", + "integrity": "sha1-v/x8b/oUZZHJk6C7g3PWX5oG1NA=", "requires": { "css-color-names": "0.0.4", "postcss": "^6.0.0", @@ -4472,7 +4472,7 @@ "browserslist": { "version": "4.0.1", "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.0.1.tgz", - "integrity": "sha512-QqiiIWchEIkney3wY53/huI7ZErouNAdvOkjorUALAwRcu3tEwOV3Sh6He0DnP38mz1JjBpCBb50jQBmaYuHPw==", + "integrity": "sha1-YcBc4qWEPH2WFmQIvCPVi1QW6Bg=", "requires": { "caniuse-lite": "^1.0.30000865", "electron-to-chromium": "^1.3.52", @@ -4482,7 +4482,7 @@ "caniuse-lite": { "version": "1.0.30000874", "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30000874.tgz", - "integrity": "sha512-29nr1EPiHwrJTAHHsEmTt2h+55L8j2GNFdAcYPlRy2NX6iFz7ZZiepVI7kP/QqlnHLq3KvfWpbmGa0d063U09w==" + "integrity": "sha1-pkGx8cQg1Y2bEykg72uoe73NIiM=" }, "electron-to-chromium": { "version": "1.3.55", @@ -4677,7 +4677,7 @@ "postcss": { "version": "5.2.18", "resolved": "https://registry.npmjs.org/postcss/-/postcss-5.2.18.tgz", - "integrity": "sha512-zrUjRRe1bpXKsX1qAJNJjqZViErVuyEkMTRrwu4ud4sbTtIBRmtaYDrHmcGgmrbsW3MHfmtIf+vJumgQn+PrXg==", + "integrity": "sha1-ut+hSX1GJE9jkPWLMZgw2RB4U8U=", "requires": { "chalk": "^1.1.3", "js-base64": "^2.1.9", @@ -4709,7 +4709,7 @@ "browserslist": { "version": "4.0.1", "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.0.1.tgz", - "integrity": "sha512-QqiiIWchEIkney3wY53/huI7ZErouNAdvOkjorUALAwRcu3tEwOV3Sh6He0DnP38mz1JjBpCBb50jQBmaYuHPw==", + "integrity": "sha1-YcBc4qWEPH2WFmQIvCPVi1QW6Bg=", "requires": { "caniuse-lite": "^1.0.30000865", "electron-to-chromium": "^1.3.52", @@ -4719,7 +4719,7 @@ "caniuse-lite": { "version": "1.0.30000874", "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30000874.tgz", - "integrity": "sha512-29nr1EPiHwrJTAHHsEmTt2h+55L8j2GNFdAcYPlRy2NX6iFz7ZZiepVI7kP/QqlnHLq3KvfWpbmGa0d063U09w==" + "integrity": "sha1-pkGx8cQg1Y2bEykg72uoe73NIiM=" }, "electron-to-chromium": { "version": "1.3.55", @@ -4793,7 +4793,7 @@ "postcss": { "version": "5.2.18", "resolved": "https://registry.npmjs.org/postcss/-/postcss-5.2.18.tgz", - 
"integrity": "sha512-zrUjRRe1bpXKsX1qAJNJjqZViErVuyEkMTRrwu4ud4sbTtIBRmtaYDrHmcGgmrbsW3MHfmtIf+vJumgQn+PrXg==", + "integrity": "sha1-ut+hSX1GJE9jkPWLMZgw2RB4U8U=", "requires": { "chalk": "^1.1.3", "js-base64": "^2.1.9", @@ -4814,7 +4814,7 @@ "posthtml": { "version": "0.11.3", "resolved": "https://registry.npmjs.org/posthtml/-/posthtml-0.11.3.tgz", - "integrity": "sha512-quMHnDckt2DQ9lRi6bYLnuyBDnVzK+McHa8+ar4kTdYbWEo/92hREOu3h70ZirudOOp/my2b3r0m5YtxY52yrA==", + "integrity": "sha1-F+opIbBVW3RV8zyXe9Fti4y3Tyc=", "requires": { "object-assign": "^4.1.1", "posthtml-parser": "^0.3.3", @@ -4832,7 +4832,7 @@ "posthtml-parser": { "version": "0.3.3", "resolved": "https://registry.npmjs.org/posthtml-parser/-/posthtml-parser-0.3.3.tgz", - "integrity": "sha512-H/Z/yXGwl49A7hYQLV1iQ3h87NE0aZ/PMZhFwhw3lKeCAN+Ti4idrHvVvh4/GX10I7u77aQw+QB4vV5/Lzvv5A==", + "integrity": "sha1-P+mG/KnwDA8QnXMbpZCxkvJud20=", "requires": { "htmlparser2": "^3.9.2", "isobject": "^2.1.0", @@ -4844,7 +4844,7 @@ "posthtml-parser": { "version": "0.4.1", "resolved": "https://registry.npmjs.org/posthtml-parser/-/posthtml-parser-0.4.1.tgz", - "integrity": "sha512-h7vXIQ21Ikz2w5wPClPakNP6mJeJCK6BT0GpqnQrNNABdR7/TchNlFyryL1Bz6Ww53YWCKkr6tdZuHlxY1AVdQ==", + "integrity": "sha1-lbeP73Zvu+Cm+GG26VWCvD0f+TM=", "requires": { "htmlparser2": "^3.9.2", "object-assign": "^4.1.1" @@ -4853,7 +4853,7 @@ "posthtml-render": { "version": "1.1.4", "resolved": "https://registry.npmjs.org/posthtml-render/-/posthtml-render-1.1.4.tgz", - "integrity": "sha512-jL6eFIzoN3xUEvbo33OAkSDE2VIKU4JQ1wENOows1DpfnrdapR/K3Q1/fB43Mq7wQlcSgRm23nFrvoioufM7eA==" + "integrity": "sha1-ldrAmJL08YP61ayCPwj0LAJWVR4=" }, "prelude-ls": { "version": "1.1.2", @@ -4868,7 +4868,7 @@ "private": { "version": "0.1.8", "resolved": "https://registry.npmjs.org/private/-/private-0.1.8.tgz", - "integrity": "sha512-VvivMrbvd2nKkiG38qjULzlc+4Vx4wm/whI9pQD35YrARNnhxeiRktSOhSukRLFNlzg6Br/cJPet5J/u19r/mg==" + "integrity": "sha1-I4Hts2ifelPWUxkAYPz4ItLzaP8=" }, "process": { "version": "0.11.10", @@ -4878,7 +4878,7 @@ "process-nextick-args": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.0.tgz", - "integrity": "sha512-MtEC1TqN0EU5nephaJ4rAtThHtC86dNN9qCuEhtshvpVBkAW5ZO7BASN9REnF9eoXGcRub+pFuKEpOHE+HbEMw==" + "integrity": "sha1-o31zL0JxtKsa0HDTVQjoKQeI/6o=" }, "proto-list": { "version": "1.2.4", @@ -4893,7 +4893,7 @@ "public-encrypt": { "version": "4.0.2", "resolved": "https://registry.npmjs.org/public-encrypt/-/public-encrypt-4.0.2.tgz", - "integrity": "sha512-4kJ5Esocg8X3h8YgJsKAuoesBgB7mqH3eowiDzMUPKiRDDE7E/BqqZD1hnTByIaAFiwAw246YEltSq7tdrOH0Q==", + "integrity": "sha1-RuuRByBr9zSJ+LhbadkTNMZhCZQ=", "requires": { "bn.js": "^4.1.0", "browserify-rsa": "^4.0.0", @@ -4946,7 +4946,7 @@ "randombytes": { "version": "2.0.6", "resolved": "https://registry.npmjs.org/randombytes/-/randombytes-2.0.6.tgz", - "integrity": "sha512-CIQ5OFxf4Jou6uOKe9t1AOgqpeU5fd70A8NPdHSGeYXqXsPe6peOwI0cUl88RWZ6sP1vPMV3avd/R6cZ5/sP1A==", + "integrity": "sha1-0wLFIpSFiISKjTAMkytEwkIx2oA=", "requires": { "safe-buffer": "^5.1.0" } @@ -4954,7 +4954,7 @@ "randomfill": { "version": "1.0.4", "resolved": "https://registry.npmjs.org/randomfill/-/randomfill-1.0.4.tgz", - "integrity": "sha512-87lcbR8+MhcWcUiQ+9e+Rwx8MyR2P7qnt15ynUlbm3TU/fjbgz4GsvfSUDTemtCCtVCqb4ZcEFlyPNTh9bBTLw==", + "integrity": "sha1-ySGW/IarQr6YPxvzF3giSTHWFFg=", "requires": { "randombytes": "^2.0.5", "safe-buffer": "^5.1.0" @@ -4968,7 +4968,7 @@ "readable-stream": { "version": "2.3.6", "resolved": 
"https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.6.tgz", - "integrity": "sha512-tQtKA9WIAhBF3+VLAseyMqZeBjW0AHJoxOtYqSUZNJxauErmLbVm2FW1y+J/YA9dUrAC39ITejlZWhVIwawkKw==", + "integrity": "sha1-sRwn2IuP8fvgcGQ8+UsMea4bCq8=", "requires": { "core-util-is": "~1.0.0", "inherits": "~2.0.3", @@ -4993,7 +4993,7 @@ "reduce-css-calc": { "version": "2.1.4", "resolved": "https://registry.npmjs.org/reduce-css-calc/-/reduce-css-calc-2.1.4.tgz", - "integrity": "sha512-i/vWQbyd3aJRmip9OVSN9V6nIjLf/gg/ctxb0CpvHWtcRysFl/ngDBQD+rqavxdw/doScA3GMBXhzkHQ4GCzFQ==", + "integrity": "sha1-wg6c2oRFrXPU/0vqlgxvg1N5Fwg=", "requires": { "css-unit-converter": "^1.1.1", "postcss-value-parser": "^3.3.0" @@ -5017,17 +5017,17 @@ "regenerate": { "version": "1.4.0", "resolved": "https://registry.npmjs.org/regenerate/-/regenerate-1.4.0.tgz", - "integrity": "sha512-1G6jJVDWrt0rK99kBjvEtziZNCICAuvIPkSiUFIQxVP06RCVpq3dmDo2oi6ABpYaDYaTRr67BEhL8r1wgEZZKg==" + "integrity": "sha1-SoVuxLVuQHfFV1icroXnpMiGmhE=" }, "regenerator-runtime": { "version": "0.11.1", "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.11.1.tgz", - "integrity": "sha512-MguG95oij0fC3QV3URf4V2SDYGJhJnJGqvIIgdECeODCT98wSWDAJ94SSuVpYQUoTcGUIL6L4yNB7j1DFFHSBg==" + "integrity": "sha1-vgWtf5v30i4Fb5cmzuUBf78Z4uk=" }, "regenerator-transform": { "version": "0.10.1", "resolved": "https://registry.npmjs.org/regenerator-transform/-/regenerator-transform-0.10.1.tgz", - "integrity": "sha512-PJepbvDbuK1xgIgnau7Y90cwaAmO/LCLMI2mPvaXq2heGMR3aWW5/BQvYrhJ8jgmQjXewXvBjzfqKcVOmhjZ6Q==", + "integrity": "sha1-HkmWg3Ix2ot/PPQRTXG1aRoGgN0=", "requires": { "babel-runtime": "^6.18.0", "babel-types": "^6.19.0", @@ -5037,7 +5037,7 @@ "regex-not": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/regex-not/-/regex-not-1.0.2.tgz", - "integrity": "sha512-J6SDjUgDxQj5NusnOtdFxDwN/+HWykR8GELwctJ7mdqhcyy1xEc4SRFHUXvxTp661YaVKAjfRLZ9cCqS6tn32A==", + "integrity": "sha1-H07OJ+ALC2XgJHpoEOaoXYOldSw=", "requires": { "extend-shallow": "^3.0.2", "safe-regex": "^1.1.0" @@ -5099,7 +5099,7 @@ "resolve": { "version": "1.8.1", "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.8.1.tgz", - "integrity": "sha512-AicPrAC7Qu1JxPCZ9ZgCZlY35QgFnNqc+0LtbRNxnVw4TXvjQ72wnuL9JQcEBgXkI9JM8MsT9kaQoHcpCRJOYA==", + "integrity": "sha1-gvHsGaQjrB+9CAsLqwa6NuhKeiY=", "requires": { "path-parse": "^1.0.5" } @@ -5121,7 +5121,7 @@ "ret": { "version": "0.1.15", "resolved": "https://registry.npmjs.org/ret/-/ret-0.1.15.tgz", - "integrity": "sha512-TTlYpa+OL+vMMNG24xSlQGEJ3B/RzEfUlLct7b5G/ytav+wPrplCpVMFuwzXbkecJrb6IYo1iFb0S9v37754mg==" + "integrity": "sha1-uKSCXVvbH8P29Twrwz+BOIaBx7w=" }, "rgb-regex": { "version": "1.0.1", @@ -5136,7 +5136,7 @@ "ripemd160": { "version": "2.0.2", "resolved": "https://registry.npmjs.org/ripemd160/-/ripemd160-2.0.2.tgz", - "integrity": "sha512-ii4iagi25WusVoiC4B4lq7pbXfAp3D9v5CwfkY33vffw2+pkDjY1D8GaN7spsxvCSx8dkPqOZCEZyfxcmJG2IA==", + "integrity": "sha1-ocGm9iR1FXe6XQeRTLyShQWFiQw=", "requires": { "hash-base": "^3.0.0", "inherits": "^2.0.1" @@ -5145,7 +5145,7 @@ "safe-buffer": { "version": "5.1.2", "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", - "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" + "integrity": "sha1-mR7GnSluAxN0fVm9/St0XDX4go0=" }, "safe-regex": { "version": "1.1.0", @@ -5158,7 +5158,7 @@ "safer-eval": { "version": "1.2.3", "resolved": "https://registry.npmjs.org/safer-eval/-/safer-eval-1.2.3.tgz", - "integrity": 
"sha512-nDwXOhiheoaBT6op02n8wzsshjLXHhh4YAeqsDEoVmy1k2+lGv/ENLsGaWqkaKArUkUx48VO12/ZPa3sI/OEqQ==", + "integrity": "sha1-c7p0o0vIoH1qRBNcgV/Rio7r56A=", "requires": { "clones": "^1.1.0" } @@ -5166,17 +5166,17 @@ "sax": { "version": "1.2.4", "resolved": "https://registry.npmjs.org/sax/-/sax-1.2.4.tgz", - "integrity": "sha512-NqVDv9TpANUjFm0N8uM5GxL36UgKi9/atZw+x7YFnQ8ckwFGKrl4xX4yWtrey3UJm5nP1kUbnYgLopqWNSRhWw==" + "integrity": "sha1-KBYjTiN4vdxOU1T6tcqold9xANk=" }, "semver": { "version": "5.5.0", "resolved": "https://registry.npmjs.org/semver/-/semver-5.5.0.tgz", - "integrity": "sha512-4SJ3dm0WAwWy/NVeioZh5AntkdJoWKxHxcmyP622fOkgHa4z3R0TdBJICINyaSDE6uNwVc8gZr+ZinwZAH4xIA==" + "integrity": "sha1-3Eu8emyp2Rbe5dQ1FvAJK1j3uKs=" }, "send": { "version": "0.16.2", "resolved": "https://registry.npmjs.org/send/-/send-0.16.2.tgz", - "integrity": "sha512-E64YFPUssFHEFBvpbbjr44NCLtI1AohxQ8ZSiJjQLskAdKuriYEP6VyGEsRDH8ScozGpkaX1BGvhanqCwkcEZw==", + "integrity": "sha1-bsyh4PjBVtFBWXVZhI32RzCmu8E=", "requires": { "debug": "2.6.9", "depd": "~1.1.2", @@ -5196,7 +5196,7 @@ "serialize-to-js": { "version": "1.2.1", "resolved": "https://registry.npmjs.org/serialize-to-js/-/serialize-to-js-1.2.1.tgz", - "integrity": "sha512-TK6d30GNkOLeFDPuP6Jfy1Q1V31GxzppYTt2lzr8KWmIUKomFj+260QP5o4AhHLu0pr6urgyS8i/Z1PqurjBoA==", + "integrity": "sha1-Lof2H5OIJtJMRjp8vQ3Skp7DgAg=", "requires": { "js-beautify": "^1.7.5", "safer-eval": "^1.2.3" @@ -5205,7 +5205,7 @@ "serve-static": { "version": "1.13.2", "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.13.2.tgz", - "integrity": "sha512-p/tdJrO4U387R9oMjb1oj7qSMaMfmOyd4j9hOFoxZe2baQszgHcSWjuya/CiT5kgZZKRudHNOA0pYXOl8rQ5nw==", + "integrity": "sha1-CV6Ecv1bRiN9tQzkhqQ/S4bGzsE=", "requires": { "encodeurl": "~1.0.2", "escape-html": "~1.0.3", @@ -5221,7 +5221,7 @@ "set-value": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/set-value/-/set-value-2.0.0.tgz", - "integrity": "sha512-hw0yxk9GT/Hr5yJEYnHNKYXkIA8mVJgd9ditYZCe16ZczcaELYYcfvaXesNACk2O8O0nTiPQcQhGUQj8JLzeeg==", + "integrity": "sha1-ca5KiPD+77v1LR6mBPP7MV67YnQ=", "requires": { "extend-shallow": "^2.0.1", "is-extendable": "^0.1.1", @@ -5247,12 +5247,12 @@ "setprototypeof": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.1.0.tgz", - "integrity": "sha512-BvE/TwpZX4FXExxOxZyRGQQv651MSwmWKZGqvmPcRIjDqWub67kTKuIMx43cZZrS/cBBzwBcNDWoFxt2XEFIpQ==" + "integrity": "sha1-0L2FU2iHtv58DYGMuWLZ2RxU5lY=" }, "sha.js": { "version": "2.4.11", "resolved": "https://registry.npmjs.org/sha.js/-/sha.js-2.4.11.tgz", - "integrity": "sha512-QMEp5B7cftE7APOjk5Y6xgrbWu+WkLVQwk8JNjZ8nKRciZaByEW6MubieAiToS7+dwvrjGhH8jRXz3MVd0AYqQ==", + "integrity": "sha1-N6XPC4HsvGlD3hCbopYNGyZYSuc=", "requires": { "inherits": "^2.0.1", "safe-buffer": "^5.0.1" @@ -5300,7 +5300,7 @@ "is-arrayish": { "version": "0.3.2", "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.3.2.tgz", - "integrity": "sha512-eVRqCvVlZbuw3GrM63ovNSNAeA1K16kaR/LRY/92w0zxQ5/1YzwblUX652i4Xs9RwAGjW9d9y6X88t8OaAJfWQ==" + "integrity": "sha1-RXSirlb3qyBolvtDHq7tBm/fjwM=" } } }, @@ -5312,7 +5312,7 @@ "snapdragon": { "version": "0.8.2", "resolved": "https://registry.npmjs.org/snapdragon/-/snapdragon-0.8.2.tgz", - "integrity": "sha512-FtyOnWN/wCHTVXOMwvSv26d+ko5vWlIDD6zoUJ7LW8vh+ZBC8QdljveRP+crNrtBwioEUWy/4dMtbBjA4ioNlg==", + "integrity": "sha1-ZJIufFZbDhQgS6GqfWlkJ40lGC0=", "requires": { "base": "^0.11.1", "debug": "^2.2.0", @@ -5345,7 +5345,7 @@ "snapdragon-node": { "version": "2.1.1", "resolved": 
"https://registry.npmjs.org/snapdragon-node/-/snapdragon-node-2.1.1.tgz", - "integrity": "sha512-O27l4xaMYt/RSQ5TR3vpWCAB5Kb/czIcqUFOM/C4fYcLnbZUc1PkjTAMjof2pBWaSTwOUd6qUHcFGVGj7aIwnw==", + "integrity": "sha1-bBdfhv8UvbByRWPo88GwIaKGhTs=", "requires": { "define-property": "^1.0.0", "isobject": "^3.0.0", @@ -5363,7 +5363,7 @@ "is-accessor-descriptor": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz", - "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==", + "integrity": "sha1-FpwvbT3x+ZJhgHI2XJsOofaHhlY=", "requires": { "kind-of": "^6.0.0" } @@ -5371,7 +5371,7 @@ "is-data-descriptor": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz", - "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==", + "integrity": "sha1-2Eh2Mh0Oet0DmQQGq7u9NrqSaMc=", "requires": { "kind-of": "^6.0.0" } @@ -5379,7 +5379,7 @@ "is-descriptor": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz", - "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==", + "integrity": "sha1-OxWXRqZmBLBPjIFSS6NlxfFNhuw=", "requires": { "is-accessor-descriptor": "^1.0.0", "is-data-descriptor": "^1.0.0", @@ -5389,14 +5389,14 @@ "kind-of": { "version": "6.0.2", "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.2.tgz", - "integrity": "sha512-s5kLOcnH0XqDO+FvuaLX8DDjZ18CGFk7VygH40QoKPUQhW4e2rvM0rwUq0t8IQDOwYSeLK01U90OjzBTme2QqA==" + "integrity": "sha1-ARRrNqYhjmTljzqNZt5df8b20FE=" } } }, "snapdragon-util": { "version": "3.0.1", "resolved": "https://registry.npmjs.org/snapdragon-util/-/snapdragon-util-3.0.1.tgz", - "integrity": "sha512-mbKkMdQKsjX4BAL4bRYTj21edOf8cN7XHdYUJEe+Zn99hVEYcMvKPct1IqNe7+AZPirn8BCDOQBHQZknqmKlZQ==", + "integrity": "sha1-+VZHlIbyrNeXAGk/b3uAXkWrVuI=", "requires": { "kind-of": "^3.2.0" } @@ -5417,7 +5417,7 @@ "source-map-resolve": { "version": "0.5.2", "resolved": "https://registry.npmjs.org/source-map-resolve/-/source-map-resolve-0.5.2.tgz", - "integrity": "sha512-MjqsvNwyz1s0k81Goz/9vRBe9SZdB09Bdw+/zYyO+3CuPk6fouTaxscHkgtE8jKvf01kVfl8riHzERQ/kefaSA==", + "integrity": "sha1-cuLMNAlVQ+Q7LGKyxMENSpBU8lk=", "requires": { "atob": "^2.1.1", "decode-uri-component": "^0.2.0", @@ -5429,7 +5429,7 @@ "source-map-support": { "version": "0.4.18", "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.4.18.tgz", - "integrity": "sha512-try0/JqxPLF9nOjvSta7tVondkP5dwgyLDjVoyMDlmjugT2lRZ1OfsrYTkCd2hkDnJTKRbO/Rl3orm8vlsUzbA==", + "integrity": "sha1-Aoam3ovkJkEzhZTpfM6nXwosWF8=", "requires": { "source-map": "^0.5.6" } @@ -5442,7 +5442,7 @@ "split-string": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/split-string/-/split-string-3.1.0.tgz", - "integrity": "sha512-NzNVhJDYpwceVVii8/Hu6DKfD2G+NrQHlS/V/qgv763EYudVwEcMQNxd2lh+0VrUByXN/oJkl5grOhYWvQUYiw==", + "integrity": "sha1-fLCd2jqGWFcFxks5pkZgOGguj+I=", "requires": { "extend-shallow": "^3.0.0" } @@ -5455,12 +5455,12 @@ "stable": { "version": "0.1.8", "resolved": "https://registry.npmjs.org/stable/-/stable-0.1.8.tgz", - "integrity": "sha512-ji9qxRnOVfcuLDySj9qzhGSEFVobyt1kIOSkj1qZzYLzq7Tos/oUUWvotUPQLlrsidqsK6tBH89Bc9kL5zHA6w==" + "integrity": "sha1-g26zyDgv4pNv6vVEYxAXzn1Ho88=" }, "static-eval": { "version": "2.0.0", "resolved": 
"https://registry.npmjs.org/static-eval/-/static-eval-2.0.0.tgz", - "integrity": "sha512-6flshd3F1Gwm+Ksxq463LtFd1liC77N/PX1FVVc3OzL3hAmo2fwHFbuArkcfi7s9rTNsLEhcRmXGFZhlgy40uw==", + "integrity": "sha1-DoIfiSaEfe97S1DNpdVcBKmxOGQ=", "requires": { "escodegen": "^1.8.1" } @@ -5487,7 +5487,7 @@ "static-module": { "version": "2.2.5", "resolved": "https://registry.npmjs.org/static-module/-/static-module-2.2.5.tgz", - "integrity": "sha512-D8vv82E/Kpmz3TXHKG8PPsCPg+RAX6cbCOyvjM6x04qZtQ47EtJFVwRsdov3n5d6/6ynrOY9XB4JkaZwB2xoRQ==", + "integrity": "sha1-vUCrzq4z2mt6+4Sg5DKf+IUr+78=", "requires": { "concat-stream": "~1.6.0", "convert-source-map": "^1.5.1", @@ -5508,7 +5508,7 @@ "statuses": { "version": "1.4.0", "resolved": "https://registry.npmjs.org/statuses/-/statuses-1.4.0.tgz", - "integrity": "sha512-zhSCtt8v2NDrRlPQpCNtw/heZLtfUDqxBM1udqikb/Hbk52LK4nQSwr10u77iopCW5LsyHpuXS0GnEc48mLeew==" + "integrity": "sha1-u3PURtonlhBu/MG2AaJT1sRr0Ic=" }, "stream-browserify": { "version": "2.0.1", @@ -5522,7 +5522,7 @@ "stream-http": { "version": "2.8.3", "resolved": "https://registry.npmjs.org/stream-http/-/stream-http-2.8.3.tgz", - "integrity": "sha512-+TSkfINHDo4J+ZobQLWiMouQYB+UVYFttRA94FpEzzJ7ZdqcL4uUUQ7WkdkI4DSozGmgBUE/a47L+38PenXhUw==", + "integrity": "sha1-stJCRpKIpaJ+xP6JM6z2I95lFPw=", "requires": { "builtin-status-codes": "^3.0.0", "inherits": "^2.0.1", @@ -5539,7 +5539,7 @@ "string_decoder": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", - "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", + "integrity": "sha1-nPFhG6YmhdcDCunkujQUnDrwP8g=", "requires": { "safe-buffer": "~5.1.0" } @@ -5565,7 +5565,7 @@ "browserslist": { "version": "4.0.1", "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.0.1.tgz", - "integrity": "sha512-QqiiIWchEIkney3wY53/huI7ZErouNAdvOkjorUALAwRcu3tEwOV3Sh6He0DnP38mz1JjBpCBb50jQBmaYuHPw==", + "integrity": "sha1-YcBc4qWEPH2WFmQIvCPVi1QW6Bg=", "requires": { "caniuse-lite": "^1.0.30000865", "electron-to-chromium": "^1.3.52", @@ -5575,7 +5575,7 @@ "caniuse-lite": { "version": "1.0.30000874", "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30000874.tgz", - "integrity": "sha512-29nr1EPiHwrJTAHHsEmTt2h+55L8j2GNFdAcYPlRy2NX6iFz7ZZiepVI7kP/QqlnHLq3KvfWpbmGa0d063U09w==" + "integrity": "sha1-pkGx8cQg1Y2bEykg72uoe73NIiM=" }, "electron-to-chromium": { "version": "1.3.55", @@ -5602,7 +5602,7 @@ "svgo": { "version": "1.0.5", "resolved": "https://registry.npmjs.org/svgo/-/svgo-1.0.5.tgz", - "integrity": "sha512-nYrifviB77aNKDNKKyuay3M9aYiK6Hv5gJVDdjj2ZXTQmI8WZc8+UPLR5IpVlktJfSu3co/4XcWgrgI6seGBPg==", + "integrity": "sha1-cEA2TAYqBTirrP9EAc6momp6OJo=", "requires": { "coa": "~2.0.1", "colors": "~1.1.2", @@ -5623,7 +5623,7 @@ "js-yaml": { "version": "3.10.0", "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.10.0.tgz", - "integrity": "sha512-O2v52ffjLa9VeM43J4XocZE//WT9N0IiwDa3KSHH7Tu8CtH+1qM8SIZvnsTh6v+4yFy5KUY3BHUVwjpfAWsjIA==", + "integrity": "sha1-LnhEFka9RoLpY/IrbpKCPDCcYtw=", "requires": { "argparse": "^1.0.7", "esprima": "^4.0.0" @@ -5634,7 +5634,7 @@ "terser": { "version": "3.8.1", "resolved": "https://registry.npmjs.org/terser/-/terser-3.8.1.tgz", - "integrity": "sha512-FRin3gKQ0vm0xPPLuxw1FqpVgv1b2pBpYCaFb5qe6A7sD749Fnq1VbDiX3CEFM0BV0fqDzFtBfgmxhxCdzKQIg==", + "integrity": "sha1-y3AHCsngpxrdFp37Y8CmT8onOKw=", "requires": { "commander": "~2.16.0", "source-map": "~0.6.1", @@ -5644,17 +5644,17 @@ "commander": { "version": 
"2.16.0", "resolved": "https://registry.npmjs.org/commander/-/commander-2.16.0.tgz", - "integrity": "sha512-sVXqklSaotK9at437sFlFpyOcJonxe0yST/AG9DkQKUdIE6IqGIMv4SfAQSKaJbSdVEJYItASCrBiVQHq1HQew==" + "integrity": "sha1-8WOQWTmWzrTz7rAgsx14Uo9/ilA=" }, "source-map": { "version": "0.6.1", "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==" + "integrity": "sha1-dHIq8y6WFOnCh6jQu95IteLxomM=" }, "source-map-support": { "version": "0.5.6", "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.6.tgz", - "integrity": "sha512-N4KXEz7jcKqPf2b2vZF11lQIz9W5ZMuUcIOGj243lduidkf2fjkVKJS9vNxVWn3u/uxX38AcE8U9nnH9FPcq+g==", + "integrity": "sha1-RDXO5Gsaq2K46GEM5g94gJHFHBM=", "requires": { "buffer-from": "^1.0.0", "source-map": "^0.6.0" @@ -5674,7 +5674,7 @@ "timers-browserify": { "version": "2.0.10", "resolved": "https://registry.npmjs.org/timers-browserify/-/timers-browserify-2.0.10.tgz", - "integrity": "sha512-YvC1SV1XdOUaL6gx5CoGroT3Gu49pK9+TZ38ErPldOWW4j49GI1HKs9DV+KGq/w6y+LZ72W1c8cKz2vzY+qpzg==", + "integrity": "sha1-HSjj0qrfHVpZlsTp+VYBzQU0gK4=", "requires": { "setimmediate": "^1.0.4" } @@ -5710,7 +5710,7 @@ "to-regex": { "version": "3.0.2", "resolved": "https://registry.npmjs.org/to-regex/-/to-regex-3.0.2.tgz", - "integrity": "sha512-FWtleNAtZ/Ki2qtqej2CXTOayOH9bHDQF+Q48VpWyDXjbYxA4Yz8iDB31zXOBUlOHHKidDbqGVrTUvQMPmBGBw==", + "integrity": "sha1-E8/dmzNlUvMLUfM6iuG0Knp1mc4=", "requires": { "define-property": "^2.0.2", "extend-shallow": "^3.0.2", @@ -5730,12 +5730,12 @@ "toml": { "version": "2.3.3", "resolved": "https://registry.npmjs.org/toml/-/toml-2.3.3.tgz", - "integrity": "sha512-O7L5hhSQHxuufWUdcTRPfuTh3phKfAZ/dqfxZFoxPCj2RYmpaSGLEIs016FCXItQwNr08yefUB5TSjzRYnajTA==" + "integrity": "sha1-jWg9cpV3yyhiMd/HqK/+WNMXKPs=" }, "tomlify-j0.4": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/tomlify-j0.4/-/tomlify-j0.4-3.0.0.tgz", - "integrity": "sha512-2Ulkc8T7mXJ2l0W476YC/A209PR38Nw8PuaCNtk9uI3t1zzFdGQeWYGQvmj2PZkVvRC/Yoi4xQKMRnWc/N29tQ==" + "integrity": "sha1-mUFNRSaMOjuL84voIUW3u6NLdHM=" }, "trim-right": { "version": "1.0.1", @@ -5862,7 +5862,7 @@ "upath": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/upath/-/upath-1.1.0.tgz", - "integrity": "sha512-bzpH/oBhoS/QI/YtbkqCg6VEiPYjSZtrHQM6/QnJS6OL9pKUFLqb3aFh4Scvwm45+7iAgiMkLhSbaZxUqmrprw==" + "integrity": "sha1-NSVll+RqWB20eT0M5H+prr/J+r0=" }, "urix": { "version": "0.1.0", @@ -5888,7 +5888,7 @@ "use": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/use/-/use-3.1.0.tgz", - "integrity": "sha512-6UJEQM/L+mzC3ZJNM56Q4DFGLX/evKGRg15UJHGB9X5j5Z3AFbgZvjUh2yq/UJUY4U5dh7Fal++XbNg1uzpRAw==", + "integrity": "sha1-FHFr8D/f79AwQK71jYtLhfOnxUQ=", "requires": { "kind-of": "^6.0.2" }, @@ -5896,14 +5896,14 @@ "kind-of": { "version": "6.0.2", "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.2.tgz", - "integrity": "sha512-s5kLOcnH0XqDO+FvuaLX8DDjZ18CGFk7VygH40QoKPUQhW4e2rvM0rwUq0t8IQDOwYSeLK01U90OjzBTme2QqA==" + "integrity": "sha1-ARRrNqYhjmTljzqNZt5df8b20FE=" } } }, "util": { "version": "0.10.4", "resolved": "https://registry.npmjs.org/util/-/util-0.10.4.tgz", - "integrity": "sha512-0Pm9hTQ3se5ll1XihRic3FDIku70C+iHUdT/W926rSgHV5QgXsYbKZN8MSC3tJtSkhuROzvsQjAaFENRXr+19A==", + "integrity": "sha1-OqASW/5mikZy3liFfTrOJ+y3aQE=", "requires": { "inherits": "2.0.3" } @@ -5916,7 +5916,7 @@ "util.promisify": { "version": "1.0.0", "resolved": 
"https://registry.npmjs.org/util.promisify/-/util.promisify-1.0.0.tgz", - "integrity": "sha512-i+6qA2MPhvoKLuxnJNpXAGhg7HphQOSUq2LKMZD0m15EiskXUkMvKdF4Uui0WYeCUGea+o2cw/ZuwehtfsrNkA==", + "integrity": "sha1-RA9xZaRZyaFtwUXrjnLzVocJcDA=", "requires": { "define-properties": "^1.1.2", "object.getownpropertydescriptors": "^2.0.3" @@ -5925,17 +5925,17 @@ "v8-compile-cache": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/v8-compile-cache/-/v8-compile-cache-2.0.0.tgz", - "integrity": "sha512-qNdTUMaCjPs4eEnM3W9H94R3sU70YCuT+/ST7nUf+id1bVOrdjrpUaeZLqPBPRph3hsgn4a4BvwpxhHZx+oSDg==" + "integrity": "sha1-UmSS41/GFoZChHALcEPgG67gnwo=" }, "vendors": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/vendors/-/vendors-1.0.2.tgz", - "integrity": "sha512-w/hry/368nO21AN9QljsaIhb9ZiZtZARoVH5f3CsFbawdLdayCgKRPup7CggujvySMxx0I91NOyxdVENohprLQ==" + "integrity": "sha1-f8te759WI7FWvOqJ7DfWNnbyGAE=" }, "vlq": { "version": "0.2.3", "resolved": "https://registry.npmjs.org/vlq/-/vlq-0.2.3.tgz", - "integrity": "sha512-DRibZL6DsNhIgYQ+wNdWDL2SL3bKPlVrRiBqV5yuMm++op8W4kGFtaQfCs4KEJn0wBZcHVHJ3eoywX8983k1ow==" + "integrity": "sha1-jz5DKM9jsVQMDWfhsneDhviXWyY=" }, "vm-browserify": { "version": "0.0.4", @@ -5961,7 +5961,7 @@ "which": { "version": "1.3.1", "resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz", - "integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==", + "integrity": "sha1-pFBD1U9YBTFtqNYvn1CRjT2nCwo=", "requires": { "isexe": "^2.0.0" } @@ -5969,7 +5969,7 @@ "ws": { "version": "5.2.2", "resolved": "https://registry.npmjs.org/ws/-/ws-5.2.2.tgz", - "integrity": "sha512-jaHFD6PFv6UgoIVda6qZllptQsMlDEJkTQcybzzXDYM1XO9Y8em691FGMPmM46WGyLU4z9KMgQN+qrux/nhlHA==", + "integrity": "sha1-3/7xSGa46NyRM1glFNG++vlumA8=", "requires": { "async-limiter": "~1.0.0" } diff --git a/shuup_tests/browser/admin/test_menu.py b/shuup_tests/browser/admin/test_menu.py index 37ac2b6ad3..a0d26120de 100644 --- a/shuup_tests/browser/admin/test_menu.py +++ b/shuup_tests/browser/admin/test_menu.py @@ -82,11 +82,29 @@ def test_menu_toggle(browser, admin_user, live_server, settings): wait_until_condition(browser, lambda x: x.is_text_present("Welcome!")) wait_until_condition(browser, lambda x: x.is_text_present("Quicklinks")) - browser.find_by_css("#menu-button").first.click() - url = live_server + "/sa" - browser.visit(url) - assert browser.find_by_css(".desktop-menu-closed") + wait_until_condition(browser, lambda x: x.is_element_present_by_css("#menu-button")) + # Close menu + try: + browser.find_by_css("#menu-button").first.click() + except selenium.common.exceptions.TimeoutException as e: + browser.find_by_css("#menu-button").first.click() + wait_until_condition(browser, lambda x: x.is_element_present_by_css(".desktop-menu-closed")) + + url = reverse("shuup_admin:order.list") + browser.visit("%s%s" % (live_server, url)) + wait_until_condition(browser, condition=lambda x: x.is_text_present("Orders")) + + # Should be closed after page load + wait_until_condition(browser, lambda x: x.is_element_present_by_css(".desktop-menu-closed")) + + # Open menu browser.find_by_css("#menu-button").first.click() - browser.visit(url) - assert not browser.find_by_css(".desktop-menu-closed") + wait_until_condition(browser, lambda x: not x.is_element_present_by_css(".desktop-menu-closed")) + + url = reverse("shuup_admin:shop_product.list") + browser.visit("%s%s" % (live_server, url)) + wait_until_condition(browser, condition=lambda x: x.is_text_present("Products")) + + # 
Should be still open after page load + wait_until_condition(browser, lambda x: not x.is_element_present_by_css(".desktop-menu-closed"))
pallets__click-123
Automatically dedent help text of options?

```
import click


@click.command()
@click.option('--foo', help="""
    heyho

    i am multiline
""")
def cli(foo):
    click.echo(foo)

cli()
```

This currently does not remove the leading whitespace from each paragraph in the help text for `--foo`:

```
untitaker@untibox:~/projects/click, branch master
$ python lol.py --help
Usage: lol.py [OPTIONS]

Options:
  --foo TEXT  heyho i am multiline
  --help      Show this message and exit.
```

Although i am not sure if help texts for options are even supposed to get that complex.
[ { "content": "import sys\nimport inspect\n\nfrom functools import update_wrapper\n\nfrom ._compat import iteritems\nfrom .utils import echo\n\n\ndef pass_context(f):\n \"\"\"Marks a callback as wanting to receive the current context\n object as first argument.\n \"\"\"\n f.__click_pass_context__ = True\n return f\n\n\ndef pass_obj(f):\n \"\"\"Similar to :func:`pass_context`, but only pass the object on the\n context onwards (:attr:`Context.obj`). This is useful if that object\n represents the state of a nested system.\n \"\"\"\n @pass_context\n def new_func(*args, **kwargs):\n ctx = args[0]\n return ctx.invoke(f, ctx.obj, *args[1:], **kwargs)\n return update_wrapper(new_func, f)\n\n\ndef make_pass_decorator(object_type, ensure=False):\n \"\"\"Given an object type this creates a decorator that will work\n similar to :func:`pass_obj` but instead of passing the object of the\n current context, it will find the innermost context of type\n :func:`object_type`.\n\n This generates a decorator that works roughly like this::\n\n from functools import update_wrapper\n\n def decorator(f):\n @pass_context\n def new_func(ctx, *args, **kwargs):\n obj = ctx.find_object(object_type)\n return ctx.invoke(f, obj, *args, **kwargs)\n return update_wrapper(new_func, f)\n return decorator\n\n :param object_type: the type of the object to pass.\n :param ensure: if set to `True`, a new object will be created and\n remembered on the context if it's not there yet.\n \"\"\"\n def decorator(f):\n @pass_context\n def new_func(*args, **kwargs):\n ctx = args[0]\n if ensure:\n obj = ctx.ensure_object(object_type)\n else:\n obj = ctx.find_object(object_type)\n if obj is None:\n raise RuntimeError('Managed to invoke callback without a '\n 'context object of type %r existing'\n % object_type.__name__)\n return ctx.invoke(f, obj, *args[1:], **kwargs)\n return update_wrapper(new_func, f)\n return decorator\n\n\ndef _make_command(f, name, attrs, cls):\n if isinstance(f, Command):\n raise TypeError('Attempted to convert a callback into a '\n 'command twice.')\n try:\n params = f.__click_params__\n params.reverse()\n del f.__click_params__\n except AttributeError:\n params = []\n help = inspect.getdoc(f)\n if isinstance(help, bytes):\n help = help.decode('utf-8')\n attrs.setdefault('help', help)\n return cls(name=name or f.__name__.lower(),\n callback=f, params=params, **attrs)\n\n\ndef command(name=None, cls=None, **attrs):\n \"\"\"Creates a new :class:`Command` and uses the decorated function as\n callback. This will also automatically attach all decorated\n :func:`option`\\s and :func:`argument`\\s as parameters to the command.\n\n The name of the command defaults to the name of the function. If you\n want to change that, you can pass the intended name as the first\n argument.\n\n All keyword arguments are forwarded to the underlying command class.\n\n Once decorated the function turns into a :class:`Command` instance\n that can be invoked as a command line utility or be attached to a\n command :class:`Group`.\n\n :param name: the name of the command. This defaults to the function\n name.\n :param cls: the command class to instantiate. This defaults to\n :class:`Command`.\n \"\"\"\n if cls is None:\n cls = Command\n def decorator(f):\n return _make_command(f, name, attrs, cls)\n return decorator\n\n\ndef group(name=None, **attrs):\n \"\"\"Creates a new :class:`Group` with a function as callback. 
This\n works otherwise the same as :func:`command` just that the `cls`\n parameter is set to :class:`Group`.\n \"\"\"\n attrs.setdefault('cls', Group)\n return command(name, **attrs)\n\n\ndef _param_memo(f, param):\n if isinstance(f, Command):\n f.params.append(param)\n else:\n if not hasattr(f, '__click_params__'):\n f.__click_params__ = []\n f.__click_params__.append(param)\n\n\ndef argument(*param_decls, **attrs):\n \"\"\"Attaches an option to the command. All positional arguments are\n passed as parameter declarations to :class:`Argument`; all keyword\n arguments are forwarded unchanged. This is equivalent to creating an\n :class:`Option` instance manually and attaching it to the\n :attr:`Command.params` list.\n \"\"\"\n def decorator(f):\n _param_memo(f, Argument(param_decls, **attrs))\n return f\n return decorator\n\n\ndef option(*param_decls, **attrs):\n \"\"\"Attaches an option to the command. All positional arguments are\n passed as parameter declarations to :class:`Option`; all keyword\n arguments are forwarded unchanged. This is equivalent to creating an\n :class:`Option` instance manually and attaching it to the\n :attr:`Command.params` list.\n \"\"\"\n def decorator(f):\n _param_memo(f, Option(param_decls, **attrs))\n return f\n return decorator\n\n\ndef confirmation_option(*param_decls, **attrs):\n \"\"\"Shortcut for confirmation prompts that can be ignored by passing\n ``--yes`` as parameter.\n\n This is equivalent to decorating a function with :func:`option` with\n the following parameters::\n\n def callback(ctx, param, value):\n if not value:\n ctx.abort()\n\n @click.command()\n @click.option('--yes', is_flag=True, callback=callback,\n expose_value=False, prompt='Do you want to continue?')\n def dropdb():\n pass\n \"\"\"\n def decorator(f):\n def callback(ctx, param, value):\n if not value:\n ctx.abort()\n attrs.setdefault('is_flag', True)\n attrs.setdefault('callback', callback)\n attrs.setdefault('expose_value', False)\n attrs.setdefault('prompt', 'Do you want to continue?')\n attrs.setdefault('help', 'Confirm the action without prompting.')\n return option(*(param_decls or ('--yes',)), **attrs)(f)\n return decorator\n\n\ndef password_option(*param_decls, **attrs):\n \"\"\"Shortcut for password prompts.\n\n This is equivalent to decorating a function with :func:`option` with\n the following parameters::\n\n @click.command()\n @click.option('--password', prompt=True, confirmation_prompt=True,\n hide_input=True)\n def changeadmin(password):\n pass\n \"\"\"\n def decorator(f):\n attrs.setdefault('prompt', True)\n attrs.setdefault('confirmation_prompt', True)\n attrs.setdefault('hide_input', True)\n return option(*(param_decls or ('--password',)), **attrs)(f)\n return decorator\n\n\ndef version_option(version=None, *param_decls, **attrs):\n \"\"\"Adds a ``--version`` option which immediately ends the program\n printing out the version number. This is implemented as an eager\n option that prints the version and exits the program in the callback.\n\n :param version: the version number to show. 
If not provided click\n attempts an auto discovery via setuptools.\n :param prog_name: the name of the program (defaults to autodetection)\n :param message: custom message to show instead of the default\n (``'%(prog)s, version %(version)s'``)\n :param others: everything else is forwarded to :func:`option`.\n \"\"\"\n if version is None:\n module = sys._getframe(1).f_globals.get('__name__')\n def decorator(f):\n prog_name = attrs.pop('prog_name', None)\n message = attrs.pop('message', '%(prog)s, version %(version)s')\n\n def callback(ctx, param, value):\n if not value or ctx.resilient_parsing:\n return\n prog = prog_name\n if prog is None:\n prog = ctx.find_root().info_name\n ver = version\n if ver is None:\n try:\n import pkg_resources\n except ImportError:\n pass\n else:\n for dist in pkg_resources.working_set:\n scripts = dist.get_entry_map().get('console_scripts') or {}\n for script_name, entry_point in iteritems(scripts):\n if entry_point.module_name == module:\n ver = dist.version\n break\n if ver is None:\n raise RuntimeError('Could not determine version')\n echo(message % {\n 'prog': prog,\n 'version': ver,\n })\n ctx.exit()\n\n attrs.setdefault('is_flag', True)\n attrs.setdefault('expose_value', False)\n attrs.setdefault('is_eager', True)\n attrs.setdefault('help', 'Show the version and exit.')\n attrs['callback'] = callback\n return option(*(param_decls or ('--version',)), **attrs)(f)\n return decorator\n\n\ndef help_option(*param_decls, **attrs):\n \"\"\"Adds a ``--help`` option which immediately ends the program\n printing out the help page. This is usually unnecessary to add as\n this is added by default to all commands unless suppressed.\n\n Like :func:`version_option`, this is implemented as eager option that\n prints in the callback and exits.\n\n All arguments are forwarded to :func:`option`.\n \"\"\"\n def decorator(f):\n def callback(ctx, param, value):\n if value and not ctx.resilient_parsing:\n echo(ctx.get_help())\n ctx.exit()\n attrs.setdefault('is_flag', True)\n attrs.setdefault('expose_value', False)\n attrs.setdefault('help', 'Show this message and exit.')\n attrs.setdefault('is_eager', True)\n attrs['callback'] = callback\n return option(*(param_decls or ('--help',)), **attrs)(f)\n return decorator\n\n\n# Circular dependencies between core and decorators\nfrom .core import Command, Group, Argument, Option\n", "path": "click/decorators.py" } ]
[ { "content": "import sys\nimport inspect\n\nfrom functools import update_wrapper\n\nfrom ._compat import iteritems\nfrom .utils import echo\n\n\ndef pass_context(f):\n \"\"\"Marks a callback as wanting to receive the current context\n object as first argument.\n \"\"\"\n f.__click_pass_context__ = True\n return f\n\n\ndef pass_obj(f):\n \"\"\"Similar to :func:`pass_context`, but only pass the object on the\n context onwards (:attr:`Context.obj`). This is useful if that object\n represents the state of a nested system.\n \"\"\"\n @pass_context\n def new_func(*args, **kwargs):\n ctx = args[0]\n return ctx.invoke(f, ctx.obj, *args[1:], **kwargs)\n return update_wrapper(new_func, f)\n\n\ndef make_pass_decorator(object_type, ensure=False):\n \"\"\"Given an object type this creates a decorator that will work\n similar to :func:`pass_obj` but instead of passing the object of the\n current context, it will find the innermost context of type\n :func:`object_type`.\n\n This generates a decorator that works roughly like this::\n\n from functools import update_wrapper\n\n def decorator(f):\n @pass_context\n def new_func(ctx, *args, **kwargs):\n obj = ctx.find_object(object_type)\n return ctx.invoke(f, obj, *args, **kwargs)\n return update_wrapper(new_func, f)\n return decorator\n\n :param object_type: the type of the object to pass.\n :param ensure: if set to `True`, a new object will be created and\n remembered on the context if it's not there yet.\n \"\"\"\n def decorator(f):\n @pass_context\n def new_func(*args, **kwargs):\n ctx = args[0]\n if ensure:\n obj = ctx.ensure_object(object_type)\n else:\n obj = ctx.find_object(object_type)\n if obj is None:\n raise RuntimeError('Managed to invoke callback without a '\n 'context object of type %r existing'\n % object_type.__name__)\n return ctx.invoke(f, obj, *args[1:], **kwargs)\n return update_wrapper(new_func, f)\n return decorator\n\n\ndef _make_command(f, name, attrs, cls):\n if isinstance(f, Command):\n raise TypeError('Attempted to convert a callback into a '\n 'command twice.')\n try:\n params = f.__click_params__\n params.reverse()\n del f.__click_params__\n except AttributeError:\n params = []\n help = inspect.getdoc(f)\n if isinstance(help, bytes):\n help = help.decode('utf-8')\n attrs.setdefault('help', help)\n return cls(name=name or f.__name__.lower(),\n callback=f, params=params, **attrs)\n\n\ndef command(name=None, cls=None, **attrs):\n \"\"\"Creates a new :class:`Command` and uses the decorated function as\n callback. This will also automatically attach all decorated\n :func:`option`\\s and :func:`argument`\\s as parameters to the command.\n\n The name of the command defaults to the name of the function. If you\n want to change that, you can pass the intended name as the first\n argument.\n\n All keyword arguments are forwarded to the underlying command class.\n\n Once decorated the function turns into a :class:`Command` instance\n that can be invoked as a command line utility or be attached to a\n command :class:`Group`.\n\n :param name: the name of the command. This defaults to the function\n name.\n :param cls: the command class to instantiate. This defaults to\n :class:`Command`.\n \"\"\"\n if cls is None:\n cls = Command\n def decorator(f):\n return _make_command(f, name, attrs, cls)\n return decorator\n\n\ndef group(name=None, **attrs):\n \"\"\"Creates a new :class:`Group` with a function as callback. 
This\n works otherwise the same as :func:`command` just that the `cls`\n parameter is set to :class:`Group`.\n \"\"\"\n attrs.setdefault('cls', Group)\n return command(name, **attrs)\n\n\ndef _param_memo(f, param):\n if isinstance(f, Command):\n f.params.append(param)\n else:\n if not hasattr(f, '__click_params__'):\n f.__click_params__ = []\n f.__click_params__.append(param)\n\n\ndef argument(*param_decls, **attrs):\n \"\"\"Attaches an option to the command. All positional arguments are\n passed as parameter declarations to :class:`Argument`; all keyword\n arguments are forwarded unchanged. This is equivalent to creating an\n :class:`Option` instance manually and attaching it to the\n :attr:`Command.params` list.\n \"\"\"\n def decorator(f):\n _param_memo(f, Argument(param_decls, **attrs))\n return f\n return decorator\n\n\ndef option(*param_decls, **attrs):\n \"\"\"Attaches an option to the command. All positional arguments are\n passed as parameter declarations to :class:`Option`; all keyword\n arguments are forwarded unchanged. This is equivalent to creating an\n :class:`Option` instance manually and attaching it to the\n :attr:`Command.params` list.\n \"\"\"\n def decorator(f):\n if 'help' in attrs:\n attrs['help'] = inspect.cleandoc(attrs['help'])\n _param_memo(f, Option(param_decls, **attrs))\n return f\n return decorator\n\n\ndef confirmation_option(*param_decls, **attrs):\n \"\"\"Shortcut for confirmation prompts that can be ignored by passing\n ``--yes`` as parameter.\n\n This is equivalent to decorating a function with :func:`option` with\n the following parameters::\n\n def callback(ctx, param, value):\n if not value:\n ctx.abort()\n\n @click.command()\n @click.option('--yes', is_flag=True, callback=callback,\n expose_value=False, prompt='Do you want to continue?')\n def dropdb():\n pass\n \"\"\"\n def decorator(f):\n def callback(ctx, param, value):\n if not value:\n ctx.abort()\n attrs.setdefault('is_flag', True)\n attrs.setdefault('callback', callback)\n attrs.setdefault('expose_value', False)\n attrs.setdefault('prompt', 'Do you want to continue?')\n attrs.setdefault('help', 'Confirm the action without prompting.')\n return option(*(param_decls or ('--yes',)), **attrs)(f)\n return decorator\n\n\ndef password_option(*param_decls, **attrs):\n \"\"\"Shortcut for password prompts.\n\n This is equivalent to decorating a function with :func:`option` with\n the following parameters::\n\n @click.command()\n @click.option('--password', prompt=True, confirmation_prompt=True,\n hide_input=True)\n def changeadmin(password):\n pass\n \"\"\"\n def decorator(f):\n attrs.setdefault('prompt', True)\n attrs.setdefault('confirmation_prompt', True)\n attrs.setdefault('hide_input', True)\n return option(*(param_decls or ('--password',)), **attrs)(f)\n return decorator\n\n\ndef version_option(version=None, *param_decls, **attrs):\n \"\"\"Adds a ``--version`` option which immediately ends the program\n printing out the version number. This is implemented as an eager\n option that prints the version and exits the program in the callback.\n\n :param version: the version number to show. 
If not provided click\n attempts an auto discovery via setuptools.\n :param prog_name: the name of the program (defaults to autodetection)\n :param message: custom message to show instead of the default\n (``'%(prog)s, version %(version)s'``)\n :param others: everything else is forwarded to :func:`option`.\n \"\"\"\n if version is None:\n module = sys._getframe(1).f_globals.get('__name__')\n def decorator(f):\n prog_name = attrs.pop('prog_name', None)\n message = attrs.pop('message', '%(prog)s, version %(version)s')\n\n def callback(ctx, param, value):\n if not value or ctx.resilient_parsing:\n return\n prog = prog_name\n if prog is None:\n prog = ctx.find_root().info_name\n ver = version\n if ver is None:\n try:\n import pkg_resources\n except ImportError:\n pass\n else:\n for dist in pkg_resources.working_set:\n scripts = dist.get_entry_map().get('console_scripts') or {}\n for script_name, entry_point in iteritems(scripts):\n if entry_point.module_name == module:\n ver = dist.version\n break\n if ver is None:\n raise RuntimeError('Could not determine version')\n echo(message % {\n 'prog': prog,\n 'version': ver,\n })\n ctx.exit()\n\n attrs.setdefault('is_flag', True)\n attrs.setdefault('expose_value', False)\n attrs.setdefault('is_eager', True)\n attrs.setdefault('help', 'Show the version and exit.')\n attrs['callback'] = callback\n return option(*(param_decls or ('--version',)), **attrs)(f)\n return decorator\n\n\ndef help_option(*param_decls, **attrs):\n \"\"\"Adds a ``--help`` option which immediately ends the program\n printing out the help page. This is usually unnecessary to add as\n this is added by default to all commands unless suppressed.\n\n Like :func:`version_option`, this is implemented as eager option that\n prints in the callback and exits.\n\n All arguments are forwarded to :func:`option`.\n \"\"\"\n def decorator(f):\n def callback(ctx, param, value):\n if value and not ctx.resilient_parsing:\n echo(ctx.get_help())\n ctx.exit()\n attrs.setdefault('is_flag', True)\n attrs.setdefault('expose_value', False)\n attrs.setdefault('help', 'Show this message and exit.')\n attrs.setdefault('is_eager', True)\n attrs['callback'] = callback\n return option(*(param_decls or ('--help',)), **attrs)(f)\n return decorator\n\n\n# Circular dependencies between core and decorators\nfrom .core import Command, Group, Argument, Option\n", "path": "click/decorators.py" } ]
diff --git a/click/decorators.py b/click/decorators.py index 4b81ac50f..25623f3f7 100644 --- a/click/decorators.py +++ b/click/decorators.py @@ -150,6 +150,8 @@ def option(*param_decls, **attrs): :attr:`Command.params` list. """ def decorator(f): + if 'help' in attrs: + attrs['help'] = inspect.cleandoc(attrs['help']) _param_memo(f, Option(param_decls, **attrs)) return f return decorator diff --git a/tests/test_options.py b/tests/test_options.py index d6ed48303..ec34cc300 100644 --- a/tests/test_options.py +++ b/tests/test_options.py @@ -169,3 +169,23 @@ def cmd(foo): assert result.exit_code == 2 assert 'Error: Missing option "--foo". Choose from foo, bar.' \ in result.output + + +def test_multiline_help(runner): + @click.command() + @click.option('--foo', help=""" + hello + + i am + + multiline + """) + def cmd(foo): + click.echo(foo) + + result = runner.invoke(cmd, ['--help']) + assert result.exit_code == 0 + out = result.output.splitlines() + assert ' --foo TEXT hello' in out + assert ' i am' in out + assert ' multiline' in out
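Editor's note: a minimal sketch (not part of the record above) of what the merged change does. The patched `option()` decorator runs the `help` keyword through the standard-library `inspect.cleandoc` before constructing the `Option`; the sample string below simply mirrors the issue's example and the printed result is what the dedented help text looks like.

```python
# Illustration only: inspect.cleandoc is the stdlib helper the patch applies
# to the 'help' keyword of click.option().
import inspect

raw_help = """
    heyho

    i am multiline
"""

# cleandoc drops the leading/trailing blank lines and the common indentation,
# so the option help renders flush in the --help output.
print(inspect.cleandoc(raw_help))
# heyho
#
# i am multiline
```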
dpgmediamagazines__django-arctic-341
QuickFiltersSelectMultiple add default attrs

For quick filters Arctic has 2 widgets: `QuickFiltersSelect` and `QuickFiltersSelectMultiple`. They use the same template, and it requires the `select_multiple` property in `attrs` to make the widget work checkbox-like. It would be much easier to have this attribute set by default in `QuickFiltersSelectMultiple`, so it does not have to be specified manually each time.
[ { "content": "from datetime import datetime\n\nimport django\nfrom django.forms.widgets import (\n ClearableFileInput,\n DateInput,\n DateTimeInput,\n Select,\n SelectMultiple,\n TextInput,\n TimeInput,\n Input,\n CheckboxSelectMultiple,\n RadioSelect,\n)\nfrom django.template.loader import render_to_string\nfrom django.utils.safestring import mark_safe\n\n\nclass StyledSelect(Select):\n def render(self, name, value, attrs=None, renderer=None):\n try:\n select_render = super(StyledSelect, self).render(\n name, value, attrs, renderer\n )\n except TypeError: # versions older than Django 1.11\n select_render = super(StyledSelect, self).render(\n name, value, attrs\n )\n\n return mark_safe(\n '<div class=\"styled-select\">{}</div>'.format(select_render)\n )\n\n\nclass Selectize(Select):\n def __init__(self, attrs={}, choices=()):\n attrs[\"js-selectize\"] = True\n super(Selectize, self).__init__(attrs, choices)\n\n\nclass SelectizeMultiple(SelectMultiple):\n def __init__(self, attrs={}, choices=()):\n attrs[\"js-selectize-multiple\"] = True\n super(SelectizeMultiple, self).__init__(attrs, choices)\n\n\nclass SelectizeAutoComplete(Select):\n def __init__(self, url, attrs={}, choices=()):\n attrs[\"js-selectize-autocomplete\"] = True\n attrs[\"data-url\"] = url\n super(SelectizeAutoComplete, self).__init__(attrs, choices)\n\n\nclass PickerFormatMixin(Input):\n \"\"\"\n Handle formatting of widget input value\n\n Attributes:\n display_format(str): string that will\n be used to format input value before render\n\n widget_attribute_key(str): represents attribute name\n to which formatted input value will be assigned\n \"\"\"\n\n display_format = None\n widget_attribute_key = None\n\n def get_context(self, name, value, attrs):\n context = super(PickerFormatMixin, self).get_context(\n name, value, attrs\n )\n if isinstance(value, datetime):\n value = value.strftime(self.display_format)\n context[\"widget\"][\"attrs\"][self.widget_attribute_key] = value\n return context\n\n\nclass DateTimePickerInput(PickerFormatMixin, DateTimeInput):\n def __init__(self, attrs={}, format=None):\n attrs[\"js-datetimepicker\"] = True\n self.display_format = \"%m/%d/%Y %I:%M %p\"\n self.widget_attribute_key = \"data-datetime\"\n super(DateTimePickerInput, self).__init__(attrs, format)\n\n\nclass DatePickerInput(PickerFormatMixin, DateInput):\n def __init__(self, attrs={}, format=None):\n attrs[\"js-datepicker\"] = True\n self.display_format = \"%m/%d/%Y\"\n self.widget_attribute_key = \"data-date\"\n super(DatePickerInput, self).__init__(attrs, format)\n\n\nclass TimePickerInput(PickerFormatMixin, TimeInput):\n def __init__(self, attrs={}, format=None):\n attrs[\"js-timepicker\"] = True\n self.display_format = \"%I:%M %p\"\n self.widget_attribute_key = \"data-time\"\n super(TimePickerInput, self).__init__(attrs, format)\n\n\nclass QuickFiltersSelectMixin(object):\n template_name = \"arctic/widgets/quick_filters_select.html\"\n\n def get_context(self, name, value, attrs=None, *args, **kwargs):\n if django.VERSION >= (1, 11):\n return super(QuickFiltersSelectMixin, self).get_context(\n name, value, attrs\n )\n else:\n # django 1.10 doesn't support optgroups\n # and render choices in method\n context = {\"widget\": self, \"attrs\": attrs}\n optgroups = []\n for val, label in self.choices:\n option = {\n \"name\": name,\n \"value\": val,\n \"selected\": val in value,\n \"label\": label,\n }\n optgroups.append((None, [option], None))\n context[\"widget\"].optgroups = optgroups\n return context\n\n def render(self, name, 
value, attrs=None, renderer=None):\n \"\"\"For django 1.10 compatibility\"\"\"\n if django.VERSION >= (1, 11):\n return super(QuickFiltersSelectMixin, self).render(\n name, value, attrs\n )\n\n t = render_to_string(\n template_name=self.template_name,\n context=self.get_context(name, value, attrs),\n )\n return mark_safe(t)\n\n\nclass QuickFiltersSelect(QuickFiltersSelectMixin, RadioSelect):\n \"\"\"\n This widget is used when you want select only one active filter\n \"\"\"\n\n\nclass QuickFiltersSelectMultiple(\n QuickFiltersSelectMixin, CheckboxSelectMultiple\n):\n \"\"\"\n This widget is used to be able to have a more than one active filters\n \"\"\"\n\n\nclass SearchInput(TextInput):\n \"\"\"\n Widget used in the inline search field on top of ListViews\n \"\"\"\n\n template_name = \"arctic/widgets/search_input.html\"\n\n def render(self, name, value, attrs=None, renderer=None):\n \"\"\"For django 1.10 compatibility\"\"\"\n if django.VERSION >= (1, 11):\n return super(SearchInput, self).render(name, value, attrs)\n\n t = render_to_string(\n template_name=self.template_name,\n context=self.get_context(name, value, attrs),\n )\n return mark_safe(t)\n\n\nclass BetterFileInput(ClearableFileInput):\n \"\"\"\n File input replacement with Image preview\n \"\"\"\n\n template_name = \"arctic/widgets/file_input.html\"\n\n def render(self, name, value, attrs=None, renderer=None):\n \"\"\"For django 1.10 compatibility\"\"\"\n if django.VERSION >= (1, 11):\n return super(BetterFileInput, self).render(name, value, attrs)\n\n t = render_to_string(\n template_name=self.template_name,\n context=self.get_context(name, value, attrs),\n )\n return mark_safe(t)\n", "path": "arctic/widgets.py" } ]
[ { "content": "from datetime import datetime\n\nimport django\nfrom django.forms.widgets import (\n ClearableFileInput,\n DateInput,\n DateTimeInput,\n Select,\n SelectMultiple,\n TextInput,\n TimeInput,\n Input,\n CheckboxSelectMultiple,\n RadioSelect,\n)\nfrom django.template.loader import render_to_string\nfrom django.utils.safestring import mark_safe\n\n\nclass StyledSelect(Select):\n def render(self, name, value, attrs=None, renderer=None):\n try:\n select_render = super(StyledSelect, self).render(\n name, value, attrs, renderer\n )\n except TypeError: # versions older than Django 1.11\n select_render = super(StyledSelect, self).render(\n name, value, attrs\n )\n\n return mark_safe(\n '<div class=\"styled-select\">{}</div>'.format(select_render)\n )\n\n\nclass Selectize(Select):\n def __init__(self, attrs={}, choices=()):\n attrs[\"js-selectize\"] = True\n super(Selectize, self).__init__(attrs, choices)\n\n\nclass SelectizeMultiple(SelectMultiple):\n def __init__(self, attrs={}, choices=()):\n attrs[\"js-selectize-multiple\"] = True\n super(SelectizeMultiple, self).__init__(attrs, choices)\n\n\nclass SelectizeAutoComplete(Select):\n def __init__(self, url, attrs={}, choices=()):\n attrs[\"js-selectize-autocomplete\"] = True\n attrs[\"data-url\"] = url\n super(SelectizeAutoComplete, self).__init__(attrs, choices)\n\n\nclass PickerFormatMixin(Input):\n \"\"\"\n Handle formatting of widget input value\n\n Attributes:\n display_format(str): string that will\n be used to format input value before render\n\n widget_attribute_key(str): represents attribute name\n to which formatted input value will be assigned\n \"\"\"\n\n display_format = None\n widget_attribute_key = None\n\n def get_context(self, name, value, attrs):\n context = super(PickerFormatMixin, self).get_context(\n name, value, attrs\n )\n if isinstance(value, datetime):\n value = value.strftime(self.display_format)\n context[\"widget\"][\"attrs\"][self.widget_attribute_key] = value\n return context\n\n\nclass DateTimePickerInput(PickerFormatMixin, DateTimeInput):\n def __init__(self, attrs={}, format=None):\n attrs[\"js-datetimepicker\"] = True\n self.display_format = \"%m/%d/%Y %I:%M %p\"\n self.widget_attribute_key = \"data-datetime\"\n super(DateTimePickerInput, self).__init__(attrs, format)\n\n\nclass DatePickerInput(PickerFormatMixin, DateInput):\n def __init__(self, attrs={}, format=None):\n attrs[\"js-datepicker\"] = True\n self.display_format = \"%m/%d/%Y\"\n self.widget_attribute_key = \"data-date\"\n super(DatePickerInput, self).__init__(attrs, format)\n\n\nclass TimePickerInput(PickerFormatMixin, TimeInput):\n def __init__(self, attrs={}, format=None):\n attrs[\"js-timepicker\"] = True\n self.display_format = \"%I:%M %p\"\n self.widget_attribute_key = \"data-time\"\n super(TimePickerInput, self).__init__(attrs, format)\n\n\nclass QuickFiltersSelectMixin(object):\n template_name = \"arctic/widgets/quick_filters_select.html\"\n\n def get_context(self, name, value, attrs=None, *args, **kwargs):\n if django.VERSION >= (1, 11):\n return super(QuickFiltersSelectMixin, self).get_context(\n name, value, attrs\n )\n else:\n # django 1.10 doesn't support optgroups\n # and render choices in method\n context = {\"widget\": self, \"attrs\": attrs}\n optgroups = []\n for val, label in self.choices:\n option = {\n \"name\": name,\n \"value\": val,\n \"selected\": val in value,\n \"label\": label,\n }\n optgroups.append((None, [option], None))\n context[\"widget\"].optgroups = optgroups\n return context\n\n def render(self, name, 
value, attrs=None, renderer=None):\n \"\"\"For django 1.10 compatibility\"\"\"\n if django.VERSION >= (1, 11):\n return super(QuickFiltersSelectMixin, self).render(\n name, value, attrs\n )\n\n t = render_to_string(\n template_name=self.template_name,\n context=self.get_context(name, value, attrs),\n )\n return mark_safe(t)\n\n\nclass QuickFiltersSelect(QuickFiltersSelectMixin, RadioSelect):\n \"\"\"\n This widget is used when you want select only one active filter\n \"\"\"\n\n\nclass QuickFiltersSelectMultiple(\n QuickFiltersSelectMixin, CheckboxSelectMultiple\n):\n \"\"\"\n This widget is used to be able to have a more than one active filters\n \"\"\"\n def __init__(self, attrs=None, **kwargs):\n attrs = attrs or {}\n attrs['select_multiple'] = True\n super().__init__(attrs, **kwargs)\n\n\nclass SearchInput(TextInput):\n \"\"\"\n Widget used in the inline search field on top of ListViews\n \"\"\"\n\n template_name = \"arctic/widgets/search_input.html\"\n\n def render(self, name, value, attrs=None, renderer=None):\n \"\"\"For django 1.10 compatibility\"\"\"\n if django.VERSION >= (1, 11):\n return super(SearchInput, self).render(name, value, attrs)\n\n t = render_to_string(\n template_name=self.template_name,\n context=self.get_context(name, value, attrs),\n )\n return mark_safe(t)\n\n\nclass BetterFileInput(ClearableFileInput):\n \"\"\"\n File input replacement with Image preview\n \"\"\"\n\n template_name = \"arctic/widgets/file_input.html\"\n\n def render(self, name, value, attrs=None, renderer=None):\n \"\"\"For django 1.10 compatibility\"\"\"\n if django.VERSION >= (1, 11):\n return super(BetterFileInput, self).render(name, value, attrs)\n\n t = render_to_string(\n template_name=self.template_name,\n context=self.get_context(name, value, attrs),\n )\n return mark_safe(t)\n", "path": "arctic/widgets.py" } ]
diff --git a/CHANGELOG.md b/CHANGELOG.md index 3d0e637e..d1050210 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,8 +13,10 @@ Always reference the ticket number at the end of the issue description. ### Fixed - BREAKING! Fixed response in DeletionMixin after `delete` method is called [#334][334] +- Added default `select_multiple` attr for `QuickFiltersSelectMultiple` widget [#340][340] [334]: //github.com/sanoma/django-arctic/issues/334 +[340]: //github.com/sanoma/django-arctic/issues/340 ## 1.3.6 (2019-01-22) diff --git a/arctic/widgets.py b/arctic/widgets.py index 2b1982da..f8c3989f 100644 --- a/arctic/widgets.py +++ b/arctic/widgets.py @@ -151,6 +151,10 @@ class QuickFiltersSelectMultiple( """ This widget is used to be able to have a more than one active filters """ + def __init__(self, attrs=None, **kwargs): + attrs = attrs or {} + attrs['select_multiple'] = True + super().__init__(attrs, **kwargs) class SearchInput(TextInput): diff --git a/tests/test_layout_mixin.py b/tests/test_layout_mixin.py index 69c3357a..a30d50f0 100644 --- a/tests/test_layout_mixin.py +++ b/tests/test_layout_mixin.py @@ -101,7 +101,7 @@ def test_layout_example_4(layout): assert layout[0]['fieldset']['title'] == 'fieldset' assert layout[0]['fieldset']['description'] is None - assert layout[0]['fieldset']['collapsible'] is 'closed' + assert layout[0]['fieldset']['collapsible'] == 'closed' assert layout[0]['rows'][0]['name'] == 'title' assert layout[0]['rows'][0]['column'] is None assert layout[0]['rows'][1]['name'] == 'title' @@ -121,6 +121,6 @@ def test_layout_example_4(layout): assert layout[2]['fieldset']['title'] == 'fieldset3' assert layout[2]['fieldset']['description'] is None - assert layout[2]['fieldset']['collapsible'] is 'open' + assert layout[2]['fieldset']['collapsible'] == 'open' assert layout[2]['rows'][0]['name'] == 'published' assert layout[2]['rows'][0]['column'] is None
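For reference, a minimal usage sketch of what the django-arctic patch above changes (assuming a Django environment where this patched `arctic.widgets` is importable; the `CHOICES` values are made up for illustration): the multiple-select quick-filter widget now always exposes a `select_multiple` attr, presumably so the shared `quick_filters_select.html` template can tell it apart from the single-select variant.

```python
# Hypothetical usage sketch -- not part of the patch itself.
from arctic.widgets import QuickFiltersSelect, QuickFiltersSelectMultiple

CHOICES = (("published", "Published"), ("draft", "Draft"))  # illustrative choices

single = QuickFiltersSelect(choices=CHOICES)
multiple = QuickFiltersSelectMultiple(choices=CHOICES)

# Only the multiple-select widget carries the new default attribute.
assert "select_multiple" not in single.attrs
assert multiple.attrs["select_multiple"] is True
```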
pytorch__vision-2612
Unable to build with Spack/from source, ffmpeg libraries not found ## 🐛 Bug I'm trying to install `torchvision` from source using the [Spack](https://spack.io) package manager, but the build can't find my `ffmpeg` libraries, which are in a non-standard location. There are a couple of reasons for this: 1. Mismatch of compilers. At some points of the build, it is using the compiler I defined in `CXX`, which is a compiler wrapper designed to automatically inject the appropriate linker args. At other points of the build, it is using the system compiler, possibly picked up from Python's distutils, which is not a compiler wrapper. 2. `TORCHVISION_LIBRARY` seems to be ignored. The README says to set `TORCHVISION_LIBRARY` with the path to all dependency libraries, but this isn't working either. ## To Reproduce ```console $ spack install -v py-torchvision ... [15/15] /Users/Adam/spack/lib/spack/env/clang/clang++ -MMD -MF /private/var/folders/21/hwq39zyj4g36x6zjfyl5l8080000gn/T/Adam/spack-stage/spack-stage-py-torchvision-0.7.0-732klihrqrcpdkrv4mispep23skf2cbr/spack-src/build/temp.macosx-10.15.6-x86_64-3.7/private/var/folders/21/hwq39zyj4g36x6zjfyl5l8080000gn/T/Adam/spack-stage/spack-stage-py-torchvision-0.7.0-732klihrqrcpdkrv4mispep23skf2cbr/spack-src/torchvision/csrc/cpu/video_reader/VideoReader.o.d -Wno-unused-result -Wsign-compare -Wunreachable-code -DNDEBUG -g -fwrapv -O3 -Wall -fPIC -I/private/var/folders/21/hwq39zyj4g36x6zjfyl5l8080000gn/T/Adam/spack-stage/spack-stage-py-torchvision-0.7.0-732klihrqrcpdkrv4mispep23skf2cbr/spack-src/torchvision/csrc/cpu/decoder -I/private/var/folders/21/hwq39zyj4g36x6zjfyl5l8080000gn/T/Adam/spack-stage/spack-stage-py-torchvision-0.7.0-732klihrqrcpdkrv4mispep23skf2cbr/spack-src/torchvision/csrc/cpu/video_reader -I/Users/Adam/spack/opt/spack/darwin-catalina-x86_64/apple-clang-11.0.3/ffmpeg-4.2.2-hoilb6sa2j6ggatrb2xvmuazqrtgcoa7/include -I/private/var/folders/21/hwq39zyj4g36x6zjfyl5l8080000gn/T/Adam/spack-stage/spack-stage-py-torchvision-0.7.0-732klihrqrcpdkrv4mispep23skf2cbr/spack-src/torchvision/csrc -I/Users/Adam/spack/opt/spack/darwin-catalina-x86_64/apple-clang-11.0.3/py-torch-1.6.0-7zhzk2mza5nl3hvalsoql43dq56p7vs6/lib/python3.7/site-packages/torch/include -I/Users/Adam/spack/opt/spack/darwin-catalina-x86_64/apple-clang-11.0.3/py-torch-1.6.0-7zhzk2mza5nl3hvalsoql43dq56p7vs6/lib/python3.7/site-packages/torch/include/torch/csrc/api/include -I/Users/Adam/spack/opt/spack/darwin-catalina-x86_64/apple-clang-11.0.3/py-torch-1.6.0-7zhzk2mza5nl3hvalsoql43dq56p7vs6/lib/python3.7/site-packages/torch/include/TH -I/Users/Adam/spack/opt/spack/darwin-catalina-x86_64/apple-clang-11.0.3/py-torch-1.6.0-7zhzk2mza5nl3hvalsoql43dq56p7vs6/lib/python3.7/site-packages/torch/include/THC -I/Users/Adam/spack/opt/spack/darwin-catalina-x86_64/apple-clang-11.0.3/python-3.7.8-skt55xepunmc5rj2diqoljahpau3zwrm/include/python3.7m -c -c /private/var/folders/21/hwq39zyj4g36x6zjfyl5l8080000gn/T/Adam/spack-stage/spack-stage-py-torchvision-0.7.0-732klihrqrcpdkrv4mispep23skf2cbr/spack-src/torchvision/csrc/cpu/video_reader/VideoReader.cpp -o /private/var/folders/21/hwq39zyj4g36x6zjfyl5l8080000gn/T/Adam/spack-stage/spack-stage-py-torchvision-0.7.0-732klihrqrcpdkrv4mispep23skf2cbr/spack-src/build/temp.macosx-10.15.6-x86_64-3.7/private/var/folders/21/hwq39zyj4g36x6zjfyl5l8080000gn/T/Adam/spack-stage/spack-stage-py-torchvision-0.7.0-732klihrqrcpdkrv4mispep23skf2cbr/spack-src/torchvision/csrc/cpu/video_reader/VideoReader.o -std=c++14 -DTORCH_API_INCLUDE_EXTENSION_H -DTORCH_EXTENSION_NAME=video_reader 
-D_GLIBCXX_USE_CXX11_ABI=0 ... /usr/bin/clang++ -bundle -undefined dynamic_lookup /private/var/folders/21/hwq39zyj4g36x6zjfyl5l8080000gn/T/Adam/spack-stage/spack-stage-py-torchvision-0.7.0-732klihrqrcpdkrv4mispep23skf2cbr/spack-src/build/temp.macosx-10.15.6-x86_64-3.7/private/var/folders/21/hwq39zyj4g36x6zjfyl5l8080000gn/T/Adam/spack-stage/spack-stage-py-torchvision-0.7.0-732klihrqrcpdkrv4mispep23skf2cbr/spack-src/torchvision/csrc/cpu/video_reader/VideoReader.o /private/var/folders/21/hwq39zyj4g36x6zjfyl5l8080000gn/T/Adam/spack-stage/spack-stage-py-torchvision-0.7.0-732klihrqrcpdkrv4mispep23skf2cbr/spack-src/build/temp.macosx-10.15.6-x86_64-3.7/private/var/folders/21/hwq39zyj4g36x6zjfyl5l8080000gn/T/Adam/spack-stage/spack-stage-py-torchvision-0.7.0-732klihrqrcpdkrv4mispep23skf2cbr/spack-src/torchvision/csrc/cpu/decoder/seekable_buffer.o /private/var/folders/21/hwq39zyj4g36x6zjfyl5l8080000gn/T/Adam/spack-stage/spack-stage-py-torchvision-0.7.0-732klihrqrcpdkrv4mispep23skf2cbr/spack-src/build/temp.macosx-10.15.6-x86_64-3.7/private/var/folders/21/hwq39zyj4g36x6zjfyl5l8080000gn/T/Adam/spack-stage/spack-stage-py-torchvision-0.7.0-732klihrqrcpdkrv4mispep23skf2cbr/spack-src/torchvision/csrc/cpu/decoder/cc_stream.o /private/var/folders/21/hwq39zyj4g36x6zjfyl5l8080000gn/T/Adam/spack-stage/spack-stage-py-torchvision-0.7.0-732klihrqrcpdkrv4mispep23skf2cbr/spack-src/build/temp.macosx-10.15.6-x86_64-3.7/private/var/folders/21/hwq39zyj4g36x6zjfyl5l8080000gn/T/Adam/spack-stage/spack-stage-py-torchvision-0.7.0-732klihrqrcpdkrv4mispep23skf2cbr/spack-src/torchvision/csrc/cpu/decoder/util.o /private/var/folders/21/hwq39zyj4g36x6zjfyl5l8080000gn/T/Adam/spack-stage/spack-stage-py-torchvision-0.7.0-732klihrqrcpdkrv4mispep23skf2cbr/spack-src/build/temp.macosx-10.15.6-x86_64-3.7/private/var/folders/21/hwq39zyj4g36x6zjfyl5l8080000gn/T/Adam/spack-stage/spack-stage-py-torchvision-0.7.0-732klihrqrcpdkrv4mispep23skf2cbr/spack-src/torchvision/csrc/cpu/decoder/subtitle_sampler.o /private/var/folders/21/hwq39zyj4g36x6zjfyl5l8080000gn/T/Adam/spack-stage/spack-stage-py-torchvision-0.7.0-732klihrqrcpdkrv4mispep23skf2cbr/spack-src/build/temp.macosx-10.15.6-x86_64-3.7/private/var/folders/21/hwq39zyj4g36x6zjfyl5l8080000gn/T/Adam/spack-stage/spack-stage-py-torchvision-0.7.0-732klihrqrcpdkrv4mispep23skf2cbr/spack-src/torchvision/csrc/cpu/decoder/audio_stream.o /private/var/folders/21/hwq39zyj4g36x6zjfyl5l8080000gn/T/Adam/spack-stage/spack-stage-py-torchvision-0.7.0-732klihrqrcpdkrv4mispep23skf2cbr/spack-src/build/temp.macosx-10.15.6-x86_64-3.7/private/var/folders/21/hwq39zyj4g36x6zjfyl5l8080000gn/T/Adam/spack-stage/spack-stage-py-torchvision-0.7.0-732klihrqrcpdkrv4mispep23skf2cbr/spack-src/torchvision/csrc/cpu/decoder/subtitle_stream.o /private/var/folders/21/hwq39zyj4g36x6zjfyl5l8080000gn/T/Adam/spack-stage/spack-stage-py-torchvision-0.7.0-732klihrqrcpdkrv4mispep23skf2cbr/spack-src/build/temp.macosx-10.15.6-x86_64-3.7/private/var/folders/21/hwq39zyj4g36x6zjfyl5l8080000gn/T/Adam/spack-stage/spack-stage-py-torchvision-0.7.0-732klihrqrcpdkrv4mispep23skf2cbr/spack-src/torchvision/csrc/cpu/decoder/audio_sampler.o /private/var/folders/21/hwq39zyj4g36x6zjfyl5l8080000gn/T/Adam/spack-stage/spack-stage-py-torchvision-0.7.0-732klihrqrcpdkrv4mispep23skf2cbr/spack-src/build/temp.macosx-10.15.6-x86_64-3.7/private/var/folders/21/hwq39zyj4g36x6zjfyl5l8080000gn/T/Adam/spack-stage/spack-stage-py-torchvision-0.7.0-732klihrqrcpdkrv4mispep23skf2cbr/spack-src/torchvision/csrc/cpu/decoder/stream.o 
/private/var/folders/21/hwq39zyj4g36x6zjfyl5l8080000gn/T/Adam/spack-stage/spack-stage-py-torchvision-0.7.0-732klihrqrcpdkrv4mispep23skf2cbr/spack-src/build/temp.macosx-10.15.6-x86_64-3.7/private/var/folders/21/hwq39zyj4g36x6zjfyl5l8080000gn/T/Adam/spack-stage/spack-stage-py-torchvision-0.7.0-732klihrqrcpdkrv4mispep23skf2cbr/spack-src/torchvision/csrc/cpu/decoder/time_keeper.o /private/var/folders/21/hwq39zyj4g36x6zjfyl5l8080000gn/T/Adam/spack-stage/spack-stage-py-torchvision-0.7.0-732klihrqrcpdkrv4mispep23skf2cbr/spack-src/build/temp.macosx-10.15.6-x86_64-3.7/private/var/folders/21/hwq39zyj4g36x6zjfyl5l8080000gn/T/Adam/spack-stage/spack-stage-py-torchvision-0.7.0-732klihrqrcpdkrv4mispep23skf2cbr/spack-src/torchvision/csrc/cpu/decoder/memory_buffer.o /private/var/folders/21/hwq39zyj4g36x6zjfyl5l8080000gn/T/Adam/spack-stage/spack-stage-py-torchvision-0.7.0-732klihrqrcpdkrv4mispep23skf2cbr/spack-src/build/temp.macosx-10.15.6-x86_64-3.7/private/var/folders/21/hwq39zyj4g36x6zjfyl5l8080000gn/T/Adam/spack-stage/spack-stage-py-torchvision-0.7.0-732klihrqrcpdkrv4mispep23skf2cbr/spack-src/torchvision/csrc/cpu/decoder/decoder.o /private/var/folders/21/hwq39zyj4g36x6zjfyl5l8080000gn/T/Adam/spack-stage/spack-stage-py-torchvision-0.7.0-732klihrqrcpdkrv4mispep23skf2cbr/spack-src/build/temp.macosx-10.15.6-x86_64-3.7/private/var/folders/21/hwq39zyj4g36x6zjfyl5l8080000gn/T/Adam/spack-stage/spack-stage-py-torchvision-0.7.0-732klihrqrcpdkrv4mispep23skf2cbr/spack-src/torchvision/csrc/cpu/decoder/sync_decoder.o /private/var/folders/21/hwq39zyj4g36x6zjfyl5l8080000gn/T/Adam/spack-stage/spack-stage-py-torchvision-0.7.0-732klihrqrcpdkrv4mispep23skf2cbr/spack-src/build/temp.macosx-10.15.6-x86_64-3.7/private/var/folders/21/hwq39zyj4g36x6zjfyl5l8080000gn/T/Adam/spack-stage/spack-stage-py-torchvision-0.7.0-732klihrqrcpdkrv4mispep23skf2cbr/spack-src/torchvision/csrc/cpu/decoder/video_stream.o /private/var/folders/21/hwq39zyj4g36x6zjfyl5l8080000gn/T/Adam/spack-stage/spack-stage-py-torchvision-0.7.0-732klihrqrcpdkrv4mispep23skf2cbr/spack-src/build/temp.macosx-10.15.6-x86_64-3.7/private/var/folders/21/hwq39zyj4g36x6zjfyl5l8080000gn/T/Adam/spack-stage/spack-stage-py-torchvision-0.7.0-732klihrqrcpdkrv4mispep23skf2cbr/spack-src/torchvision/csrc/cpu/decoder/video_sampler.o -L/Users/Adam/spack/opt/spack/darwin-catalina-x86_64/apple-clang-11.0.3/py-torch-1.6.0-7zhzk2mza5nl3hvalsoql43dq56p7vs6/lib/python3.7/site-packages/torch/lib -L/Users/Adam/spack/opt/spack/darwin-catalina-x86_64/apple-clang-11.0.3/python-3.7.8-skt55xepunmc5rj2diqoljahpau3zwrm/lib -lavcodec -lavformat -lavutil -lswresample -lswscale -lc10 -ltorch -ltorch_cpu -ltorch_python -o build/lib.macosx-10.15.6-x86_64-3.7/torchvision/video_reader.so -std=c++14 ld: library not found for -lavcodec clang: error: linker command failed with exit code 1 (use -v to see invocation) error: command '/usr/bin/clang++' failed with exit status 1 ``` As you can see, at some points of the build, it is using the compiler wrapper `/Users/Adam/spack/lib/spack/env/clang/clang++`, while at other parts it's using the actual compiler `/usr/bin/clang++`. This is true whether or not I use the `ninja` backend for building. Note that I have a few local patches to Spack's torchvision recipe to add `TORCHVISION_LIBRARY` and `TORCHVISION_INCLUDE`, see below. ## Expected behavior If `CXX` is set to a particular compiler, I would expect the build to use that compiler for all compilation. 
Secondly, if `TORCHVISION_LIBRARY` is mentioned in the README as a way to locate libraries, I would expect that to work. ## Environment - PyTorch / torchvision Version: 1.6.0 / 0.7.0 (also reproduced with 1.5.1 / 0.6.1) - OS: macOS 10.15.6 - Compiler: Apple Clang 11.0.3 - How you installed PyTorch / torchvision: `spack` (source) - Build command you used (if compiling from source): `python setup.py build && python setup.py install` - Python version: 3.7.8 - CUDA/cuDNN version: N/A - GPU models and configuration: N/A - Any other relevant information: build environment contains the following env vars: ```bash TORCHVISION_INCLUDE=/Users/Adam/spack/opt/spack/darwin-catalina-x86_64/apple-clang-11.0.3/python-3.7.8-skt55xepunmc5rj2diqoljahpau3zwrm/include/python3.7m:/Users/Adam/spack/opt/spack/darwin-catalina-x86_64/apple-clang-11.0.3/py-torch-1.6.0-7zhzk2mza5nl3hvalsoql43dq56p7vs6/lib/python3.7/site-packages/torch/include:/Users/Adam/spack/opt/spack/darwin-catalina-x86_64/apple-clang-11.0.3/ffmpeg-4.2.2-hoilb6sa2j6ggatrb2xvmuazqrtgcoa7/include TORCHVISION_LIBRARY=/Users/Adam/spack/opt/spack/darwin-catalina-x86_64/apple-clang-11.0.3/python-3.7.8-skt55xepunmc5rj2diqoljahpau3zwrm/lib:/Users/Adam/spack/opt/spack/darwin-catalina-x86_64/apple-clang-11.0.3/py-torch-1.6.0-7zhzk2mza5nl3hvalsoql43dq56p7vs6/lib/python3.7/site-packages/torch/lib:/Users/Adam/spack/opt/spack/darwin-catalina-x86_64/apple-clang-11.0.3/ffmpeg-4.2.2-hoilb6sa2j6ggatrb2xvmuazqrtgcoa7/lib ``` ## Additional context I realize that you likely don't get a lot of bug reports from people installing torchvision with Spack. I'm a Spack developer, so I'm happy to handle all of the Spack-specific questions. Also, if any torch/torchvision developers would like to help me maintain our Spack recipes, let me know!
[ { "content": "import os\nimport io\nimport sys\nfrom setuptools import setup, find_packages\nfrom pkg_resources import parse_version, get_distribution, DistributionNotFound\nimport subprocess\nimport distutils.command.clean\nimport distutils.spawn\nimport glob\nimport shutil\n\nimport torch\nfrom torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension, CUDA_HOME\nfrom torch.utils.hipify import hipify_python\n\n\ndef read(*names, **kwargs):\n with io.open(\n os.path.join(os.path.dirname(__file__), *names),\n encoding=kwargs.get(\"encoding\", \"utf8\")\n ) as fp:\n return fp.read()\n\n\ndef get_dist(pkgname):\n try:\n return get_distribution(pkgname)\n except DistributionNotFound:\n return None\n\n\nversion = '0.8.0a0'\nsha = 'Unknown'\npackage_name = 'torchvision'\n\ncwd = os.path.dirname(os.path.abspath(__file__))\n\ntry:\n sha = subprocess.check_output(['git', 'rev-parse', 'HEAD'], cwd=cwd).decode('ascii').strip()\nexcept Exception:\n pass\n\nif os.getenv('BUILD_VERSION'):\n version = os.getenv('BUILD_VERSION')\nelif sha != 'Unknown':\n version += '+' + sha[:7]\nprint(\"Building wheel {}-{}\".format(package_name, version))\n\n\ndef write_version_file():\n version_path = os.path.join(cwd, 'torchvision', 'version.py')\n with open(version_path, 'w') as f:\n f.write(\"__version__ = '{}'\\n\".format(version))\n f.write(\"git_version = {}\\n\".format(repr(sha)))\n f.write(\"from torchvision.extension import _check_cuda_version\\n\")\n f.write(\"if _check_cuda_version() > 0:\\n\")\n f.write(\" cuda = _check_cuda_version()\\n\")\n\n\nwrite_version_file()\n\nreadme = open('README.rst').read()\n\npytorch_dep = 'torch'\nif os.getenv('PYTORCH_VERSION'):\n pytorch_dep += \"==\" + os.getenv('PYTORCH_VERSION')\n\nrequirements = [\n 'numpy',\n pytorch_dep,\n]\n\npillow_ver = ' >= 4.1.1'\npillow_req = 'pillow-simd' if get_dist('pillow-simd') is not None else 'pillow'\nrequirements.append(pillow_req + pillow_ver)\n\n\ndef find_library(name, vision_include):\n this_dir = os.path.dirname(os.path.abspath(__file__))\n build_prefix = os.environ.get('BUILD_PREFIX', None)\n is_conda_build = build_prefix is not None\n\n library_found = False\n conda_installed = False\n lib_folder = None\n include_folder = None\n library_header = '{0}.h'.format(name)\n\n # Lookup in TORCHVISION_INCLUDE or in the package file\n package_path = [os.path.join(this_dir, 'torchvision')]\n for folder in vision_include + package_path:\n candidate_path = os.path.join(folder, library_header)\n library_found = os.path.exists(candidate_path)\n if library_found:\n break\n\n if not library_found:\n print('Running build on conda-build: {0}'.format(is_conda_build))\n if is_conda_build:\n # Add conda headers/libraries\n if os.name == 'nt':\n build_prefix = os.path.join(build_prefix, 'Library')\n include_folder = os.path.join(build_prefix, 'include')\n lib_folder = os.path.join(build_prefix, 'lib')\n library_header_path = os.path.join(\n include_folder, library_header)\n library_found = os.path.isfile(library_header_path)\n conda_installed = library_found\n else:\n # Check if using Anaconda to produce wheels\n conda = distutils.spawn.find_executable('conda')\n is_conda = conda is not None\n print('Running build on conda: {0}'.format(is_conda))\n if is_conda:\n python_executable = sys.executable\n py_folder = os.path.dirname(python_executable)\n if os.name == 'nt':\n env_path = os.path.join(py_folder, 'Library')\n else:\n env_path = os.path.dirname(py_folder)\n lib_folder = os.path.join(env_path, 'lib')\n include_folder = 
os.path.join(env_path, 'include')\n library_header_path = os.path.join(\n include_folder, library_header)\n library_found = os.path.isfile(library_header_path)\n conda_installed = library_found\n\n if not library_found:\n if sys.platform == 'linux':\n library_found = os.path.exists('/usr/include/{0}'.format(\n library_header))\n library_found = library_found or os.path.exists(\n '/usr/local/include/{0}'.format(library_header))\n\n return library_found, conda_installed, include_folder, lib_folder\n\n\ndef get_extensions():\n this_dir = os.path.dirname(os.path.abspath(__file__))\n extensions_dir = os.path.join(this_dir, 'torchvision', 'csrc')\n\n main_file = glob.glob(os.path.join(extensions_dir, '*.cpp'))\n source_cpu = glob.glob(os.path.join(extensions_dir, 'cpu', '*.cpp'))\n\n is_rocm_pytorch = False\n if torch.__version__ >= '1.5':\n from torch.utils.cpp_extension import ROCM_HOME\n is_rocm_pytorch = True if ((torch.version.hip is not None) and (ROCM_HOME is not None)) else False\n\n if is_rocm_pytorch:\n hipify_python.hipify(\n project_directory=this_dir,\n output_directory=this_dir,\n includes=\"torchvision/csrc/cuda/*\",\n show_detailed=True,\n is_pytorch_extension=True,\n )\n source_cuda = glob.glob(os.path.join(extensions_dir, 'hip', '*.hip'))\n # Copy over additional files\n shutil.copy(\"torchvision/csrc/cuda/cuda_helpers.h\", \"torchvision/csrc/hip/cuda_helpers.h\")\n shutil.copy(\"torchvision/csrc/cuda/vision_cuda.h\", \"torchvision/csrc/hip/vision_cuda.h\")\n\n else:\n source_cuda = glob.glob(os.path.join(extensions_dir, 'cuda', '*.cu'))\n\n sources = main_file + source_cpu\n extension = CppExtension\n\n compile_cpp_tests = os.getenv('WITH_CPP_MODELS_TEST', '0') == '1'\n if compile_cpp_tests:\n test_dir = os.path.join(this_dir, 'test')\n models_dir = os.path.join(this_dir, 'torchvision', 'csrc', 'models')\n test_file = glob.glob(os.path.join(test_dir, '*.cpp'))\n source_models = glob.glob(os.path.join(models_dir, '*.cpp'))\n\n test_file = [os.path.join(test_dir, s) for s in test_file]\n source_models = [os.path.join(models_dir, s) for s in source_models]\n tests = test_file + source_models\n tests_include_dirs = [test_dir, models_dir]\n\n define_macros = []\n\n extra_compile_args = {}\n if (torch.cuda.is_available() and ((CUDA_HOME is not None) or is_rocm_pytorch)) \\\n or os.getenv('FORCE_CUDA', '0') == '1':\n extension = CUDAExtension\n sources += source_cuda\n if not is_rocm_pytorch:\n define_macros += [('WITH_CUDA', None)]\n nvcc_flags = os.getenv('NVCC_FLAGS', '')\n if nvcc_flags == '':\n nvcc_flags = []\n else:\n nvcc_flags = nvcc_flags.split(' ')\n else:\n define_macros += [('WITH_HIP', None)]\n nvcc_flags = []\n extra_compile_args = {\n 'cxx': [],\n 'nvcc': nvcc_flags,\n }\n\n if sys.platform == 'win32':\n define_macros += [('torchvision_EXPORTS', None)]\n\n extra_compile_args.setdefault('cxx', [])\n extra_compile_args['cxx'].append('/MP')\n\n debug_mode = os.getenv('DEBUG', '0') == '1'\n if debug_mode:\n print(\"Compile in debug mode\")\n extra_compile_args['cxx'].append(\"-g\")\n extra_compile_args['cxx'].append(\"-O0\")\n if \"nvcc\" in extra_compile_args:\n # we have to remove \"-OX\" and \"-g\" flag if exists and append\n nvcc_flags = extra_compile_args[\"nvcc\"]\n extra_compile_args[\"nvcc\"] = [\n f for f in nvcc_flags if not (\"-O\" in f or \"-g\" in f)\n ]\n extra_compile_args[\"nvcc\"].append(\"-O0\")\n extra_compile_args[\"nvcc\"].append(\"-g\")\n\n sources = [os.path.join(extensions_dir, s) for s in sources]\n\n include_dirs = [extensions_dir]\n\n 
ext_modules = [\n extension(\n 'torchvision._C',\n sources,\n include_dirs=include_dirs,\n define_macros=define_macros,\n extra_compile_args=extra_compile_args,\n )\n ]\n if compile_cpp_tests:\n ext_modules.append(\n extension(\n 'torchvision._C_tests',\n tests,\n include_dirs=tests_include_dirs,\n define_macros=define_macros,\n extra_compile_args=extra_compile_args,\n )\n )\n\n # ------------------- Torchvision extra extensions ------------------------\n vision_include = os.environ.get('TORCHVISION_INCLUDE', None)\n vision_library = os.environ.get('TORCHVISION_LIBRARY', None)\n vision_include = (vision_include.split(os.pathsep)\n if vision_include is not None else [])\n vision_library = (vision_library.split(os.pathsep)\n if vision_library is not None else [])\n include_dirs += vision_include\n library_dirs = vision_library\n\n # Image reading extension\n image_macros = []\n image_include = [extensions_dir]\n image_library = []\n image_link_flags = []\n\n # Locating libPNG\n libpng = distutils.spawn.find_executable('libpng-config')\n pngfix = distutils.spawn.find_executable('pngfix')\n png_found = libpng is not None or pngfix is not None\n image_macros += [('PNG_FOUND', str(int(png_found)))]\n print('PNG found: {0}'.format(png_found))\n if png_found:\n if libpng is not None:\n # Linux / Mac\n png_version = subprocess.run([libpng, '--version'],\n stdout=subprocess.PIPE)\n png_version = png_version.stdout.strip().decode('utf-8')\n print('libpng version: {0}'.format(png_version))\n png_version = parse_version(png_version)\n if png_version >= parse_version(\"1.6.0\"):\n print('Building torchvision with PNG image support')\n png_lib = subprocess.run([libpng, '--libdir'],\n stdout=subprocess.PIPE)\n png_lib = png_lib.stdout.strip().decode('utf-8')\n if 'disabled' not in png_lib:\n image_library += [png_lib]\n png_include = subprocess.run([libpng, '--I_opts'],\n stdout=subprocess.PIPE)\n png_include = png_include.stdout.strip().decode('utf-8')\n _, png_include = png_include.split('-I')\n print('libpng include path: {0}'.format(png_include))\n image_include += [png_include]\n image_link_flags.append('png')\n else:\n print('libpng installed version is less than 1.6.0, '\n 'disabling PNG support')\n png_found = False\n else:\n # Windows\n png_lib = os.path.join(\n os.path.dirname(os.path.dirname(pngfix)), 'lib')\n png_include = os.path.join(os.path.dirname(\n os.path.dirname(pngfix)), 'include', 'libpng16')\n image_library += [png_lib]\n image_include += [png_include]\n image_link_flags.append('libpng')\n\n # Locating libjpeg\n (jpeg_found, jpeg_conda,\n jpeg_include, jpeg_lib) = find_library('jpeglib', vision_include)\n\n print('JPEG found: {0}'.format(jpeg_found))\n image_macros += [('JPEG_FOUND', str(int(jpeg_found)))]\n if jpeg_found:\n print('Building torchvision with JPEG image support')\n image_link_flags.append('jpeg')\n if jpeg_conda:\n image_library += [jpeg_lib]\n image_include += [jpeg_include]\n\n image_path = os.path.join(extensions_dir, 'cpu', 'image')\n image_src = glob.glob(os.path.join(image_path, '*.cpp'))\n\n if png_found or jpeg_found:\n ext_modules.append(extension(\n 'torchvision.image',\n image_src,\n include_dirs=image_include + include_dirs + [image_path],\n library_dirs=image_library + library_dirs,\n define_macros=image_macros,\n libraries=image_link_flags,\n extra_compile_args=extra_compile_args\n ))\n\n ffmpeg_exe = distutils.spawn.find_executable('ffmpeg')\n has_ffmpeg = ffmpeg_exe is not None\n\n if has_ffmpeg:\n ffmpeg_bin = os.path.dirname(ffmpeg_exe)\n ffmpeg_root 
= os.path.dirname(ffmpeg_bin)\n ffmpeg_include_dir = os.path.join(ffmpeg_root, 'include')\n\n # TorchVision base decoder + video reader\n video_reader_src_dir = os.path.join(this_dir, 'torchvision', 'csrc', 'cpu', 'video_reader')\n video_reader_src = glob.glob(os.path.join(video_reader_src_dir, \"*.cpp\"))\n base_decoder_src_dir = os.path.join(this_dir, 'torchvision', 'csrc', 'cpu', 'decoder')\n base_decoder_src = glob.glob(\n os.path.join(base_decoder_src_dir, \"*.cpp\"))\n # exclude tests\n base_decoder_src = [x for x in base_decoder_src if '_test.cpp' not in x]\n\n combined_src = video_reader_src + base_decoder_src\n\n ext_modules.append(\n CppExtension(\n 'torchvision.video_reader',\n combined_src,\n include_dirs=[\n base_decoder_src_dir,\n video_reader_src_dir,\n ffmpeg_include_dir,\n extensions_dir,\n ],\n libraries=[\n 'avcodec',\n 'avformat',\n 'avutil',\n 'swresample',\n 'swscale',\n ],\n extra_compile_args=[\"-std=c++14\"],\n extra_link_args=[\"-std=c++14\"],\n )\n )\n\n return ext_modules\n\n\nclass clean(distutils.command.clean.clean):\n def run(self):\n with open('.gitignore', 'r') as f:\n ignores = f.read()\n for wildcard in filter(None, ignores.split('\\n')):\n for filename in glob.glob(wildcard):\n try:\n os.remove(filename)\n except OSError:\n shutil.rmtree(filename, ignore_errors=True)\n\n # It's an old-style class in Python 2.7...\n distutils.command.clean.clean.run(self)\n\n\nsetup(\n # Metadata\n name=package_name,\n version=version,\n author='PyTorch Core Team',\n author_email='[email protected]',\n url='https://github.com/pytorch/vision',\n description='image and video datasets and models for torch deep learning',\n long_description=readme,\n license='BSD',\n\n # Package info\n packages=find_packages(exclude=('test',)),\n package_data={\n package_name: ['*.dll', '*.dylib', '*.so']\n },\n zip_safe=False,\n install_requires=requirements,\n extras_require={\n \"scipy\": [\"scipy\"],\n },\n ext_modules=get_extensions(),\n cmdclass={\n 'build_ext': BuildExtension.with_options(no_python_abi_suffix=True),\n 'clean': clean,\n }\n)\n", "path": "setup.py" } ]
[ { "content": "import os\nimport io\nimport sys\nfrom setuptools import setup, find_packages\nfrom pkg_resources import parse_version, get_distribution, DistributionNotFound\nimport subprocess\nimport distutils.command.clean\nimport distutils.spawn\nimport glob\nimport shutil\n\nimport torch\nfrom torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension, CUDA_HOME\nfrom torch.utils.hipify import hipify_python\n\n\ndef read(*names, **kwargs):\n with io.open(\n os.path.join(os.path.dirname(__file__), *names),\n encoding=kwargs.get(\"encoding\", \"utf8\")\n ) as fp:\n return fp.read()\n\n\ndef get_dist(pkgname):\n try:\n return get_distribution(pkgname)\n except DistributionNotFound:\n return None\n\n\nversion = '0.8.0a0'\nsha = 'Unknown'\npackage_name = 'torchvision'\n\ncwd = os.path.dirname(os.path.abspath(__file__))\n\ntry:\n sha = subprocess.check_output(['git', 'rev-parse', 'HEAD'], cwd=cwd).decode('ascii').strip()\nexcept Exception:\n pass\n\nif os.getenv('BUILD_VERSION'):\n version = os.getenv('BUILD_VERSION')\nelif sha != 'Unknown':\n version += '+' + sha[:7]\nprint(\"Building wheel {}-{}\".format(package_name, version))\n\n\ndef write_version_file():\n version_path = os.path.join(cwd, 'torchvision', 'version.py')\n with open(version_path, 'w') as f:\n f.write(\"__version__ = '{}'\\n\".format(version))\n f.write(\"git_version = {}\\n\".format(repr(sha)))\n f.write(\"from torchvision.extension import _check_cuda_version\\n\")\n f.write(\"if _check_cuda_version() > 0:\\n\")\n f.write(\" cuda = _check_cuda_version()\\n\")\n\n\nwrite_version_file()\n\nreadme = open('README.rst').read()\n\npytorch_dep = 'torch'\nif os.getenv('PYTORCH_VERSION'):\n pytorch_dep += \"==\" + os.getenv('PYTORCH_VERSION')\n\nrequirements = [\n 'numpy',\n pytorch_dep,\n]\n\npillow_ver = ' >= 4.1.1'\npillow_req = 'pillow-simd' if get_dist('pillow-simd') is not None else 'pillow'\nrequirements.append(pillow_req + pillow_ver)\n\n\ndef find_library(name, vision_include):\n this_dir = os.path.dirname(os.path.abspath(__file__))\n build_prefix = os.environ.get('BUILD_PREFIX', None)\n is_conda_build = build_prefix is not None\n\n library_found = False\n conda_installed = False\n lib_folder = None\n include_folder = None\n library_header = '{0}.h'.format(name)\n\n # Lookup in TORCHVISION_INCLUDE or in the package file\n package_path = [os.path.join(this_dir, 'torchvision')]\n for folder in vision_include + package_path:\n candidate_path = os.path.join(folder, library_header)\n library_found = os.path.exists(candidate_path)\n if library_found:\n break\n\n if not library_found:\n print('Running build on conda-build: {0}'.format(is_conda_build))\n if is_conda_build:\n # Add conda headers/libraries\n if os.name == 'nt':\n build_prefix = os.path.join(build_prefix, 'Library')\n include_folder = os.path.join(build_prefix, 'include')\n lib_folder = os.path.join(build_prefix, 'lib')\n library_header_path = os.path.join(\n include_folder, library_header)\n library_found = os.path.isfile(library_header_path)\n conda_installed = library_found\n else:\n # Check if using Anaconda to produce wheels\n conda = distutils.spawn.find_executable('conda')\n is_conda = conda is not None\n print('Running build on conda: {0}'.format(is_conda))\n if is_conda:\n python_executable = sys.executable\n py_folder = os.path.dirname(python_executable)\n if os.name == 'nt':\n env_path = os.path.join(py_folder, 'Library')\n else:\n env_path = os.path.dirname(py_folder)\n lib_folder = os.path.join(env_path, 'lib')\n include_folder = 
os.path.join(env_path, 'include')\n library_header_path = os.path.join(\n include_folder, library_header)\n library_found = os.path.isfile(library_header_path)\n conda_installed = library_found\n\n if not library_found:\n if sys.platform == 'linux':\n library_found = os.path.exists('/usr/include/{0}'.format(\n library_header))\n library_found = library_found or os.path.exists(\n '/usr/local/include/{0}'.format(library_header))\n\n return library_found, conda_installed, include_folder, lib_folder\n\n\ndef get_extensions():\n this_dir = os.path.dirname(os.path.abspath(__file__))\n extensions_dir = os.path.join(this_dir, 'torchvision', 'csrc')\n\n main_file = glob.glob(os.path.join(extensions_dir, '*.cpp'))\n source_cpu = glob.glob(os.path.join(extensions_dir, 'cpu', '*.cpp'))\n\n is_rocm_pytorch = False\n if torch.__version__ >= '1.5':\n from torch.utils.cpp_extension import ROCM_HOME\n is_rocm_pytorch = True if ((torch.version.hip is not None) and (ROCM_HOME is not None)) else False\n\n if is_rocm_pytorch:\n hipify_python.hipify(\n project_directory=this_dir,\n output_directory=this_dir,\n includes=\"torchvision/csrc/cuda/*\",\n show_detailed=True,\n is_pytorch_extension=True,\n )\n source_cuda = glob.glob(os.path.join(extensions_dir, 'hip', '*.hip'))\n # Copy over additional files\n shutil.copy(\"torchvision/csrc/cuda/cuda_helpers.h\", \"torchvision/csrc/hip/cuda_helpers.h\")\n shutil.copy(\"torchvision/csrc/cuda/vision_cuda.h\", \"torchvision/csrc/hip/vision_cuda.h\")\n\n else:\n source_cuda = glob.glob(os.path.join(extensions_dir, 'cuda', '*.cu'))\n\n sources = main_file + source_cpu\n extension = CppExtension\n\n compile_cpp_tests = os.getenv('WITH_CPP_MODELS_TEST', '0') == '1'\n if compile_cpp_tests:\n test_dir = os.path.join(this_dir, 'test')\n models_dir = os.path.join(this_dir, 'torchvision', 'csrc', 'models')\n test_file = glob.glob(os.path.join(test_dir, '*.cpp'))\n source_models = glob.glob(os.path.join(models_dir, '*.cpp'))\n\n test_file = [os.path.join(test_dir, s) for s in test_file]\n source_models = [os.path.join(models_dir, s) for s in source_models]\n tests = test_file + source_models\n tests_include_dirs = [test_dir, models_dir]\n\n define_macros = []\n\n extra_compile_args = {}\n if (torch.cuda.is_available() and ((CUDA_HOME is not None) or is_rocm_pytorch)) \\\n or os.getenv('FORCE_CUDA', '0') == '1':\n extension = CUDAExtension\n sources += source_cuda\n if not is_rocm_pytorch:\n define_macros += [('WITH_CUDA', None)]\n nvcc_flags = os.getenv('NVCC_FLAGS', '')\n if nvcc_flags == '':\n nvcc_flags = []\n else:\n nvcc_flags = nvcc_flags.split(' ')\n else:\n define_macros += [('WITH_HIP', None)]\n nvcc_flags = []\n extra_compile_args = {\n 'cxx': [],\n 'nvcc': nvcc_flags,\n }\n\n if sys.platform == 'win32':\n define_macros += [('torchvision_EXPORTS', None)]\n\n extra_compile_args.setdefault('cxx', [])\n extra_compile_args['cxx'].append('/MP')\n\n debug_mode = os.getenv('DEBUG', '0') == '1'\n if debug_mode:\n print(\"Compile in debug mode\")\n extra_compile_args['cxx'].append(\"-g\")\n extra_compile_args['cxx'].append(\"-O0\")\n if \"nvcc\" in extra_compile_args:\n # we have to remove \"-OX\" and \"-g\" flag if exists and append\n nvcc_flags = extra_compile_args[\"nvcc\"]\n extra_compile_args[\"nvcc\"] = [\n f for f in nvcc_flags if not (\"-O\" in f or \"-g\" in f)\n ]\n extra_compile_args[\"nvcc\"].append(\"-O0\")\n extra_compile_args[\"nvcc\"].append(\"-g\")\n\n sources = [os.path.join(extensions_dir, s) for s in sources]\n\n include_dirs = [extensions_dir]\n\n 
ext_modules = [\n extension(\n 'torchvision._C',\n sources,\n include_dirs=include_dirs,\n define_macros=define_macros,\n extra_compile_args=extra_compile_args,\n )\n ]\n if compile_cpp_tests:\n ext_modules.append(\n extension(\n 'torchvision._C_tests',\n tests,\n include_dirs=tests_include_dirs,\n define_macros=define_macros,\n extra_compile_args=extra_compile_args,\n )\n )\n\n # ------------------- Torchvision extra extensions ------------------------\n vision_include = os.environ.get('TORCHVISION_INCLUDE', None)\n vision_library = os.environ.get('TORCHVISION_LIBRARY', None)\n vision_include = (vision_include.split(os.pathsep)\n if vision_include is not None else [])\n vision_library = (vision_library.split(os.pathsep)\n if vision_library is not None else [])\n include_dirs += vision_include\n library_dirs = vision_library\n\n # Image reading extension\n image_macros = []\n image_include = [extensions_dir]\n image_library = []\n image_link_flags = []\n\n # Locating libPNG\n libpng = distutils.spawn.find_executable('libpng-config')\n pngfix = distutils.spawn.find_executable('pngfix')\n png_found = libpng is not None or pngfix is not None\n image_macros += [('PNG_FOUND', str(int(png_found)))]\n print('PNG found: {0}'.format(png_found))\n if png_found:\n if libpng is not None:\n # Linux / Mac\n png_version = subprocess.run([libpng, '--version'],\n stdout=subprocess.PIPE)\n png_version = png_version.stdout.strip().decode('utf-8')\n print('libpng version: {0}'.format(png_version))\n png_version = parse_version(png_version)\n if png_version >= parse_version(\"1.6.0\"):\n print('Building torchvision with PNG image support')\n png_lib = subprocess.run([libpng, '--libdir'],\n stdout=subprocess.PIPE)\n png_lib = png_lib.stdout.strip().decode('utf-8')\n if 'disabled' not in png_lib:\n image_library += [png_lib]\n png_include = subprocess.run([libpng, '--I_opts'],\n stdout=subprocess.PIPE)\n png_include = png_include.stdout.strip().decode('utf-8')\n _, png_include = png_include.split('-I')\n print('libpng include path: {0}'.format(png_include))\n image_include += [png_include]\n image_link_flags.append('png')\n else:\n print('libpng installed version is less than 1.6.0, '\n 'disabling PNG support')\n png_found = False\n else:\n # Windows\n png_lib = os.path.join(\n os.path.dirname(os.path.dirname(pngfix)), 'lib')\n png_include = os.path.join(os.path.dirname(\n os.path.dirname(pngfix)), 'include', 'libpng16')\n image_library += [png_lib]\n image_include += [png_include]\n image_link_flags.append('libpng')\n\n # Locating libjpeg\n (jpeg_found, jpeg_conda,\n jpeg_include, jpeg_lib) = find_library('jpeglib', vision_include)\n\n print('JPEG found: {0}'.format(jpeg_found))\n image_macros += [('JPEG_FOUND', str(int(jpeg_found)))]\n if jpeg_found:\n print('Building torchvision with JPEG image support')\n image_link_flags.append('jpeg')\n if jpeg_conda:\n image_library += [jpeg_lib]\n image_include += [jpeg_include]\n\n image_path = os.path.join(extensions_dir, 'cpu', 'image')\n image_src = glob.glob(os.path.join(image_path, '*.cpp'))\n\n if png_found or jpeg_found:\n ext_modules.append(extension(\n 'torchvision.image',\n image_src,\n include_dirs=image_include + include_dirs + [image_path],\n library_dirs=image_library + library_dirs,\n define_macros=image_macros,\n libraries=image_link_flags,\n extra_compile_args=extra_compile_args\n ))\n\n ffmpeg_exe = distutils.spawn.find_executable('ffmpeg')\n has_ffmpeg = ffmpeg_exe is not None\n\n if has_ffmpeg:\n ffmpeg_bin = os.path.dirname(ffmpeg_exe)\n ffmpeg_root 
= os.path.dirname(ffmpeg_bin)\n ffmpeg_include_dir = os.path.join(ffmpeg_root, 'include')\n\n # TorchVision base decoder + video reader\n video_reader_src_dir = os.path.join(this_dir, 'torchvision', 'csrc', 'cpu', 'video_reader')\n video_reader_src = glob.glob(os.path.join(video_reader_src_dir, \"*.cpp\"))\n base_decoder_src_dir = os.path.join(this_dir, 'torchvision', 'csrc', 'cpu', 'decoder')\n base_decoder_src = glob.glob(\n os.path.join(base_decoder_src_dir, \"*.cpp\"))\n # exclude tests\n base_decoder_src = [x for x in base_decoder_src if '_test.cpp' not in x]\n\n combined_src = video_reader_src + base_decoder_src\n\n ext_modules.append(\n CppExtension(\n 'torchvision.video_reader',\n combined_src,\n include_dirs=[\n base_decoder_src_dir,\n video_reader_src_dir,\n ffmpeg_include_dir,\n extensions_dir,\n ],\n library_dirs=library_dirs,\n libraries=[\n 'avcodec',\n 'avformat',\n 'avutil',\n 'swresample',\n 'swscale',\n ],\n extra_compile_args=[\"-std=c++14\"],\n extra_link_args=[\"-std=c++14\"],\n )\n )\n\n return ext_modules\n\n\nclass clean(distutils.command.clean.clean):\n def run(self):\n with open('.gitignore', 'r') as f:\n ignores = f.read()\n for wildcard in filter(None, ignores.split('\\n')):\n for filename in glob.glob(wildcard):\n try:\n os.remove(filename)\n except OSError:\n shutil.rmtree(filename, ignore_errors=True)\n\n # It's an old-style class in Python 2.7...\n distutils.command.clean.clean.run(self)\n\n\nsetup(\n # Metadata\n name=package_name,\n version=version,\n author='PyTorch Core Team',\n author_email='[email protected]',\n url='https://github.com/pytorch/vision',\n description='image and video datasets and models for torch deep learning',\n long_description=readme,\n license='BSD',\n\n # Package info\n packages=find_packages(exclude=('test',)),\n package_data={\n package_name: ['*.dll', '*.dylib', '*.so']\n },\n zip_safe=False,\n install_requires=requirements,\n extras_require={\n \"scipy\": [\"scipy\"],\n },\n ext_modules=get_extensions(),\n cmdclass={\n 'build_ext': BuildExtension.with_options(no_python_abi_suffix=True),\n 'clean': clean,\n }\n)\n", "path": "setup.py" } ]
diff --git a/setup.py b/setup.py index 030e6627e1c..1bc84897fa6 100644 --- a/setup.py +++ b/setup.py @@ -358,6 +358,7 @@ def get_extensions(): ffmpeg_include_dir, extensions_dir, ], + library_dirs=library_dirs, libraries=[ 'avcodec', 'avformat',
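A minimal sketch of how the one-line fix above connects to the reporter's environment variables (directory names below are placeholders, not the actual Spack install prefixes): `setup.py` already split `TORCHVISION_LIBRARY` on `os.pathsep` into `library_dirs`, and the patch finally forwards those directories to the `video_reader` extension, so the `-lavcodec`/`-lavformat` link step gets matching `-L` flags.

```python
# Illustrative only: mirrors the TORCHVISION_LIBRARY handling in setup.py above.
import os

# Placeholder paths -- in the report these are Spack install prefixes.
os.environ["TORCHVISION_LIBRARY"] = os.pathsep.join([
    "/opt/spack/ffmpeg/lib",
    "/opt/spack/torch/lib",
])

vision_library = os.environ.get("TORCHVISION_LIBRARY", None)
library_dirs = vision_library.split(os.pathsep) if vision_library is not None else []

# With the patch, these end up as library_dirs= on the video_reader CppExtension,
# producing -L/opt/spack/ffmpeg/lib ... alongside -lavcodec -lavformat -lavutil ...
print(library_dirs)
```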
Project-MONAI__MONAI-2793
`HighResBlock` may have wrong conv block The `Convolution` block in `HighResBlock` does not use the `acti_type` and `norm_type` parameters, and thus will use the default instance norm layer and PReLU activation layer. However, it is different from all other `Convolution` blocks in `HighResNet`. Is this a mistake? @wyli
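To make the complaint concrete, here is a small sketch (against a MONAI version of this era, where `Convolution` still takes `dimensions`) of what the unparameterised `Convolution` inside `HighResBlock` builds versus a conv-only block; per the issue, the defaults are an instance norm layer and a PReLU activation, stacked on top of the explicit `ADN` that `HighResBlock` already inserts before the convolution.

```python
# Illustrative sketch, not part of the repository code.
from monai.networks.blocks import Convolution

# What HighResBlock currently creates: no act/norm passed, so Convolution appends
# its own default ADN (instance norm + PReLU) after the conv layer.
conv_with_defaults = Convolution(
    dimensions=3, in_channels=16, out_channels=16, kernel_size=3, bias=False
)
print(conv_with_defaults)  # Conv3d followed by a default ADN block

# A plain convolution with no extra norm/activation, leaving those to the
# preceding ADN("NA", ...) that HighResBlock builds explicitly.
conv_only = Convolution(
    dimensions=3, in_channels=16, out_channels=16, kernel_size=3, bias=False,
    conv_only=True,
)
print(conv_only)  # just the Conv3d
```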
[ { "content": "# Copyright 2020 - 2021 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Dict, Optional, Sequence, Tuple, Union\n\nimport torch\nimport torch.nn as nn\n\nfrom monai.networks.blocks import ADN, Convolution\nfrom monai.networks.layers.simplelayers import ChannelPad\nfrom monai.utils import ChannelMatching\n\n__all__ = [\"HighResBlock\", \"HighResNet\"]\n\nDEFAULT_LAYER_PARAMS_3D = (\n # initial conv layer\n {\"name\": \"conv_0\", \"n_features\": 16, \"kernel_size\": 3},\n # residual blocks\n {\"name\": \"res_1\", \"n_features\": 16, \"kernels\": (3, 3), \"repeat\": 3},\n {\"name\": \"res_2\", \"n_features\": 32, \"kernels\": (3, 3), \"repeat\": 3},\n {\"name\": \"res_3\", \"n_features\": 64, \"kernels\": (3, 3), \"repeat\": 3},\n # final conv layers\n {\"name\": \"conv_1\", \"n_features\": 80, \"kernel_size\": 1},\n {\"name\": \"conv_2\", \"kernel_size\": 1},\n)\n\n\nclass HighResBlock(nn.Module):\n def __init__(\n self,\n spatial_dims: int,\n in_channels: int,\n out_channels: int,\n kernels: Sequence[int] = (3, 3),\n dilation: Union[Sequence[int], int] = 1,\n norm_type: Union[Tuple, str] = (\"batch\", {\"affine\": True}),\n acti_type: Union[Tuple, str] = (\"relu\", {\"inplace\": True}),\n bias: bool = False,\n channel_matching: Union[ChannelMatching, str] = ChannelMatching.PAD,\n ) -> None:\n \"\"\"\n Args:\n spatial_dims: number of spatial dimensions of the input image.\n in_channels: number of input channels.\n out_channels: number of output channels.\n kernels: each integer k in `kernels` corresponds to a convolution layer with kernel size k.\n dilation: spacing between kernel elements.\n norm_type: feature normalization type and arguments.\n Defaults to ``(\"batch\", {\"affine\": True})``.\n acti_type: {``\"relu\"``, ``\"prelu\"``, ``\"relu6\"``}\n Non-linear activation using ReLU or PReLU. Defaults to ``\"relu\"``.\n bias: whether to have a bias term in convolution blocks. Defaults to False.\n According to `Performance Tuning Guide <https://pytorch.org/tutorials/recipes/recipes/tuning_guide.html>`_,\n if a conv layer is directly followed by a batch norm layer, bias should be False.\n channel_matching: {``\"pad\"``, ``\"project\"``}\n Specifies handling residual branch and conv branch channel mismatches. Defaults to ``\"pad\"``.\n\n - ``\"pad\"``: with zero padding.\n - ``\"project\"``: with a trainable conv with kernel size one.\n\n Raises:\n ValueError: When ``channel_matching=pad`` and ``in_channels > out_channels``. 
Incompatible values.\n\n \"\"\"\n super(HighResBlock, self).__init__()\n self.chn_pad = ChannelPad(\n spatial_dims=spatial_dims, in_channels=in_channels, out_channels=out_channels, mode=channel_matching\n )\n\n layers = nn.ModuleList()\n _in_chns, _out_chns = in_channels, out_channels\n\n for kernel_size in kernels:\n layers.append(\n ADN(ordering=\"NA\", in_channels=_in_chns, act=acti_type, norm=norm_type, norm_dim=spatial_dims)\n )\n layers.append(\n Convolution(\n dimensions=spatial_dims,\n in_channels=_in_chns,\n out_channels=_out_chns,\n kernel_size=kernel_size,\n dilation=dilation,\n bias=bias,\n )\n )\n _in_chns = _out_chns\n\n self.layers = nn.Sequential(*layers)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n x_conv: torch.Tensor = self.layers(x)\n return x_conv + torch.as_tensor(self.chn_pad(x))\n\n\nclass HighResNet(nn.Module):\n \"\"\"\n Reimplementation of highres3dnet based on\n Li et al., \"On the compactness, efficiency, and representation of 3D\n convolutional networks: Brain parcellation as a pretext task\", IPMI '17\n\n Adapted from:\n https://github.com/NifTK/NiftyNet/blob/v0.6.0/niftynet/network/highres3dnet.py\n https://github.com/fepegar/highresnet\n\n Args:\n spatial_dims: number of spatial dimensions of the input image.\n in_channels: number of input channels.\n out_channels: number of output channels.\n norm_type: feature normalization type and arguments.\n Defaults to ``(\"batch\", {\"affine\": True})``.\n acti_type: activation type and arguments.\n Defaults to ``(\"relu\", {\"inplace\": True})``.\n dropout_prob: probability of the feature map to be zeroed\n (only applies to the penultimate conv layer).\n bias: whether to have a bias term in convolution blocks. Defaults to False.\n According to `Performance Tuning Guide <https://pytorch.org/tutorials/recipes/recipes/tuning_guide.html>`_,\n if a conv layer is directly followed by a batch norm layer, bias should be False.\n layer_params: specifying key parameters of each layer/block.\n channel_matching: {``\"pad\"``, ``\"project\"``}\n Specifies handling residual branch and conv branch channel mismatches. 
Defaults to ``\"pad\"``.\n\n - ``\"pad\"``: with zero padding.\n - ``\"project\"``: with a trainable conv with kernel size one.\n \"\"\"\n\n def __init__(\n self,\n spatial_dims: int = 3,\n in_channels: int = 1,\n out_channels: int = 1,\n norm_type: Union[str, tuple] = (\"batch\", {\"affine\": True}),\n acti_type: Union[str, tuple] = (\"relu\", {\"inplace\": True}),\n dropout_prob: Optional[Union[Tuple, str, float]] = 0.0,\n bias: bool = False,\n layer_params: Sequence[Dict] = DEFAULT_LAYER_PARAMS_3D,\n channel_matching: Union[ChannelMatching, str] = ChannelMatching.PAD,\n ) -> None:\n\n super(HighResNet, self).__init__()\n blocks = nn.ModuleList()\n\n # initial conv layer\n params = layer_params[0]\n _in_chns, _out_chns = in_channels, params[\"n_features\"]\n blocks.append(\n Convolution(\n dimensions=spatial_dims,\n in_channels=_in_chns,\n out_channels=_out_chns,\n kernel_size=params[\"kernel_size\"],\n adn_ordering=\"NA\",\n act=acti_type,\n norm=norm_type,\n bias=bias,\n )\n )\n\n # residual blocks\n for (idx, params) in enumerate(layer_params[1:-2]): # res blocks except the 1st and last two conv layers.\n _in_chns, _out_chns = _out_chns, params[\"n_features\"]\n _dilation = 2 ** idx\n for _ in range(params[\"repeat\"]):\n blocks.append(\n HighResBlock(\n spatial_dims=spatial_dims,\n in_channels=_in_chns,\n out_channels=_out_chns,\n kernels=params[\"kernels\"],\n dilation=_dilation,\n norm_type=norm_type,\n acti_type=acti_type,\n bias=bias,\n channel_matching=channel_matching,\n )\n )\n _in_chns = _out_chns\n\n # final conv layers\n params = layer_params[-2]\n _in_chns, _out_chns = _out_chns, params[\"n_features\"]\n blocks.append(\n Convolution(\n dimensions=spatial_dims,\n in_channels=_in_chns,\n out_channels=_out_chns,\n kernel_size=params[\"kernel_size\"],\n adn_ordering=\"NAD\",\n act=acti_type,\n norm=norm_type,\n bias=bias,\n dropout=dropout_prob,\n )\n )\n\n params = layer_params[-1]\n _in_chns = _out_chns\n blocks.append(\n Convolution(\n dimensions=spatial_dims,\n in_channels=_in_chns,\n out_channels=out_channels,\n kernel_size=params[\"kernel_size\"],\n adn_ordering=\"NAD\",\n act=acti_type,\n norm=norm_type,\n bias=bias,\n dropout=dropout_prob,\n )\n )\n\n self.blocks = nn.Sequential(*blocks)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n return torch.as_tensor(self.blocks(x))\n", "path": "monai/networks/nets/highresnet.py" } ]
[ { "content": "# Copyright 2020 - 2021 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Dict, Optional, Sequence, Tuple, Union\n\nimport torch\nimport torch.nn as nn\n\nfrom monai.networks.blocks import ADN, Convolution\nfrom monai.networks.layers.simplelayers import ChannelPad\nfrom monai.utils import ChannelMatching\n\n__all__ = [\"HighResBlock\", \"HighResNet\"]\n\nDEFAULT_LAYER_PARAMS_3D = (\n # initial conv layer\n {\"name\": \"conv_0\", \"n_features\": 16, \"kernel_size\": 3},\n # residual blocks\n {\"name\": \"res_1\", \"n_features\": 16, \"kernels\": (3, 3), \"repeat\": 3},\n {\"name\": \"res_2\", \"n_features\": 32, \"kernels\": (3, 3), \"repeat\": 3},\n {\"name\": \"res_3\", \"n_features\": 64, \"kernels\": (3, 3), \"repeat\": 3},\n # final conv layers\n {\"name\": \"conv_1\", \"n_features\": 80, \"kernel_size\": 1},\n {\"name\": \"conv_2\", \"kernel_size\": 1},\n)\n\n\nclass HighResBlock(nn.Module):\n def __init__(\n self,\n spatial_dims: int,\n in_channels: int,\n out_channels: int,\n kernels: Sequence[int] = (3, 3),\n dilation: Union[Sequence[int], int] = 1,\n norm_type: Union[Tuple, str] = (\"batch\", {\"affine\": True}),\n acti_type: Union[Tuple, str] = (\"relu\", {\"inplace\": True}),\n bias: bool = False,\n channel_matching: Union[ChannelMatching, str] = ChannelMatching.PAD,\n ) -> None:\n \"\"\"\n Args:\n spatial_dims: number of spatial dimensions of the input image.\n in_channels: number of input channels.\n out_channels: number of output channels.\n kernels: each integer k in `kernels` corresponds to a convolution layer with kernel size k.\n dilation: spacing between kernel elements.\n norm_type: feature normalization type and arguments.\n Defaults to ``(\"batch\", {\"affine\": True})``.\n acti_type: {``\"relu\"``, ``\"prelu\"``, ``\"relu6\"``}\n Non-linear activation using ReLU or PReLU. Defaults to ``\"relu\"``.\n bias: whether to have a bias term in convolution blocks. Defaults to False.\n According to `Performance Tuning Guide <https://pytorch.org/tutorials/recipes/recipes/tuning_guide.html>`_,\n if a conv layer is directly followed by a batch norm layer, bias should be False.\n channel_matching: {``\"pad\"``, ``\"project\"``}\n Specifies handling residual branch and conv branch channel mismatches. Defaults to ``\"pad\"``.\n\n - ``\"pad\"``: with zero padding.\n - ``\"project\"``: with a trainable conv with kernel size one.\n\n Raises:\n ValueError: When ``channel_matching=pad`` and ``in_channels > out_channels``. 
Incompatible values.\n\n \"\"\"\n super(HighResBlock, self).__init__()\n self.chn_pad = ChannelPad(\n spatial_dims=spatial_dims, in_channels=in_channels, out_channels=out_channels, mode=channel_matching\n )\n\n layers = nn.ModuleList()\n _in_chns, _out_chns = in_channels, out_channels\n\n for kernel_size in kernels:\n layers.append(\n ADN(ordering=\"NA\", in_channels=_in_chns, act=acti_type, norm=norm_type, norm_dim=spatial_dims)\n )\n layers.append(\n Convolution(\n dimensions=spatial_dims,\n in_channels=_in_chns,\n out_channels=_out_chns,\n kernel_size=kernel_size,\n dilation=dilation,\n bias=bias,\n conv_only=True,\n )\n )\n _in_chns = _out_chns\n\n self.layers = nn.Sequential(*layers)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n x_conv: torch.Tensor = self.layers(x)\n return x_conv + torch.as_tensor(self.chn_pad(x))\n\n\nclass HighResNet(nn.Module):\n \"\"\"\n Reimplementation of highres3dnet based on\n Li et al., \"On the compactness, efficiency, and representation of 3D\n convolutional networks: Brain parcellation as a pretext task\", IPMI '17\n\n Adapted from:\n https://github.com/NifTK/NiftyNet/blob/v0.6.0/niftynet/network/highres3dnet.py\n https://github.com/fepegar/highresnet\n\n Args:\n spatial_dims: number of spatial dimensions of the input image.\n in_channels: number of input channels.\n out_channels: number of output channels.\n norm_type: feature normalization type and arguments.\n Defaults to ``(\"batch\", {\"affine\": True})``.\n acti_type: activation type and arguments.\n Defaults to ``(\"relu\", {\"inplace\": True})``.\n dropout_prob: probability of the feature map to be zeroed\n (only applies to the penultimate conv layer).\n bias: whether to have a bias term in convolution blocks. Defaults to False.\n According to `Performance Tuning Guide <https://pytorch.org/tutorials/recipes/recipes/tuning_guide.html>`_,\n if a conv layer is directly followed by a batch norm layer, bias should be False.\n layer_params: specifying key parameters of each layer/block.\n channel_matching: {``\"pad\"``, ``\"project\"``}\n Specifies handling residual branch and conv branch channel mismatches. 
Defaults to ``\"pad\"``.\n\n - ``\"pad\"``: with zero padding.\n - ``\"project\"``: with a trainable conv with kernel size one.\n \"\"\"\n\n def __init__(\n self,\n spatial_dims: int = 3,\n in_channels: int = 1,\n out_channels: int = 1,\n norm_type: Union[str, tuple] = (\"batch\", {\"affine\": True}),\n acti_type: Union[str, tuple] = (\"relu\", {\"inplace\": True}),\n dropout_prob: Optional[Union[Tuple, str, float]] = 0.0,\n bias: bool = False,\n layer_params: Sequence[Dict] = DEFAULT_LAYER_PARAMS_3D,\n channel_matching: Union[ChannelMatching, str] = ChannelMatching.PAD,\n ) -> None:\n\n super(HighResNet, self).__init__()\n blocks = nn.ModuleList()\n\n # initial conv layer\n params = layer_params[0]\n _in_chns, _out_chns = in_channels, params[\"n_features\"]\n blocks.append(\n Convolution(\n dimensions=spatial_dims,\n in_channels=_in_chns,\n out_channels=_out_chns,\n kernel_size=params[\"kernel_size\"],\n adn_ordering=\"NA\",\n act=acti_type,\n norm=norm_type,\n bias=bias,\n )\n )\n\n # residual blocks\n for (idx, params) in enumerate(layer_params[1:-2]): # res blocks except the 1st and last two conv layers.\n _in_chns, _out_chns = _out_chns, params[\"n_features\"]\n _dilation = 2 ** idx\n for _ in range(params[\"repeat\"]):\n blocks.append(\n HighResBlock(\n spatial_dims=spatial_dims,\n in_channels=_in_chns,\n out_channels=_out_chns,\n kernels=params[\"kernels\"],\n dilation=_dilation,\n norm_type=norm_type,\n acti_type=acti_type,\n bias=bias,\n channel_matching=channel_matching,\n )\n )\n _in_chns = _out_chns\n\n # final conv layers\n params = layer_params[-2]\n _in_chns, _out_chns = _out_chns, params[\"n_features\"]\n blocks.append(\n Convolution(\n dimensions=spatial_dims,\n in_channels=_in_chns,\n out_channels=_out_chns,\n kernel_size=params[\"kernel_size\"],\n adn_ordering=\"NAD\",\n act=acti_type,\n norm=norm_type,\n bias=bias,\n dropout=dropout_prob,\n )\n )\n\n params = layer_params[-1]\n _in_chns = _out_chns\n blocks.append(\n Convolution(\n dimensions=spatial_dims,\n in_channels=_in_chns,\n out_channels=out_channels,\n kernel_size=params[\"kernel_size\"],\n adn_ordering=\"NAD\",\n act=acti_type,\n norm=norm_type,\n bias=bias,\n dropout=dropout_prob,\n )\n )\n\n self.blocks = nn.Sequential(*blocks)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n return torch.as_tensor(self.blocks(x))\n", "path": "monai/networks/nets/highresnet.py" } ]
diff --git a/monai/networks/nets/highresnet.py b/monai/networks/nets/highresnet.py
index 12908a9119..f644a7835a 100644
--- a/monai/networks/nets/highresnet.py
+++ b/monai/networks/nets/highresnet.py
@@ -90,6 +90,7 @@ def __init__(
                     kernel_size=kernel_size,
                     dilation=dilation,
                     bias=bias,
+                    conv_only=True,
                 )
             )
             _in_chns = _out_chns
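Reading the block construction in the file above (an `ADN(ordering="NA")`, i.e. norm then activation, is appended before each `Convolution`), the apparent intent of the `conv_only=True` patch is to keep that inner convolution a bare conv, so the block does not get a second round of normalisation/activation from the `Convolution` wrapper. The sketch below is a hand-written, torch-only illustration of the resulting pre-activation residual ordering; it is not MONAI code and the sizes are arbitrary.

```python
import torch
import torch.nn as nn

# Pre-activation ordering used by the block: norm -> act -> (bare) conv, repeated,
# with the input added back at the end (cf. HighResBlock.forward above).
channels = 16
block = nn.Sequential(
    nn.BatchNorm3d(channels), nn.ReLU(inplace=True), nn.Conv3d(channels, channels, 3, padding=1),
    nn.BatchNorm3d(channels), nn.ReLU(inplace=True), nn.Conv3d(channels, channels, 3, padding=1),
)

x = torch.randn(1, channels, 8, 8, 8)
out = block(x) + x          # residual connection
print(out.shape)            # torch.Size([1, 16, 8, 8, 8])
```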
ansible__ansible-31337
nxos_config intended should be intended_config
##### ISSUE TYPE
- Documentation Report

##### COMPONENT NAME
nxos_config module
http://docs.ansible.com/ansible/latest/nxos_config_module.html

##### ANSIBLE VERSION
N/A

##### CONFIGURATION
N/A

##### OS / ENVIRONMENT
N/A

##### SUMMARY
The example shown in the documentation is wrong, it won't actually work (intended versus intended_config). The intended_config is even shown in the parameters above (which are right).

##### STEPS TO REPRODUCE
```yaml
- name: diff the running-config against a provided config
  nxos_config:
    diff_against: intended
    intended: "{{ lookup('file', 'master.cfg') }}"
```

##### EXPECTED RESULTS
Playbook can run without failing on
```
"msg": "Unsupported parameters for (nxos_config) module: intended Supported parameters include: after,backup,before,defaults,diff_against,diff_ignore_lines,force,host,intended_config,lines,match,parents,password,port,provider,replace,running_config,save,save_when,src,ssh_keyfile,timeout,transport,use_ssl,username,validate_certs"
```

##### ACTUAL RESULTS
```
[root@localhost ~]# ansible-playbook --diff prompt_example.yml

PLAY [cisco] ********************************************************************************

TASK [diff the running-config against a provided config] ***********************************
[DEPRECATION WARNING]: Param 'transport' is deprecated. See the module docs for more information. This feature will be removed in version 2.9. Deprecation warnings can be disabled by setting deprecation_warnings=False in ansible.cfg.
fatal: [n9k]: FAILED!
=> {"changed": false, "failed": true, "msg": "Unsupported parameters for (nxos_config) module: intended Supported parameters include: after,backup,before,defaults,diff_against,diff_ignore_lines,force,host,intended_config,lines,match,parents,password,port,provider,replace,running_config,save,save_when,src,ssh_keyfile,timeout,transport,use_ssl,username,validate_certs"} to retry, use: --limit @/root/prompt_example.retry PLAY RECAP ****************************************************************************************************************************************************************************************************************************** n9k : ok=0 changed=0 unreachable=0 failed=1 [root@localhost ~]# ansible-playbook --diff prompt_example.yml -vvvv ansible-playbook 2.5.0 config file = /root/ansible.cfg configured module search path = [u'/usr/lib/python2.7/site-packages/napalm_ansible'] ansible python module location = /usr/lib/python2.7/site-packages/ansible executable location = /usr/bin/ansible-playbook python version = 2.7.5 (default, Aug 4 2017, 00:39:18) [GCC 4.8.5 20150623 (Red Hat 4.8.5-16)] Using /root/ansible.cfg as config file setting up inventory plugins Set default localhost to localhost Parsed /root/hosts inventory source with ini plugin Loading callback plugin default of type stdout, v2.0 from /usr/lib/python2.7/site-packages/ansible/plugins/callback/__init__.pyc PLAYBOOK: prompt_example.yml ************************************************************************************************************************************************************************************************************ 1 plays in prompt_example.yml PLAY [cisco] **************************************************************************************************************************************************************************************************************************** META: ran handlers TASK [diff the running-config against a provided config] ******************************************************************************************************************************************************************************** task path: /root/prompt_example.yml:5 File lookup using /root/nxos_base.cfg as file <192.168.2.3> connection transport is cli <192.168.2.3> using connection plugin network_cli <192.168.2.3> socket_path: /root/.ansible/pc/f7aec45378 Using module file /usr/lib/python2.7/site-packages/ansible/modules/network/nxos/nxos_config.py <192.168.2.3> ESTABLISH LOCAL CONNECTION FOR USER: root <192.168.2.3> EXEC /bin/sh -c 'echo ~ && sleep 0' <192.168.2.3> EXEC /bin/sh -c '( umask 77 && mkdir -p "` echo /root/.ansible/tmp/ansible-tmp-1506529319.72-174572742773875 `" && echo ansible-tmp-1506529319.72-174572742773875="` echo /root/.ansible/tmp/ansible-tmp-1506529319.72-174572742773875 `" ) && sleep 0' <192.168.2.3> PUT /tmp/tmpVp9oMN TO /root/.ansible/tmp/ansible-tmp-1506529319.72-174572742773875/nxos_config.py <192.168.2.3> EXEC /bin/sh -c 'chmod u+x /root/.ansible/tmp/ansible-tmp-1506529319.72-174572742773875/ /root/.ansible/tmp/ansible-tmp-1506529319.72-174572742773875/nxos_config.py && sleep 0' <192.168.2.3> EXEC /bin/sh -c '/usr/bin/python /root/.ansible/tmp/ansible-tmp-1506529319.72-174572742773875/nxos_config.py; rm -rf "/root/.ansible/tmp/ansible-tmp-1506529319.72-174572742773875/" > /dev/null 2>&1 && sleep 0' [DEPRECATION WARNING]: Param 'transport' is deprecated. See the module docs for more information. This feature will be removed in version 2.9. 
Deprecation warnings can be disabled by setting deprecation_warnings=False in ansible.cfg. fatal: [n9k]: FAILED! => { "changed": false, "failed": true, "invocation": { "module_args": { "diff_against": "intended", "intended": "!Command: show running-config\n!Time: Tue Sep 19 22:45:38 2017\n\nversion 7.0(3)I7(1)\nvdc switch id 1\n limit-resource vlan minimum 16 maximum 4094\n limit-resource vrf minimum 2 maximum 4096\n limit-resource port-channel minimum 0 maximum 511\n limit-resource u4route-mem minimum 248 maximum 248\n limit-resource u6route-mem minimum 96 maximum 96\n limit-resource m4route-mem minimum 58 maximum 58\n limit-resource m6route-mem minimum 8 maximum 8\nfeature nxapi\nfeature scp-server\n\nusername admin password 5 $5$2RzjXhgx$xZRU9GHh6fdN2koy1r6pJMIXpTIo2tP.ZZ6YI7Z11Y3 role network-admin\nusername exampleuser password 5 $5$itSlZrxc$gixqzCwyQjO4SBjrMsu2k2qkWD1H7fygx7qYuzhgFp8 role network-admin\n\nbanner motd @\nthis is a fake banner\n@\n\nip domain-lookup\nsnmp-server user admin network-admin auth md5 0xc1ddb036df145c775510428fe3c6b553 priv 0xc1ddb036df145c775510428fe3c6b553 localizedkey\nsnmp-server user exampleuser network-admin auth sha 0x7071c014b53743ca568dd2c3fd70005c5e21db5e localizedkey\nrmon event 1 description FATAL(1) owner PMON@FATAL\nrmon event 2 description CRITICAL(2) owner PMON@CRITICAL\nrmon event 3 description ERROR(3) owner PMON@ERROR\nrmon event 4 description WARNING(4) owner PMON@WARNING\nrmon event 5 description INFORMATION(5) owner PMON@INFO\n\nvlan 1,10\nvlan 10\n name STORAGE\n\nvrf context management\n\ninterface Ethernet1/1\n\ninterface Ethernet1/2\n\ninterface Ethernet1/3\n\ninterface Ethernet1/4\n\ninterface Ethernet1/5\n\ninterface Ethernet1/6\n\ninterface Ethernet1/7\n\ninterface Ethernet1/8\n\ninterface Ethernet1/9\n\ninterface Ethernet1/10\n\ninterface Ethernet1/11\n\ninterface Ethernet1/12\n\ninterface Ethernet1/13\n\ninterface Ethernet1/14\n\ninterface Ethernet1/15\n\ninterface Ethernet1/16\n\ninterface Ethernet1/17\n\ninterface Ethernet1/18\n\ninterface Ethernet1/19\n\ninterface Ethernet1/20\n no switchport\n ip address 172.16.1.1/24\n\ninterface Ethernet1/21\n\ninterface Ethernet1/22\n\ninterface Ethernet1/23\n\ninterface Ethernet1/24\n\ninterface Ethernet1/25\n\ninterface Ethernet1/26\n\ninterface Ethernet1/27\n\ninterface Ethernet1/28\n\ninterface Ethernet1/29\n\ninterface Ethernet1/30\n\ninterface Ethernet1/31\n\ninterface Ethernet1/32\n\ninterface Ethernet1/33\n\ninterface Ethernet1/34\n\ninterface Ethernet1/35\n\ninterface Ethernet1/36\n\ninterface Ethernet1/37\n\ninterface Ethernet1/38\n\ninterface Ethernet1/39\n\ninterface Ethernet1/40\n\ninterface Ethernet1/41\n\ninterface Ethernet1/42\n\ninterface Ethernet1/43\n\ninterface Ethernet1/44\n\ninterface Ethernet1/45\n\ninterface Ethernet1/46\n\ninterface Ethernet1/47\n\ninterface Ethernet1/48\n\ninterface Ethernet1/49\n\ninterface Ethernet1/50\n\ninterface Ethernet1/51\n\ninterface Ethernet1/52\n\ninterface Ethernet1/53\n\ninterface Ethernet1/54\n\ninterface Ethernet1/55\n\ninterface Ethernet1/56\n\ninterface Ethernet1/57\n\ninterface Ethernet1/58\n\ninterface Ethernet1/59\n\ninterface Ethernet1/60\n\ninterface Ethernet1/61\n\ninterface Ethernet1/62\n\ninterface Ethernet1/63\n\ninterface Ethernet1/64\n description this is a port change\n\ninterface mgmt0\n description this is the mgmt0 port\n vrf member management\n ip address 192.168.2.3/24\nline console\nline vty\nboot nxos bootflash:/nxos.7.0.3.I7.1.bin \nip route 0.0.0.0/0 192.168.2.1", "provider": { "host": null, "password": 
"Bullf00d", "port": null, "ssh_keyfile": null, "timeout": null, "transport": "cli", "use_ssl": null, "username": "admin", "validate_certs": null }, "transport": "cli" } }, "msg": "Unsupported parameters for (nxos_config) module: intended Supported parameters include: after,backup,before,defaults,diff_against,diff_ignore_lines,force,host,intended_config,lines,match,parents,password,port,provider,replace,running_config,save,save_when,src,ssh_keyfile,timeout,transport,use_ssl,username,validate_certs" } to retry, use: --limit @/root/prompt_example.retry PLAY RECAP ****************************************************************************************************************************************************************************************************************************** n9k : ok=0 changed=0 unreachable=0 failed=1 ```
[ { "content": "#!/usr/bin/python\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\n#\n\nANSIBLE_METADATA = {'metadata_version': '1.1',\n 'status': ['preview'],\n 'supported_by': 'network'}\n\n\nDOCUMENTATION = \"\"\"\n---\nmodule: nxos_config\nextends_documentation_fragment: nxos\nversion_added: \"2.1\"\nauthor: \"Peter Sprygada (@privateip)\"\nshort_description: Manage Cisco NXOS configuration sections\ndescription:\n - Cisco NXOS configurations use a simple block indent file syntax\n for segmenting configuration into sections. This module provides\n an implementation for working with NXOS configuration sections in\n a deterministic way. This module works with either CLI or NXAPI\n transports.\noptions:\n lines:\n description:\n - The ordered set of commands that should be configured in the\n section. The commands must be the exact same commands as found\n in the device running-config. Be sure to note the configuration\n command syntax as some commands are automatically modified by the\n device config parser.\n required: false\n default: null\n parents:\n description:\n - The ordered set of parents that uniquely identify the section\n the commands should be checked against. If the parents argument\n is omitted, the commands are checked against the set of top\n level or global commands.\n required: false\n default: null\n src:\n description:\n - The I(src) argument provides a path to the configuration file\n to load into the remote system. The path can either be a full\n system path to the configuration file if the value starts with /\n or relative to the root of the implemented role or playbook.\n This argument is mutually exclusive with the I(lines) and\n I(parents) arguments.\n required: false\n default: null\n version_added: \"2.2\"\n before:\n description:\n - The ordered set of commands to push on to the command stack if\n a change needs to be made. This allows the playbook designer\n the opportunity to perform configuration commands prior to pushing\n any changes without affecting how the set of commands are matched\n against the system.\n required: false\n default: null\n after:\n description:\n - The ordered set of commands to append to the end of the command\n stack if a change needs to be made. Just like with I(before) this\n allows the playbook designer to append a set of commands to be\n executed after the command set.\n required: false\n default: null\n match:\n description:\n - Instructs the module on the way to perform the matching of\n the set of commands against the current device config. If\n match is set to I(line), commands are matched line by line. If\n match is set to I(strict), command lines are matched with respect\n to position. If match is set to I(exact), command lines\n must be an equal match. 
Finally, if match is set to I(none), the\n module will not attempt to compare the source configuration with\n the running configuration on the remote device.\n required: false\n default: line\n choices: ['line', 'strict', 'exact', 'none']\n replace:\n description:\n - Instructs the module on the way to perform the configuration\n on the device. If the replace argument is set to I(line) then\n the modified lines are pushed to the device in configuration\n mode. If the replace argument is set to I(block) then the entire\n command block is pushed to the device in configuration mode if any\n line is not correct.\n required: false\n default: lineo\n choices: ['line', 'block']\n force:\n description:\n - The force argument instructs the module to not consider the\n current devices running-config. When set to true, this will\n cause the module to push the contents of I(src) into the device\n without first checking if already configured.\n - Note this argument should be considered deprecated. To achieve\n the equivalent, set the C(match=none) which is idempotent. This argument\n will be removed in a future release.\n required: false\n default: false\n type: bool\n backup:\n description:\n - This argument will cause the module to create a full backup of\n the current C(running-config) from the remote device before any\n changes are made. The backup file is written to the C(backup)\n folder in the playbook root directory. If the directory does not\n exist, it is created.\n required: false\n default: false\n type: bool\n version_added: \"2.2\"\n running_config:\n description:\n - The module, by default, will connect to the remote device and\n retrieve the current running-config to use as a base for comparing\n against the contents of source. There are times when it is not\n desirable to have the task get the current running-config for\n every task in a playbook. The I(running_config) argument allows the\n implementer to pass in the configuration to use as the base\n config for comparison.\n required: false\n default: null\n aliases: ['config']\n version_added: \"2.4\"\n defaults:\n description:\n - The I(defaults) argument will influence how the running-config\n is collected from the device. When the value is set to true,\n the command used to collect the running-config is append with\n the all keyword. When the value is set to false, the command\n is issued without the all keyword\n required: false\n default: false\n type: bool\n version_added: \"2.2\"\n save:\n description:\n - The C(save) argument instructs the module to save the\n running-config to startup-config. This operation is performed\n after any changes are made to the current running config. If\n no changes are made, the configuration is still saved to the\n startup config. This option will always cause the module to\n return changed.\n - This option is deprecated as of Ansible 2.4, use C(save_when)\n required: false\n default: false\n type: bool\n version_added: \"2.2\"\n save_when:\n description:\n - When changes are made to the device running-configuration, the\n changes are not copied to non-volatile storage by default. Using\n this argument will change that before. If the argument is set to\n I(always), then the running-config will always be copied to the\n startup-config and the I(modified) flag will always be set to\n True. If the argument is set to I(modified), then the running-config\n will only be copied to the startup-config if it has changed since\n the last save to startup-config. 
If the argument is set to\n I(never), the running-config will never be copied to the\n startup-config\n required: false\n default: never\n choices: ['always', 'never', 'modified']\n version_added: \"2.4\"\n diff_against:\n description:\n - When using the C(ansible-playbook --diff) command line argument\n the module can generate diffs against different sources.\n - When this option is configure as I(startup), the module will return\n the diff of the running-config against the startup-config.\n - When this option is configured as I(intended), the module will\n return the diff of the running-config against the configuration\n provided in the C(intended_config) argument.\n - When this option is configured as I(running), the module will\n return the before and after diff of the running-config with respect\n to any changes made to the device configuration.\n required: false\n default: startup\n choices: ['startup', 'intended', 'running']\n version_added: \"2.4\"\n diff_ignore_lines:\n description:\n - Use this argument to specify one or more lines that should be\n ignored during the diff. This is used for lines in the configuration\n that are automatically updated by the system. This argument takes\n a list of regular expressions or exact line matches.\n required: false\n version_added: \"2.4\"\n intended_config:\n description:\n - The C(intended_config) provides the master configuration that\n the node should conform to and is used to check the final\n running-config against. This argument will not modify any settings\n on the remote device and is strictly used to check the compliance\n of the current device's configuration against. When specifying this\n argument, the task should also modify the C(diff_against) value and\n set it to I(intended).\n required: false\n version_added: \"2.4\"\n\"\"\"\n\nEXAMPLES = \"\"\"\n---\n- name: configure top level configuration and save it\n nxos_config:\n lines: hostname {{ inventory_hostname }}\n save_when: modified\n\n- name: diff the running-config against a provided config\n nxos_config:\n diff_against: intended\n intended: \"{{ lookup('file', 'master.cfg') }}\"\n\n- nxos_config:\n lines:\n - 10 permit ip 1.1.1.1/32 any log\n - 20 permit ip 2.2.2.2/32 any log\n - 30 permit ip 3.3.3.3/32 any log\n - 40 permit ip 4.4.4.4/32 any log\n - 50 permit ip 5.5.5.5/32 any log\n parents: ip access-list test\n before: no ip access-list test\n match: exact\n\n- nxos_config:\n lines:\n - 10 permit ip 1.1.1.1/32 any log\n - 20 permit ip 2.2.2.2/32 any log\n - 30 permit ip 3.3.3.3/32 any log\n - 40 permit ip 4.4.4.4/32 any log\n parents: ip access-list test\n before: no ip access-list test\n replace: block\n\"\"\"\n\nRETURN = \"\"\"\ncommands:\n description: The set of commands that will be pushed to the remote device\n returned: always\n type: list\n sample: ['hostname foo', 'vlan 1', 'name default']\nupdates:\n description: The set of commands that will be pushed to the remote device\n returned: always\n type: list\n sample: ['hostname foo', 'vlan 1', 'name default']\nbackup_path:\n description: The full path to the backup file\n returned: when backup is yes\n type: string\n sample: /playbooks/ansible/backup/nxos_config.2016-07-16@22:28:34\n\"\"\"\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible.module_utils.netcfg import NetworkConfig, dumps\nfrom ansible.module_utils.nxos import get_config, load_config, run_commands\nfrom ansible.module_utils.nxos import nxos_argument_spec\nfrom ansible.module_utils.nxos import check_args as 
nxos_check_args\n\n\ndef get_running_config(module, config=None):\n contents = module.params['running_config']\n if not contents:\n if not module.params['defaults'] and config:\n contents = config\n else:\n flags = ['all']\n contents = get_config(module, flags=flags)\n return NetworkConfig(indent=2, contents=contents)\n\n\ndef get_candidate(module):\n candidate = NetworkConfig(indent=2)\n if module.params['src']:\n candidate.load(module.params['src'])\n elif module.params['lines']:\n parents = module.params['parents'] or list()\n candidate.add(module.params['lines'], parents=parents)\n return candidate\n\n\ndef main():\n \"\"\" main entry point for module execution\n \"\"\"\n argument_spec = dict(\n src=dict(type='path'),\n\n lines=dict(aliases=['commands'], type='list'),\n parents=dict(type='list'),\n\n before=dict(type='list'),\n after=dict(type='list'),\n\n match=dict(default='line', choices=['line', 'strict', 'exact', 'none']),\n replace=dict(default='line', choices=['line', 'block']),\n\n running_config=dict(aliases=['config']),\n intended_config=dict(),\n\n defaults=dict(type='bool', default=False),\n backup=dict(type='bool', default=False),\n\n save_when=dict(choices=['always', 'never', 'modified'], default='never'),\n\n diff_against=dict(choices=['running', 'startup', 'intended']),\n diff_ignore_lines=dict(type='list'),\n\n # save is deprecated as of ans2.4, use save_when instead\n save=dict(default=False, type='bool', removed_in_version='2.4'),\n\n # force argument deprecated in ans2.2\n force=dict(default=False, type='bool', removed_in_version='2.2')\n )\n\n argument_spec.update(nxos_argument_spec)\n\n mutually_exclusive = [('lines', 'src'),\n ('save', 'save_when')]\n\n required_if = [('match', 'strict', ['lines']),\n ('match', 'exact', ['lines']),\n ('replace', 'block', ['lines']),\n ('diff_against', 'intended', ['intended_config'])]\n\n module = AnsibleModule(argument_spec=argument_spec,\n mutually_exclusive=mutually_exclusive,\n required_if=required_if,\n supports_check_mode=True)\n\n warnings = list()\n nxos_check_args(module, warnings)\n\n result = {'changed': False, 'warnings': warnings}\n\n config = None\n\n if module.params['backup'] or (module._diff and module.params['diff_against'] == 'running'):\n contents = get_config(module)\n config = NetworkConfig(indent=2, contents=contents)\n if module.params['backup']:\n result['__backup__'] = contents\n\n if any((module.params['src'], module.params['lines'])):\n match = module.params['match']\n replace = module.params['replace']\n\n candidate = get_candidate(module)\n\n if match != 'none':\n config = get_running_config(module, config)\n path = module.params['parents']\n configobjs = candidate.difference(config, match=match, replace=replace, path=path)\n else:\n configobjs = candidate.items\n\n if configobjs:\n commands = dumps(configobjs, 'commands').split('\\n')\n\n if module.params['before']:\n commands[:0] = module.params['before']\n\n if module.params['after']:\n commands.extend(module.params['after'])\n\n result['commands'] = commands\n result['updates'] = commands\n\n if not module.check_mode:\n load_config(module, commands)\n\n result['changed'] = True\n\n running_config = None\n startup_config = None\n\n diff_ignore_lines = module.params['diff_ignore_lines']\n\n if module.params['save']:\n module.params['save_when'] = 'always'\n\n if module.params['save_when'] != 'never':\n output = run_commands(module, ['show running-config', 'show startup-config'])\n\n running_config = NetworkConfig(indent=1, contents=output[0], 
ignore_lines=diff_ignore_lines)\n startup_config = NetworkConfig(indent=1, contents=output[1], ignore_lines=diff_ignore_lines)\n\n if running_config.sha1 != startup_config.sha1 or module.params['save_when'] == 'always':\n result['changed'] = True\n if not module.check_mode:\n cmd = {'command': 'copy running-config startup-config', 'output': 'text'}\n run_commands(module, [cmd])\n else:\n module.warn('Skipping command `copy running-config startup-config` '\n 'due to check_mode. Configuration not copied to '\n 'non-volatile storage')\n\n if module._diff:\n if not running_config:\n output = run_commands(module, 'show running-config')\n contents = output[0]\n else:\n contents = running_config.config_text\n\n # recreate the object in order to process diff_ignore_lines\n running_config = NetworkConfig(indent=1, contents=contents, ignore_lines=diff_ignore_lines)\n\n if module.params['diff_against'] == 'running':\n if module.check_mode:\n module.warn(\"unable to perform diff against running-config due to check mode\")\n contents = None\n else:\n contents = config.config_text\n\n elif module.params['diff_against'] == 'startup':\n if not startup_config:\n output = run_commands(module, 'show startup-config')\n contents = output[0]\n else:\n contents = output[0]\n contents = startup_config.config_text\n\n elif module.params['diff_against'] == 'intended':\n contents = module.params['intended_config']\n\n if contents is not None:\n base_config = NetworkConfig(indent=1, contents=contents, ignore_lines=diff_ignore_lines)\n\n if running_config.sha1 != base_config.sha1:\n result.update({\n 'changed': True,\n 'diff': {'before': str(base_config), 'after': str(running_config)}\n })\n\n\n module.exit_json(**result)\n\n\nif __name__ == '__main__':\n main()\n", "path": "lib/ansible/modules/network/nxos/nxos_config.py" } ]
[ { "content": "#!/usr/bin/python\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\n#\n\nANSIBLE_METADATA = {'metadata_version': '1.1',\n 'status': ['preview'],\n 'supported_by': 'network'}\n\n\nDOCUMENTATION = \"\"\"\n---\nmodule: nxos_config\nextends_documentation_fragment: nxos\nversion_added: \"2.1\"\nauthor: \"Peter Sprygada (@privateip)\"\nshort_description: Manage Cisco NXOS configuration sections\ndescription:\n - Cisco NXOS configurations use a simple block indent file syntax\n for segmenting configuration into sections. This module provides\n an implementation for working with NXOS configuration sections in\n a deterministic way. This module works with either CLI or NXAPI\n transports.\noptions:\n lines:\n description:\n - The ordered set of commands that should be configured in the\n section. The commands must be the exact same commands as found\n in the device running-config. Be sure to note the configuration\n command syntax as some commands are automatically modified by the\n device config parser.\n required: false\n default: null\n parents:\n description:\n - The ordered set of parents that uniquely identify the section\n the commands should be checked against. If the parents argument\n is omitted, the commands are checked against the set of top\n level or global commands.\n required: false\n default: null\n src:\n description:\n - The I(src) argument provides a path to the configuration file\n to load into the remote system. The path can either be a full\n system path to the configuration file if the value starts with /\n or relative to the root of the implemented role or playbook.\n This argument is mutually exclusive with the I(lines) and\n I(parents) arguments.\n required: false\n default: null\n version_added: \"2.2\"\n before:\n description:\n - The ordered set of commands to push on to the command stack if\n a change needs to be made. This allows the playbook designer\n the opportunity to perform configuration commands prior to pushing\n any changes without affecting how the set of commands are matched\n against the system.\n required: false\n default: null\n after:\n description:\n - The ordered set of commands to append to the end of the command\n stack if a change needs to be made. Just like with I(before) this\n allows the playbook designer to append a set of commands to be\n executed after the command set.\n required: false\n default: null\n match:\n description:\n - Instructs the module on the way to perform the matching of\n the set of commands against the current device config. If\n match is set to I(line), commands are matched line by line. If\n match is set to I(strict), command lines are matched with respect\n to position. If match is set to I(exact), command lines\n must be an equal match. 
Finally, if match is set to I(none), the\n module will not attempt to compare the source configuration with\n the running configuration on the remote device.\n required: false\n default: line\n choices: ['line', 'strict', 'exact', 'none']\n replace:\n description:\n - Instructs the module on the way to perform the configuration\n on the device. If the replace argument is set to I(line) then\n the modified lines are pushed to the device in configuration\n mode. If the replace argument is set to I(block) then the entire\n command block is pushed to the device in configuration mode if any\n line is not correct.\n required: false\n default: lineo\n choices: ['line', 'block']\n force:\n description:\n - The force argument instructs the module to not consider the\n current devices running-config. When set to true, this will\n cause the module to push the contents of I(src) into the device\n without first checking if already configured.\n - Note this argument should be considered deprecated. To achieve\n the equivalent, set the C(match=none) which is idempotent. This argument\n will be removed in a future release.\n required: false\n default: false\n type: bool\n backup:\n description:\n - This argument will cause the module to create a full backup of\n the current C(running-config) from the remote device before any\n changes are made. The backup file is written to the C(backup)\n folder in the playbook root directory. If the directory does not\n exist, it is created.\n required: false\n default: false\n type: bool\n version_added: \"2.2\"\n running_config:\n description:\n - The module, by default, will connect to the remote device and\n retrieve the current running-config to use as a base for comparing\n against the contents of source. There are times when it is not\n desirable to have the task get the current running-config for\n every task in a playbook. The I(running_config) argument allows the\n implementer to pass in the configuration to use as the base\n config for comparison.\n required: false\n default: null\n aliases: ['config']\n version_added: \"2.4\"\n defaults:\n description:\n - The I(defaults) argument will influence how the running-config\n is collected from the device. When the value is set to true,\n the command used to collect the running-config is append with\n the all keyword. When the value is set to false, the command\n is issued without the all keyword\n required: false\n default: false\n type: bool\n version_added: \"2.2\"\n save:\n description:\n - The C(save) argument instructs the module to save the\n running-config to startup-config. This operation is performed\n after any changes are made to the current running config. If\n no changes are made, the configuration is still saved to the\n startup config. This option will always cause the module to\n return changed.\n - This option is deprecated as of Ansible 2.4, use C(save_when)\n required: false\n default: false\n type: bool\n version_added: \"2.2\"\n save_when:\n description:\n - When changes are made to the device running-configuration, the\n changes are not copied to non-volatile storage by default. Using\n this argument will change that before. If the argument is set to\n I(always), then the running-config will always be copied to the\n startup-config and the I(modified) flag will always be set to\n True. If the argument is set to I(modified), then the running-config\n will only be copied to the startup-config if it has changed since\n the last save to startup-config. 
If the argument is set to\n I(never), the running-config will never be copied to the\n startup-config\n required: false\n default: never\n choices: ['always', 'never', 'modified']\n version_added: \"2.4\"\n diff_against:\n description:\n - When using the C(ansible-playbook --diff) command line argument\n the module can generate diffs against different sources.\n - When this option is configure as I(startup), the module will return\n the diff of the running-config against the startup-config.\n - When this option is configured as I(intended), the module will\n return the diff of the running-config against the configuration\n provided in the C(intended_config) argument.\n - When this option is configured as I(running), the module will\n return the before and after diff of the running-config with respect\n to any changes made to the device configuration.\n required: false\n default: startup\n choices: ['startup', 'intended', 'running']\n version_added: \"2.4\"\n diff_ignore_lines:\n description:\n - Use this argument to specify one or more lines that should be\n ignored during the diff. This is used for lines in the configuration\n that are automatically updated by the system. This argument takes\n a list of regular expressions or exact line matches.\n required: false\n version_added: \"2.4\"\n intended_config:\n description:\n - The C(intended_config) provides the master configuration that\n the node should conform to and is used to check the final\n running-config against. This argument will not modify any settings\n on the remote device and is strictly used to check the compliance\n of the current device's configuration against. When specifying this\n argument, the task should also modify the C(diff_against) value and\n set it to I(intended).\n required: false\n version_added: \"2.4\"\n\"\"\"\n\nEXAMPLES = \"\"\"\n---\n- name: configure top level configuration and save it\n nxos_config:\n lines: hostname {{ inventory_hostname }}\n save_when: modified\n\n- name: diff the running-config against a provided config\n nxos_config:\n diff_against: intended\n intended_config: \"{{ lookup('file', 'master.cfg') }}\"\n\n- nxos_config:\n lines:\n - 10 permit ip 1.1.1.1/32 any log\n - 20 permit ip 2.2.2.2/32 any log\n - 30 permit ip 3.3.3.3/32 any log\n - 40 permit ip 4.4.4.4/32 any log\n - 50 permit ip 5.5.5.5/32 any log\n parents: ip access-list test\n before: no ip access-list test\n match: exact\n\n- nxos_config:\n lines:\n - 10 permit ip 1.1.1.1/32 any log\n - 20 permit ip 2.2.2.2/32 any log\n - 30 permit ip 3.3.3.3/32 any log\n - 40 permit ip 4.4.4.4/32 any log\n parents: ip access-list test\n before: no ip access-list test\n replace: block\n\"\"\"\n\nRETURN = \"\"\"\ncommands:\n description: The set of commands that will be pushed to the remote device\n returned: always\n type: list\n sample: ['hostname foo', 'vlan 1', 'name default']\nupdates:\n description: The set of commands that will be pushed to the remote device\n returned: always\n type: list\n sample: ['hostname foo', 'vlan 1', 'name default']\nbackup_path:\n description: The full path to the backup file\n returned: when backup is yes\n type: string\n sample: /playbooks/ansible/backup/nxos_config.2016-07-16@22:28:34\n\"\"\"\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible.module_utils.netcfg import NetworkConfig, dumps\nfrom ansible.module_utils.nxos import get_config, load_config, run_commands\nfrom ansible.module_utils.nxos import nxos_argument_spec\nfrom ansible.module_utils.nxos import check_args as 
nxos_check_args\n\n\ndef get_running_config(module, config=None):\n contents = module.params['running_config']\n if not contents:\n if not module.params['defaults'] and config:\n contents = config\n else:\n flags = ['all']\n contents = get_config(module, flags=flags)\n return NetworkConfig(indent=2, contents=contents)\n\n\ndef get_candidate(module):\n candidate = NetworkConfig(indent=2)\n if module.params['src']:\n candidate.load(module.params['src'])\n elif module.params['lines']:\n parents = module.params['parents'] or list()\n candidate.add(module.params['lines'], parents=parents)\n return candidate\n\n\ndef main():\n \"\"\" main entry point for module execution\n \"\"\"\n argument_spec = dict(\n src=dict(type='path'),\n\n lines=dict(aliases=['commands'], type='list'),\n parents=dict(type='list'),\n\n before=dict(type='list'),\n after=dict(type='list'),\n\n match=dict(default='line', choices=['line', 'strict', 'exact', 'none']),\n replace=dict(default='line', choices=['line', 'block']),\n\n running_config=dict(aliases=['config']),\n intended_config=dict(),\n\n defaults=dict(type='bool', default=False),\n backup=dict(type='bool', default=False),\n\n save_when=dict(choices=['always', 'never', 'modified'], default='never'),\n\n diff_against=dict(choices=['running', 'startup', 'intended']),\n diff_ignore_lines=dict(type='list'),\n\n # save is deprecated as of ans2.4, use save_when instead\n save=dict(default=False, type='bool', removed_in_version='2.4'),\n\n # force argument deprecated in ans2.2\n force=dict(default=False, type='bool', removed_in_version='2.2')\n )\n\n argument_spec.update(nxos_argument_spec)\n\n mutually_exclusive = [('lines', 'src'),\n ('save', 'save_when')]\n\n required_if = [('match', 'strict', ['lines']),\n ('match', 'exact', ['lines']),\n ('replace', 'block', ['lines']),\n ('diff_against', 'intended', ['intended_config'])]\n\n module = AnsibleModule(argument_spec=argument_spec,\n mutually_exclusive=mutually_exclusive,\n required_if=required_if,\n supports_check_mode=True)\n\n warnings = list()\n nxos_check_args(module, warnings)\n\n result = {'changed': False, 'warnings': warnings}\n\n config = None\n\n if module.params['backup'] or (module._diff and module.params['diff_against'] == 'running'):\n contents = get_config(module)\n config = NetworkConfig(indent=2, contents=contents)\n if module.params['backup']:\n result['__backup__'] = contents\n\n if any((module.params['src'], module.params['lines'])):\n match = module.params['match']\n replace = module.params['replace']\n\n candidate = get_candidate(module)\n\n if match != 'none':\n config = get_running_config(module, config)\n path = module.params['parents']\n configobjs = candidate.difference(config, match=match, replace=replace, path=path)\n else:\n configobjs = candidate.items\n\n if configobjs:\n commands = dumps(configobjs, 'commands').split('\\n')\n\n if module.params['before']:\n commands[:0] = module.params['before']\n\n if module.params['after']:\n commands.extend(module.params['after'])\n\n result['commands'] = commands\n result['updates'] = commands\n\n if not module.check_mode:\n load_config(module, commands)\n\n result['changed'] = True\n\n running_config = None\n startup_config = None\n\n diff_ignore_lines = module.params['diff_ignore_lines']\n\n if module.params['save']:\n module.params['save_when'] = 'always'\n\n if module.params['save_when'] != 'never':\n output = run_commands(module, ['show running-config', 'show startup-config'])\n\n running_config = NetworkConfig(indent=1, contents=output[0], 
ignore_lines=diff_ignore_lines)\n startup_config = NetworkConfig(indent=1, contents=output[1], ignore_lines=diff_ignore_lines)\n\n if running_config.sha1 != startup_config.sha1 or module.params['save_when'] == 'always':\n result['changed'] = True\n if not module.check_mode:\n cmd = {'command': 'copy running-config startup-config', 'output': 'text'}\n run_commands(module, [cmd])\n else:\n module.warn('Skipping command `copy running-config startup-config` '\n 'due to check_mode. Configuration not copied to '\n 'non-volatile storage')\n\n if module._diff:\n if not running_config:\n output = run_commands(module, 'show running-config')\n contents = output[0]\n else:\n contents = running_config.config_text\n\n # recreate the object in order to process diff_ignore_lines\n running_config = NetworkConfig(indent=1, contents=contents, ignore_lines=diff_ignore_lines)\n\n if module.params['diff_against'] == 'running':\n if module.check_mode:\n module.warn(\"unable to perform diff against running-config due to check mode\")\n contents = None\n else:\n contents = config.config_text\n\n elif module.params['diff_against'] == 'startup':\n if not startup_config:\n output = run_commands(module, 'show startup-config')\n contents = output[0]\n else:\n contents = output[0]\n contents = startup_config.config_text\n\n elif module.params['diff_against'] == 'intended':\n contents = module.params['intended_config']\n\n if contents is not None:\n base_config = NetworkConfig(indent=1, contents=contents, ignore_lines=diff_ignore_lines)\n\n if running_config.sha1 != base_config.sha1:\n result.update({\n 'changed': True,\n 'diff': {'before': str(base_config), 'after': str(running_config)}\n })\n\n\n module.exit_json(**result)\n\n\nif __name__ == '__main__':\n main()\n", "path": "lib/ansible/modules/network/nxos/nxos_config.py" } ]
diff --git a/lib/ansible/modules/network/nxos/nxos_config.py b/lib/ansible/modules/network/nxos/nxos_config.py
index 6e289de80206f0..8dae1f1f044dd6 100644
--- a/lib/ansible/modules/network/nxos/nxos_config.py
+++ b/lib/ansible/modules/network/nxos/nxos_config.py
@@ -227,7 +227,7 @@
 - name: diff the running-config against a provided config
   nxos_config:
     diff_against: intended
-    intended: "{{ lookup('file', 'master.cfg') }}"
+    intended_config: "{{ lookup('file', 'master.cfg') }}"
 
 - nxos_config:
     lines:
django-cms__django-cms-3844
Non-existent method Page.get_title_object
If you specify a language to `ExtensionToolbar.get_title_extension_admin`, it calls `Page.get_title_object`, which doesn’t exist. This should probably be `Page.get_title_obj` instead.
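A small, self-contained sketch of the affected branch in `cms/extensions/toolbar.py` (with a hypothetical `Page` stand-in rather than the real CMS model): the correct call is `get_title_obj`, and the trailing comma in `titles = page.get_title_obj(language),` matters because it wraps the single title in a one-element tuple, so the later `for title in titles` loop works for both the single-language and all-titles branches.

```python
class Page:  # hypothetical stand-in for cms.models.Page
    def get_title_obj(self, language):
        return f"<Title {language}>"

page = Page()
language = "en"

titles = page.get_title_obj(language),    # note the trailing comma: a 1-tuple
for title in titles:                      # mirrors the loop in get_title_extension_admin
    print(title)                          # -> <Title en>
```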
[ { "content": "# -*- coding: utf-8 -*-\nfrom cms.utils.urlutils import admin_reverse\nfrom cms.api import get_page_draft\nfrom cms.toolbar_base import CMSToolbar\nfrom cms.utils import get_cms_setting\nfrom cms.utils.permissions import has_page_change_permission\nfrom django.core.urlresolvers import NoReverseMatch\n\n\nclass ExtensionToolbar(CMSToolbar):\n \"\"\"\n ExtensionToolbar provides utility functions to handle much of the boilerplate involved in creating a toolbar for\n PageExtension and TitleExtension.\n\n The basic implementation of an extension toolbar using this class is::\n\n @toolbar_pool.register\n class SampleExtension(ExtensionToolbar):\n model = ExtModel # The PageExtension / TitleExtension you are working with\n\n def populate(self):\n current_page_menu = self._setup_extension_toolbar()\n if current_page_menu:\n position = 0\n page_extension, url = self.get_page_extension_admin()\n if url:\n current_page_menu.add_modal_item('Item label', url=url,\n disabled=not self.toolbar.edit_mode,\n position=position)\n\n For TitleExtension use ``get_title_extension_admin`` and cycle on the resulting title extensions and urls\n\n @toolbar_pool.register\n class SampleExtension(ExtensionToolbar):\n model = ExtModel # The PageExtension / TitleExtension you are working with\n\n def populate(self):\n current_page_menu = self._setup_extension_toolbar()\n if current_page_menu:\n position = 0\n urls = self.get_title_extension_admin()\n for title_extension, url in urls:\n current_page_menu.add_modal_item('Item label', url=url,\n disabled=not self.toolbar.edit_mode,\n position=position)\n\n \"\"\"\n model = None\n page = None\n\n def _setup_extension_toolbar(self):\n \"\"\"\n Does all the sanity check for the current environment:\n\n * that a page exists\n * permissions check on the current page\n\n It returns the page menu or None if the above conditions are not met\n \"\"\"\n page = self._get_page()\n if not page:\n # Nothing to do\n return\n # check global permissions if CMS_PERMISSIONS is active\n if get_cms_setting('PERMISSION'):\n has_global_current_page_change_permission = has_page_change_permission(self.request)\n else:\n has_global_current_page_change_permission = True\n # check if user has page edit permission\n can_change = (self.request.current_page and\n self.request.current_page.has_change_permission(self.request))\n current_page_menu = self.toolbar.get_or_create_menu('page')\n if can_change and has_global_current_page_change_permission:\n return current_page_menu\n else:\n return\n\n def _get_page(self):\n \"\"\"\n A utility method that caches the current page and make sure to use the draft version of the page.\n \"\"\"\n # always use draft if we have a page\n if not self.page:\n self.page = get_page_draft(self.request.current_page)\n return self.page\n\n def get_page_extension_admin(self):\n \"\"\"\n Get the admin url for the page extension menu item, depending on whether a PageExtension instance exists\n for the current page or not.\n\n Return a tuple of the current extension and the url; the extension is None if no instance exists,\n the url is None is no admin is registered for the extension.\n \"\"\"\n page = self._get_page()\n # Page extension\n try:\n page_extension = self.model.objects.get(extended_object_id=page.pk)\n except self.model.DoesNotExist:\n page_extension = None\n try:\n if page_extension:\n admin_url = admin_reverse(\n '%s_%s_change' % (self.model._meta.app_label, self.model._meta.model_name),\n args=(page_extension.pk,))\n else:\n admin_url = 
\"%s?extended_object=%s\" % (\n admin_reverse('%s_%s_add' % (self.model._meta.app_label, self.model._meta.model_name)),\n self.page.pk)\n except NoReverseMatch: # pragma: no cover\n admin_url = None\n return page_extension, admin_url\n\n def get_title_extension_admin(self, language=None):\n \"\"\"\n Get the admin urls for the title extensions menu items, depending on whether a TitleExtension instance exists\n for each Title in the current page.\n A single language can be passed to only work on a single title.\n\n Return a list of tuples of the title extension and the url; the extension is None if no instance exists,\n the url is None is no admin is registered for the extension.\n \"\"\"\n page = self._get_page()\n urls = []\n if language:\n titles = page.get_title_object(language),\n else:\n titles = page.title_set.all()\n # Titles\n for title in titles:\n try:\n title_extension = self.model.objects.get(extended_object_id=title.pk)\n except self.model.DoesNotExist:\n title_extension = None\n try:\n if title_extension:\n admin_url = admin_reverse(\n '%s_%s_change' % (self.model._meta.app_label, self.model._meta.model_name),\n args=(title_extension.pk,))\n else:\n admin_url = \"%s?extended_object=%s\" % (\n admin_reverse('%s_%s_add' % (self.model._meta.app_label, self.model._meta.model_name)),\n title.pk)\n except NoReverseMatch: # pragma: no cover\n admin_url = None\n if admin_url:\n urls.append((title_extension, admin_url))\n return urls\n\n def _get_sub_menu(self, current_menu, key, label, position=None):\n \"\"\"\n Utility function to get a submenu of the current menu\n \"\"\"\n extension_menu = current_menu.get_or_create_menu(\n key, label, position=position)\n return extension_menu\n", "path": "cms/extensions/toolbar.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\nfrom cms.utils.urlutils import admin_reverse\nfrom cms.api import get_page_draft\nfrom cms.toolbar_base import CMSToolbar\nfrom cms.utils import get_cms_setting\nfrom cms.utils.permissions import has_page_change_permission\nfrom django.core.urlresolvers import NoReverseMatch\n\n\nclass ExtensionToolbar(CMSToolbar):\n \"\"\"\n ExtensionToolbar provides utility functions to handle much of the boilerplate involved in creating a toolbar for\n PageExtension and TitleExtension.\n\n The basic implementation of an extension toolbar using this class is::\n\n @toolbar_pool.register\n class SampleExtension(ExtensionToolbar):\n model = ExtModel # The PageExtension / TitleExtension you are working with\n\n def populate(self):\n current_page_menu = self._setup_extension_toolbar()\n if current_page_menu:\n position = 0\n page_extension, url = self.get_page_extension_admin()\n if url:\n current_page_menu.add_modal_item('Item label', url=url,\n disabled=not self.toolbar.edit_mode,\n position=position)\n\n For TitleExtension use ``get_title_extension_admin`` and cycle on the resulting title extensions and urls\n\n @toolbar_pool.register\n class SampleExtension(ExtensionToolbar):\n model = ExtModel # The PageExtension / TitleExtension you are working with\n\n def populate(self):\n current_page_menu = self._setup_extension_toolbar()\n if current_page_menu:\n position = 0\n urls = self.get_title_extension_admin()\n for title_extension, url in urls:\n current_page_menu.add_modal_item('Item label', url=url,\n disabled=not self.toolbar.edit_mode,\n position=position)\n\n \"\"\"\n model = None\n page = None\n\n def _setup_extension_toolbar(self):\n \"\"\"\n Does all the sanity check for the current environment:\n\n * that a page exists\n * permissions check on the current page\n\n It returns the page menu or None if the above conditions are not met\n \"\"\"\n page = self._get_page()\n if not page:\n # Nothing to do\n return\n # check global permissions if CMS_PERMISSIONS is active\n if get_cms_setting('PERMISSION'):\n has_global_current_page_change_permission = has_page_change_permission(self.request)\n else:\n has_global_current_page_change_permission = True\n # check if user has page edit permission\n can_change = (self.request.current_page and\n self.request.current_page.has_change_permission(self.request))\n current_page_menu = self.toolbar.get_or_create_menu('page')\n if can_change and has_global_current_page_change_permission:\n return current_page_menu\n else:\n return\n\n def _get_page(self):\n \"\"\"\n A utility method that caches the current page and make sure to use the draft version of the page.\n \"\"\"\n # always use draft if we have a page\n if not self.page:\n self.page = get_page_draft(self.request.current_page)\n return self.page\n\n def get_page_extension_admin(self):\n \"\"\"\n Get the admin url for the page extension menu item, depending on whether a PageExtension instance exists\n for the current page or not.\n\n Return a tuple of the current extension and the url; the extension is None if no instance exists,\n the url is None is no admin is registered for the extension.\n \"\"\"\n page = self._get_page()\n # Page extension\n try:\n page_extension = self.model.objects.get(extended_object_id=page.pk)\n except self.model.DoesNotExist:\n page_extension = None\n try:\n if page_extension:\n admin_url = admin_reverse(\n '%s_%s_change' % (self.model._meta.app_label, self.model._meta.model_name),\n args=(page_extension.pk,))\n else:\n admin_url = 
\"%s?extended_object=%s\" % (\n admin_reverse('%s_%s_add' % (self.model._meta.app_label, self.model._meta.model_name)),\n self.page.pk)\n except NoReverseMatch: # pragma: no cover\n admin_url = None\n return page_extension, admin_url\n\n def get_title_extension_admin(self, language=None):\n \"\"\"\n Get the admin urls for the title extensions menu items, depending on whether a TitleExtension instance exists\n for each Title in the current page.\n A single language can be passed to only work on a single title.\n\n Return a list of tuples of the title extension and the url; the extension is None if no instance exists,\n the url is None is no admin is registered for the extension.\n \"\"\"\n page = self._get_page()\n urls = []\n if language:\n titles = page.get_title_obj(language),\n else:\n titles = page.title_set.all()\n # Titles\n for title in titles:\n try:\n title_extension = self.model.objects.get(extended_object_id=title.pk)\n except self.model.DoesNotExist:\n title_extension = None\n try:\n if title_extension:\n admin_url = admin_reverse(\n '%s_%s_change' % (self.model._meta.app_label, self.model._meta.model_name),\n args=(title_extension.pk,))\n else:\n admin_url = \"%s?extended_object=%s\" % (\n admin_reverse('%s_%s_add' % (self.model._meta.app_label, self.model._meta.model_name)),\n title.pk)\n except NoReverseMatch: # pragma: no cover\n admin_url = None\n if admin_url:\n urls.append((title_extension, admin_url))\n return urls\n\n def _get_sub_menu(self, current_menu, key, label, position=None):\n \"\"\"\n Utility function to get a submenu of the current menu\n \"\"\"\n extension_menu = current_menu.get_or_create_menu(\n key, label, position=position)\n return extension_menu\n", "path": "cms/extensions/toolbar.py" } ]
diff --git a/cms/extensions/toolbar.py b/cms/extensions/toolbar.py
index e8e3870fd03..92eefe1f43f 100644
--- a/cms/extensions/toolbar.py
+++ b/cms/extensions/toolbar.py
@@ -123,7 +123,7 @@ def get_title_extension_admin(self, language=None):
         page = self._get_page()
         urls = []
         if language:
-            titles = page.get_title_object(language),
+            titles = page.get_title_obj(language),
         else:
             titles = page.title_set.all()
         # Titles
urllib3__urllib3-2840
2.0.0a2: Logging error with `StreamHandler` / `urllib3.add_stderr_logger`
### Subject

When using `urllib3.add_stderr_logger` (or using a StreamHandler), the formatting for the message to print out the request details fails. This happens on the current main (2.0.0a2), release 2.0.0a2 and 2.0.0a1. 1.26.13 works fine.

### Environment

Describe your environment. At least, paste here the output of:

```python
import platform
import urllib3

print("OS", platform.platform())
print("Python", platform.python_version())
print("urllib3", urllib3.__version__)
```

```
OS Linux-5.15.72-1-lts-x86_64-with-glibc2.35
Python 3.8.14
urllib3 2.0.0a2
```

### Steps to Reproduce

A simple and isolated way to reproduce the issue. A code snippet would be great.

```python
import urllib3

urllib3.add_stderr_logger()
pool = urllib3.PoolManager()
pool.request('GET', 'https://github.com/urllib3/urllib3')
```

### Expected Behavior

What should happen.

No logging error

### Actual Behavior

What happens instead.

```
(venv) [dev@dev-vm urllib3-test]$ python urllib3_test.py
2022-11-30 15:34:40,252 DEBUG Added a stderr logging handler to logger: urllib3
2022-11-30 15:34:40,252 DEBUG Starting new HTTPS connection (1): github.com:443
--- Logging error ---
Traceback (most recent call last):
  File "/home/dev/.pyenv/versions/3.8.14/lib/python3.8/logging/__init__.py", line 1085, in emit
    msg = self.format(record)
  File "/home/dev/.pyenv/versions/3.8.14/lib/python3.8/logging/__init__.py", line 929, in format
    return fmt.format(record)
  File "/home/dev/.pyenv/versions/3.8.14/lib/python3.8/logging/__init__.py", line 668, in format
    record.message = record.getMessage()
  File "/home/dev/.pyenv/versions/3.8.14/lib/python3.8/logging/__init__.py", line 373, in getMessage
    msg = msg % self.args
TypeError: not all arguments converted during string formatting
Call stack:
  File "urllib3_test.py", line 7, in <module>
    pool.request('GET', 'https://github.com/urllib3/urllib3')
  File "/home/dev/urllib3-test/venv/lib/python3.8/site-packages/urllib3/_request_methods.py", line 110, in request
    return self.request_encode_url(
  File "/home/dev/urllib3-test/venv/lib/python3.8/site-packages/urllib3/_request_methods.py", line 143, in request_encode_url
    return self.urlopen(method, url, **extra_kw)
  File "/home/dev/urllib3-test/venv/lib/python3.8/site-packages/urllib3/poolmanager.py", line 433, in urlopen
    response = conn.urlopen(method, u.request_uri, **kw)
  File "/home/dev/urllib3-test/venv/lib/python3.8/site-packages/urllib3/connectionpool.py", line 791, in urlopen
    response = self._make_request(
  File "/home/dev/urllib3-test/venv/lib/python3.8/site-packages/urllib3/connectionpool.py", line 547, in _make_request
    log.debug(
Message: '%s://%s:%s "%s %s %s" %s'
Arguments: ('https', 'github.com', 443, 'GET', '/urllib3/urllib3', 'HTTP/1.1', 200, None)
```
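The traceback already points at the root cause: the `log.debug` call in `_make_request` passes eight arguments to a format string with only seven `%s` placeholders. Because `logging` formats lazily, the error only surfaces once a handler (such as the one `add_stderr_logger` installs) actually formats the record. A minimal reproduction without any network access, plus one way the counts could be made to match (the exact upstream patch may differ), looks like this:

```python
import logging

logging.basicConfig(level=logging.DEBUG)  # any real handler triggers formatting
log = logging.getLogger("demo")

# Seven %s placeholders, eight arguments: the handler's emit() hits
# "TypeError: not all arguments converted during string formatting" and
# logging prints the "--- Logging error ---" block seen above.
log.debug('%s://%s:%s "%s %s %s" %s',
          "https", "github.com", 443, "GET", "/urllib3/urllib3", "HTTP/1.1", 200, None)

# Adding a placeholder so the counts match (illustrative only):
log.debug('%s://%s:%s "%s %s %s" %s %s',
          "https", "github.com", 443, "GET", "/urllib3/urllib3", "HTTP/1.1", 200, None)
```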
[ { "content": "from __future__ import annotations\n\nimport errno\nimport logging\nimport queue\nimport sys\nimport typing\nimport warnings\nimport weakref\nfrom socket import timeout as SocketTimeout\nfrom types import TracebackType\n\nfrom ._base_connection import _TYPE_BODY\nfrom ._request_methods import RequestMethods\nfrom .connection import (\n BaseSSLError,\n BrokenPipeError,\n DummyConnection,\n HTTPConnection,\n HTTPException,\n HTTPSConnection,\n ProxyConfig,\n _wrap_proxy_error,\n)\nfrom .connection import port_by_scheme as port_by_scheme\nfrom .exceptions import (\n ClosedPoolError,\n EmptyPoolError,\n FullPoolError,\n HostChangedError,\n InsecureRequestWarning,\n LocationValueError,\n MaxRetryError,\n NewConnectionError,\n ProtocolError,\n ProxyError,\n ReadTimeoutError,\n SSLError,\n TimeoutError,\n)\nfrom .response import BaseHTTPResponse\nfrom .util.connection import is_connection_dropped\nfrom .util.proxy import connection_requires_http_tunnel\nfrom .util.request import _TYPE_BODY_POSITION, set_file_position\nfrom .util.retry import Retry\nfrom .util.ssl_match_hostname import CertificateError\nfrom .util.timeout import _DEFAULT_TIMEOUT, _TYPE_DEFAULT, Timeout\nfrom .util.url import Url, _encode_target\nfrom .util.url import _normalize_host as normalize_host\nfrom .util.url import parse_url\nfrom .util.util import to_str\n\nif typing.TYPE_CHECKING:\n import ssl\n\n from typing_extensions import Literal\n\n from ._base_connection import BaseHTTPConnection, BaseHTTPSConnection\n\nlog = logging.getLogger(__name__)\n\n_TYPE_TIMEOUT = typing.Union[Timeout, float, _TYPE_DEFAULT, None]\n\n_SelfT = typing.TypeVar(\"_SelfT\")\n\n\n# Pool objects\nclass ConnectionPool:\n \"\"\"\n Base class for all connection pools, such as\n :class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`.\n\n .. note::\n ConnectionPool.urlopen() does not normalize or percent-encode target URIs\n which is useful if your target server doesn't support percent-encoded\n target URIs.\n \"\"\"\n\n scheme: str | None = None\n QueueCls = queue.LifoQueue\n\n def __init__(self, host: str, port: int | None = None) -> None:\n if not host:\n raise LocationValueError(\"No host specified.\")\n\n self.host = _normalize_host(host, scheme=self.scheme)\n self.port = port\n\n # This property uses 'normalize_host()' (not '_normalize_host()')\n # to avoid removing square braces around IPv6 addresses.\n # This value is sent to `HTTPConnection.set_tunnel()` if called\n # because square braces are required for HTTP CONNECT tunneling.\n self._tunnel_host = normalize_host(host, scheme=self.scheme).lower()\n\n def __str__(self) -> str:\n return f\"{type(self).__name__}(host={self.host!r}, port={self.port!r})\"\n\n def __enter__(self: _SelfT) -> _SelfT:\n return self\n\n def __exit__(\n self,\n exc_type: type[BaseException] | None,\n exc_val: BaseException | None,\n exc_tb: TracebackType | None,\n ) -> Literal[False]:\n self.close()\n # Return False to re-raise any potential exceptions\n return False\n\n def close(self) -> None:\n \"\"\"\n Close all pooled connections and disable the pool.\n \"\"\"\n\n\n# This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252\n_blocking_errnos = {errno.EAGAIN, errno.EWOULDBLOCK}\n\n\nclass HTTPConnectionPool(ConnectionPool, RequestMethods):\n \"\"\"\n Thread-safe connection pool for one host.\n\n :param host:\n Host used for this HTTP Connection (e.g. 
\"localhost\"), passed into\n :class:`http.client.HTTPConnection`.\n\n :param port:\n Port used for this HTTP Connection (None is equivalent to 80), passed\n into :class:`http.client.HTTPConnection`.\n\n :param timeout:\n Socket timeout in seconds for each individual connection. This can\n be a float or integer, which sets the timeout for the HTTP request,\n or an instance of :class:`urllib3.util.Timeout` which gives you more\n fine-grained control over request timeouts. After the constructor has\n been parsed, this is always a `urllib3.util.Timeout` object.\n\n :param maxsize:\n Number of connections to save that can be reused. More than 1 is useful\n in multithreaded situations. If ``block`` is set to False, more\n connections will be created but they will not be saved once they've\n been used.\n\n :param block:\n If set to True, no more than ``maxsize`` connections will be used at\n a time. When no free connections are available, the call will block\n until a connection has been released. This is a useful side effect for\n particular multithreaded situations where one does not want to use more\n than maxsize connections per host to prevent flooding.\n\n :param headers:\n Headers to include with all requests, unless other headers are given\n explicitly.\n\n :param retries:\n Retry configuration to use by default with requests in this pool.\n\n :param _proxy:\n Parsed proxy URL, should not be used directly, instead, see\n :class:`urllib3.ProxyManager`\n\n :param _proxy_headers:\n A dictionary with proxy headers, should not be used directly,\n instead, see :class:`urllib3.ProxyManager`\n\n :param \\\\**conn_kw:\n Additional parameters are used to create fresh :class:`urllib3.connection.HTTPConnection`,\n :class:`urllib3.connection.HTTPSConnection` instances.\n \"\"\"\n\n scheme = \"http\"\n ConnectionCls: (\n type[BaseHTTPConnection] | type[BaseHTTPSConnection]\n ) = HTTPConnection\n\n def __init__(\n self,\n host: str,\n port: int | None = None,\n timeout: _TYPE_TIMEOUT | None = _DEFAULT_TIMEOUT,\n maxsize: int = 1,\n block: bool = False,\n headers: typing.Mapping[str, str] | None = None,\n retries: Retry | bool | int | None = None,\n _proxy: Url | None = None,\n _proxy_headers: typing.Mapping[str, str] | None = None,\n _proxy_config: ProxyConfig | None = None,\n **conn_kw: typing.Any,\n ):\n ConnectionPool.__init__(self, host, port)\n RequestMethods.__init__(self, headers)\n\n if not isinstance(timeout, Timeout):\n timeout = Timeout.from_float(timeout)\n\n if retries is None:\n retries = Retry.DEFAULT\n\n self.timeout = timeout\n self.retries = retries\n\n self.pool: queue.LifoQueue[typing.Any] | None = self.QueueCls(maxsize)\n self.block = block\n\n self.proxy = _proxy\n self.proxy_headers = _proxy_headers or {}\n self.proxy_config = _proxy_config\n\n # Fill the queue up so that doing get() on it will block properly\n for _ in range(maxsize):\n self.pool.put(None)\n\n # These are mostly for testing and debugging purposes.\n self.num_connections = 0\n self.num_requests = 0\n self.conn_kw = conn_kw\n\n if self.proxy:\n # Enable Nagle's algorithm for proxies, to avoid packet fragmentation.\n # We cannot know if the user has added default socket options, so we cannot replace the\n # list.\n self.conn_kw.setdefault(\"socket_options\", [])\n\n self.conn_kw[\"proxy\"] = self.proxy\n self.conn_kw[\"proxy_config\"] = self.proxy_config\n\n # Do not pass 'self' as callback to 'finalize'.\n # Then the 'finalize' would keep an endless living (leak) to self.\n # By just passing a reference to the 
pool allows the garbage collector\n # to free self if nobody else has a reference to it.\n pool = self.pool\n\n # Close all the HTTPConnections in the pool before the\n # HTTPConnectionPool object is garbage collected.\n weakref.finalize(self, _close_pool_connections, pool)\n\n def _new_conn(self) -> BaseHTTPConnection:\n \"\"\"\n Return a fresh :class:`HTTPConnection`.\n \"\"\"\n self.num_connections += 1\n log.debug(\n \"Starting new HTTP connection (%d): %s:%s\",\n self.num_connections,\n self.host,\n self.port or \"80\",\n )\n\n conn = self.ConnectionCls(\n host=self.host,\n port=self.port,\n timeout=self.timeout.connect_timeout,\n **self.conn_kw,\n )\n return conn\n\n def _get_conn(self, timeout: float | None = None) -> BaseHTTPConnection:\n \"\"\"\n Get a connection. Will return a pooled connection if one is available.\n\n If no connections are available and :prop:`.block` is ``False``, then a\n fresh connection is returned.\n\n :param timeout:\n Seconds to wait before giving up and raising\n :class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and\n :prop:`.block` is ``True``.\n \"\"\"\n conn = None\n\n if self.pool is None:\n raise ClosedPoolError(self, \"Pool is closed.\")\n\n try:\n conn = self.pool.get(block=self.block, timeout=timeout)\n\n except AttributeError: # self.pool is None\n raise ClosedPoolError(self, \"Pool is closed.\") from None # Defensive:\n\n except queue.Empty:\n if self.block:\n raise EmptyPoolError(\n self,\n \"Pool is empty and a new connection can't be opened due to blocking mode.\",\n ) from None\n pass # Oh well, we'll create a new connection then\n\n # If this is a persistent connection, check if it got disconnected\n if conn and is_connection_dropped(conn):\n log.debug(\"Resetting dropped connection: %s\", self.host)\n conn.close()\n\n return conn or self._new_conn()\n\n def _put_conn(self, conn: BaseHTTPConnection | None) -> None:\n \"\"\"\n Put a connection back into the pool.\n\n :param conn:\n Connection object for the current host and port as returned by\n :meth:`._new_conn` or :meth:`._get_conn`.\n\n If the pool is already full, the connection is closed and discarded\n because we exceeded maxsize. If connections are discarded frequently,\n then maxsize should be increased.\n\n If the pool is closed, then the connection will be closed and discarded.\n \"\"\"\n if self.pool is not None:\n try:\n self.pool.put(conn, block=False)\n return # Everything is dandy, done.\n except AttributeError:\n # self.pool is None.\n pass\n except queue.Full:\n\n # Connection never got put back into the pool, close it.\n if conn:\n conn.close()\n\n if self.block:\n # This should never happen if you got the conn from self._get_conn\n raise FullPoolError(\n self,\n \"Pool reached maximum size and no more connections are allowed.\",\n ) from None\n\n log.warning(\n \"Connection pool is full, discarding connection: %s. 
Connection pool size: %s\",\n self.host,\n self.pool.qsize(),\n )\n\n # Connection never got put back into the pool, close it.\n if conn:\n conn.close()\n\n def _validate_conn(self, conn: BaseHTTPConnection) -> None:\n \"\"\"\n Called right before a request is made, after the socket is created.\n \"\"\"\n\n def _prepare_proxy(self, conn: BaseHTTPConnection) -> None:\n # Nothing to do for HTTP connections.\n pass\n\n def _get_timeout(self, timeout: _TYPE_TIMEOUT) -> Timeout:\n \"\"\"Helper that always returns a :class:`urllib3.util.Timeout`\"\"\"\n if timeout is _DEFAULT_TIMEOUT:\n return self.timeout.clone()\n\n if isinstance(timeout, Timeout):\n return timeout.clone()\n else:\n # User passed us an int/float. This is for backwards compatibility,\n # can be removed later\n return Timeout.from_float(timeout)\n\n def _raise_timeout(\n self,\n err: BaseSSLError | OSError | SocketTimeout,\n url: str,\n timeout_value: _TYPE_TIMEOUT | None,\n ) -> None:\n \"\"\"Is the error actually a timeout? Will raise a ReadTimeout or pass\"\"\"\n\n if isinstance(err, SocketTimeout):\n raise ReadTimeoutError(\n self, url, f\"Read timed out. (read timeout={timeout_value})\"\n ) from err\n\n # See the above comment about EAGAIN in Python 3.\n if hasattr(err, \"errno\") and err.errno in _blocking_errnos:\n raise ReadTimeoutError(\n self, url, f\"Read timed out. (read timeout={timeout_value})\"\n ) from err\n\n def _make_request(\n self,\n conn: BaseHTTPConnection,\n method: str,\n url: str,\n body: _TYPE_BODY | None = None,\n headers: typing.Mapping[str, str] | None = None,\n retries: Retry | None = None,\n timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,\n chunked: bool = False,\n response_conn: BaseHTTPConnection | None = None,\n preload_content: bool = True,\n decode_content: bool = True,\n enforce_content_length: bool = True,\n ) -> BaseHTTPResponse:\n \"\"\"\n Perform a request on a given urllib connection object taken from our\n pool.\n\n :param conn:\n a connection from one of our connection pools\n\n :param method:\n HTTP request method (such as GET, POST, PUT, etc.)\n\n :param url:\n The URL to perform the request on.\n\n :param body:\n Data to send in the request body, either :class:`str`, :class:`bytes`,\n an iterable of :class:`str`/:class:`bytes`, or a file-like object.\n\n :param headers:\n Dictionary of custom headers to send, such as User-Agent,\n If-None-Match, etc. If None, pool headers are used. If provided,\n these headers completely replace any pool-specific headers.\n\n :param retries:\n Configure the number of retries to allow before raising a\n :class:`~urllib3.exceptions.MaxRetryError` exception.\n\n Pass ``None`` to retry until you receive a response. Pass a\n :class:`~urllib3.util.retry.Retry` object for fine-grained control\n over different types of retries.\n Pass an integer number to retry connection errors that many times,\n but no other types of errors. Pass zero to never retry.\n\n If ``False``, then retries are disabled and any exception is raised\n immediately. Also, instead of raising a MaxRetryError on redirects,\n the redirect response will be returned.\n\n :type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.\n\n :param timeout:\n If specified, overrides the default timeout for this one\n request. It may be a float (in seconds) or an instance of\n :class:`urllib3.util.Timeout`.\n\n :param chunked:\n If True, urllib3 will send the body using chunked transfer\n encoding. Otherwise, urllib3 will send the body using the standard\n content-length form. 
Defaults to False.\n\n :param response_conn:\n Set this to ``None`` if you will handle releasing the connection or\n set the connection to have the response release it.\n\n :param preload_content:\n If True, the response's body will be preloaded during construction.\n\n :param decode_content:\n If True, will attempt to decode the body based on the\n 'content-encoding' header.\n\n :param enforce_content_length:\n Enforce content length checking. Body returned by server must match\n value of Content-Length header, if present. Otherwise, raise error.\n \"\"\"\n self.num_requests += 1\n\n timeout_obj = self._get_timeout(timeout)\n timeout_obj.start_connect()\n conn.timeout = Timeout.resolve_default_timeout(timeout_obj.connect_timeout)\n\n try:\n # Trigger any extra validation we need to do.\n try:\n self._validate_conn(conn)\n except (SocketTimeout, BaseSSLError) as e:\n self._raise_timeout(err=e, url=url, timeout_value=conn.timeout)\n raise\n\n # _validate_conn() starts the connection to an HTTPS proxy\n # so we need to wrap errors with 'ProxyError' here too.\n except (\n OSError,\n NewConnectionError,\n TimeoutError,\n BaseSSLError,\n CertificateError,\n SSLError,\n ) as e:\n new_e: Exception = e\n if isinstance(e, (BaseSSLError, CertificateError)):\n new_e = SSLError(e)\n # If the connection didn't successfully connect to it's proxy\n # then there\n if isinstance(\n new_e, (OSError, NewConnectionError, TimeoutError, SSLError)\n ) and (conn and conn.proxy and not conn.has_connected_to_proxy):\n new_e = _wrap_proxy_error(new_e, conn.proxy.scheme)\n raise new_e\n\n # conn.request() calls http.client.*.request, not the method in\n # urllib3.request. It also calls makefile (recv) on the socket.\n try:\n conn.request(\n method,\n url,\n body=body,\n headers=headers,\n chunked=chunked,\n preload_content=preload_content,\n decode_content=decode_content,\n enforce_content_length=enforce_content_length,\n )\n\n # We are swallowing BrokenPipeError (errno.EPIPE) since the server is\n # legitimately able to close the connection after sending a valid response.\n # With this behaviour, the received response is still readable.\n except BrokenPipeError:\n pass\n except OSError as e:\n # MacOS/Linux\n # EPROTOTYPE is needed on macOS\n # https://erickt.github.io/blog/2014/11/19/adventures-in-debugging-a-potential-osx-kernel-bug/\n if e.errno != errno.EPROTOTYPE:\n raise\n\n # Reset the timeout for the recv() on the socket\n read_timeout = timeout_obj.read_timeout\n\n if not conn.is_closed:\n # In Python 3 socket.py will catch EAGAIN and return None when you\n # try and read into the file pointer created by http.client, which\n # instead raises a BadStatusLine exception. Instead of catching\n # the exception and assuming all BadStatusLine exceptions are read\n # timeouts, check for a zero timeout before making the request.\n if read_timeout == 0:\n raise ReadTimeoutError(\n self, url, f\"Read timed out. 
(read timeout={read_timeout})\"\n )\n conn.timeout = read_timeout\n\n # Receive the response from the server\n try:\n response = conn.getresponse()\n except (BaseSSLError, OSError) as e:\n self._raise_timeout(err=e, url=url, timeout_value=read_timeout)\n raise\n\n # Set properties that are used by the pooling layer.\n response.retries = retries\n response._connection = response_conn # type: ignore[attr-defined]\n response._pool = self # type: ignore[attr-defined]\n\n log.debug(\n '%s://%s:%s \"%s %s %s\" %s',\n self.scheme,\n self.host,\n self.port,\n method,\n url,\n # HTTP version\n conn._http_vsn_str, # type: ignore[attr-defined]\n response.status,\n response.length_remaining, # type: ignore[attr-defined]\n )\n\n return response\n\n def close(self) -> None:\n \"\"\"\n Close all pooled connections and disable the pool.\n \"\"\"\n if self.pool is None:\n return\n # Disable access to the pool\n old_pool, self.pool = self.pool, None\n\n # Close all the HTTPConnections in the pool.\n _close_pool_connections(old_pool)\n\n def is_same_host(self, url: str) -> bool:\n \"\"\"\n Check if the given ``url`` is a member of the same host as this\n connection pool.\n \"\"\"\n if url.startswith(\"/\"):\n return True\n\n # TODO: Add optional support for socket.gethostbyname checking.\n scheme, _, host, port, *_ = parse_url(url)\n scheme = scheme or \"http\"\n if host is not None:\n host = _normalize_host(host, scheme=scheme)\n\n # Use explicit default port for comparison when none is given\n if self.port and not port:\n port = port_by_scheme.get(scheme)\n elif not self.port and port == port_by_scheme.get(scheme):\n port = None\n\n return (scheme, host, port) == (self.scheme, self.host, self.port)\n\n def urlopen( # type: ignore[override]\n self,\n method: str,\n url: str,\n body: _TYPE_BODY | None = None,\n headers: typing.Mapping[str, str] | None = None,\n retries: Retry | bool | int | None = None,\n redirect: bool = True,\n assert_same_host: bool = True,\n timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,\n pool_timeout: int | None = None,\n release_conn: bool | None = None,\n chunked: bool = False,\n body_pos: _TYPE_BODY_POSITION | None = None,\n preload_content: bool = True,\n decode_content: bool = True,\n **response_kw: typing.Any,\n ) -> BaseHTTPResponse:\n \"\"\"\n Get a connection from the pool and perform an HTTP request. This is the\n lowest level call for making a request, so you'll need to specify all\n the raw details.\n\n .. note::\n\n More commonly, it's appropriate to use a convenience method\n such as :meth:`request`.\n\n .. note::\n\n `release_conn` will only behave as expected if\n `preload_content=False` because we want to make\n `preload_content=False` the default behaviour someday soon without\n breaking backwards compatibility.\n\n :param method:\n HTTP request method (such as GET, POST, PUT, etc.)\n\n :param url:\n The URL to perform the request on.\n\n :param body:\n Data to send in the request body, either :class:`str`, :class:`bytes`,\n an iterable of :class:`str`/:class:`bytes`, or a file-like object.\n\n :param headers:\n Dictionary of custom headers to send, such as User-Agent,\n If-None-Match, etc. If None, pool headers are used. If provided,\n these headers completely replace any pool-specific headers.\n\n :param retries:\n Configure the number of retries to allow before raising a\n :class:`~urllib3.exceptions.MaxRetryError` exception.\n\n Pass ``None`` to retry until you receive a response. 
Pass a\n :class:`~urllib3.util.retry.Retry` object for fine-grained control\n over different types of retries.\n Pass an integer number to retry connection errors that many times,\n but no other types of errors. Pass zero to never retry.\n\n If ``False``, then retries are disabled and any exception is raised\n immediately. Also, instead of raising a MaxRetryError on redirects,\n the redirect response will be returned.\n\n :type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.\n\n :param redirect:\n If True, automatically handle redirects (status codes 301, 302,\n 303, 307, 308). Each redirect counts as a retry. Disabling retries\n will disable redirect, too.\n\n :param assert_same_host:\n If ``True``, will make sure that the host of the pool requests is\n consistent else will raise HostChangedError. When ``False``, you can\n use the pool on an HTTP proxy and request foreign hosts.\n\n :param timeout:\n If specified, overrides the default timeout for this one\n request. It may be a float (in seconds) or an instance of\n :class:`urllib3.util.Timeout`.\n\n :param pool_timeout:\n If set and the pool is set to block=True, then this method will\n block for ``pool_timeout`` seconds and raise EmptyPoolError if no\n connection is available within the time period.\n\n :param bool preload_content:\n If True, the response's body will be preloaded into memory.\n\n :param bool decode_content:\n If True, will attempt to decode the body based on the\n 'content-encoding' header.\n\n :param release_conn:\n If False, then the urlopen call will not release the connection\n back into the pool once a response is received (but will release if\n you read the entire contents of the response such as when\n `preload_content=True`). This is useful if you're not preloading\n the response's content immediately. You will need to call\n ``r.release_conn()`` on the response ``r`` to return the connection\n back into the pool. If None, it takes the value of ``preload_content``\n which defaults to ``True``.\n\n :param bool chunked:\n If True, urllib3 will send the body using chunked transfer\n encoding. Otherwise, urllib3 will send the body using the standard\n content-length form. Defaults to False.\n\n :param int body_pos:\n Position to seek to in file-like body in the event of a retry or\n redirect. Typically this won't need to be set because urllib3 will\n auto-populate the value when needed.\n \"\"\"\n parsed_url = parse_url(url)\n destination_scheme = parsed_url.scheme\n\n if headers is None:\n headers = self.headers\n\n if not isinstance(retries, Retry):\n retries = Retry.from_int(retries, redirect=redirect, default=self.retries)\n\n if release_conn is None:\n release_conn = preload_content\n\n # Check host\n if assert_same_host and not self.is_same_host(url):\n raise HostChangedError(self, url, retries)\n\n # Ensure that the URL we're connecting to is properly encoded\n if url.startswith(\"/\"):\n url = to_str(_encode_target(url))\n else:\n url = to_str(parsed_url.url)\n\n conn = None\n\n # Track whether `conn` needs to be released before\n # returning/raising/recursing. Update this variable if necessary, and\n # leave `release_conn` constant throughout the function. 
That way, if\n # the function recurses, the original value of `release_conn` will be\n # passed down into the recursive call, and its value will be respected.\n #\n # See issue #651 [1] for details.\n #\n # [1] <https://github.com/urllib3/urllib3/issues/651>\n release_this_conn = release_conn\n\n http_tunnel_required = connection_requires_http_tunnel(\n self.proxy, self.proxy_config, destination_scheme\n )\n\n # Merge the proxy headers. Only done when not using HTTP CONNECT. We\n # have to copy the headers dict so we can safely change it without those\n # changes being reflected in anyone else's copy.\n if not http_tunnel_required:\n headers = headers.copy() # type: ignore[attr-defined]\n headers.update(self.proxy_headers) # type: ignore[union-attr]\n\n # Must keep the exception bound to a separate variable or else Python 3\n # complains about UnboundLocalError.\n err = None\n\n # Keep track of whether we cleanly exited the except block. This\n # ensures we do proper cleanup in finally.\n clean_exit = False\n\n # Rewind body position, if needed. Record current position\n # for future rewinds in the event of a redirect/retry.\n body_pos = set_file_position(body, body_pos)\n\n try:\n # Request a connection from the queue.\n timeout_obj = self._get_timeout(timeout)\n conn = self._get_conn(timeout=pool_timeout)\n\n conn.timeout = timeout_obj.connect_timeout # type: ignore[assignment]\n\n # Is this a closed/new connection that requires CONNECT tunnelling?\n if self.proxy is not None and http_tunnel_required and conn.is_closed:\n try:\n self._prepare_proxy(conn)\n except (BaseSSLError, OSError, SocketTimeout) as e:\n self._raise_timeout(\n err=e, url=self.proxy.url, timeout_value=conn.timeout\n )\n raise\n\n # If we're going to release the connection in ``finally:``, then\n # the response doesn't need to know about the connection. Otherwise\n # it will also try to release it and we'll have a double-release\n # mess.\n response_conn = conn if not release_conn else None\n\n # Make the request on the HTTPConnection object\n response = self._make_request(\n conn,\n method,\n url,\n timeout=timeout_obj,\n body=body,\n headers=headers,\n chunked=chunked,\n retries=retries,\n response_conn=response_conn,\n preload_content=preload_content,\n decode_content=decode_content,\n **response_kw,\n )\n\n # Everything went great!\n clean_exit = True\n\n except EmptyPoolError:\n # Didn't get a connection from the pool, no need to clean up\n clean_exit = True\n release_this_conn = False\n raise\n\n except (\n TimeoutError,\n HTTPException,\n OSError,\n ProtocolError,\n BaseSSLError,\n SSLError,\n CertificateError,\n ProxyError,\n ) as e:\n # Discard the connection for these exceptions. It will be\n # replaced during the next _get_conn() call.\n clean_exit = False\n new_e: Exception = e\n if isinstance(e, (BaseSSLError, CertificateError)):\n new_e = SSLError(e)\n if isinstance(\n new_e,\n (\n OSError,\n NewConnectionError,\n TimeoutError,\n SSLError,\n HTTPException,\n ),\n ) and (conn and conn.proxy and not conn.has_connected_to_proxy):\n new_e = _wrap_proxy_error(new_e, conn.proxy.scheme)\n elif isinstance(new_e, (OSError, HTTPException)):\n new_e = ProtocolError(\"Connection aborted.\", new_e)\n\n retries = retries.increment(\n method, url, error=new_e, _pool=self, _stacktrace=sys.exc_info()[2]\n )\n retries.sleep()\n\n # Keep track of the error for the retry warning.\n err = e\n\n finally:\n if not clean_exit:\n # We hit some kind of exception, handled or otherwise. 
We need\n # to throw the connection away unless explicitly told not to.\n # Close the connection, set the variable to None, and make sure\n # we put the None back in the pool to avoid leaking it.\n if conn:\n conn.close()\n conn = None\n release_this_conn = True\n\n if release_this_conn:\n # Put the connection back to be reused. If the connection is\n # expired then it will be None, which will get replaced with a\n # fresh connection during _get_conn.\n self._put_conn(conn)\n\n if not conn:\n # Try again\n log.warning(\n \"Retrying (%r) after connection broken by '%r': %s\", retries, err, url\n )\n return self.urlopen(\n method,\n url,\n body,\n headers,\n retries,\n redirect,\n assert_same_host,\n timeout=timeout,\n pool_timeout=pool_timeout,\n release_conn=release_conn,\n chunked=chunked,\n body_pos=body_pos,\n preload_content=preload_content,\n decode_content=decode_content,\n **response_kw,\n )\n\n # Handle redirect?\n redirect_location = redirect and response.get_redirect_location()\n if redirect_location:\n if response.status == 303:\n method = \"GET\"\n\n try:\n retries = retries.increment(method, url, response=response, _pool=self)\n except MaxRetryError:\n if retries.raise_on_redirect:\n response.drain_conn()\n raise\n return response\n\n response.drain_conn()\n retries.sleep_for_retry(response)\n log.debug(\"Redirecting %s -> %s\", url, redirect_location)\n return self.urlopen(\n method,\n redirect_location,\n body,\n headers,\n retries=retries,\n redirect=redirect,\n assert_same_host=assert_same_host,\n timeout=timeout,\n pool_timeout=pool_timeout,\n release_conn=release_conn,\n chunked=chunked,\n body_pos=body_pos,\n preload_content=preload_content,\n decode_content=decode_content,\n **response_kw,\n )\n\n # Check if we should retry the HTTP response.\n has_retry_after = bool(response.headers.get(\"Retry-After\"))\n if retries.is_retry(method, response.status, has_retry_after):\n try:\n retries = retries.increment(method, url, response=response, _pool=self)\n except MaxRetryError:\n if retries.raise_on_status:\n response.drain_conn()\n raise\n return response\n\n response.drain_conn()\n retries.sleep(response)\n log.debug(\"Retry: %s\", url)\n return self.urlopen(\n method,\n url,\n body,\n headers,\n retries=retries,\n redirect=redirect,\n assert_same_host=assert_same_host,\n timeout=timeout,\n pool_timeout=pool_timeout,\n release_conn=release_conn,\n chunked=chunked,\n body_pos=body_pos,\n preload_content=preload_content,\n decode_content=decode_content,\n **response_kw,\n )\n\n return response\n\n\nclass HTTPSConnectionPool(HTTPConnectionPool):\n \"\"\"\n Same as :class:`.HTTPConnectionPool`, but HTTPS.\n\n :class:`.HTTPSConnection` uses one of ``assert_fingerprint``,\n ``assert_hostname`` and ``host`` in this order to verify connections.\n If ``assert_hostname`` is False, no verification is done.\n\n The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs``,\n ``ca_cert_dir``, ``ssl_version``, ``key_password`` are only used if :mod:`ssl`\n is available and are fed into :meth:`urllib3.util.ssl_wrap_socket` to upgrade\n the connection socket into an SSL socket.\n \"\"\"\n\n scheme = \"https\"\n ConnectionCls: type[BaseHTTPSConnection] = HTTPSConnection\n\n def __init__(\n self,\n host: str,\n port: int | None = None,\n timeout: _TYPE_TIMEOUT | None = _DEFAULT_TIMEOUT,\n maxsize: int = 1,\n block: bool = False,\n headers: typing.Mapping[str, str] | None = None,\n retries: Retry | bool | int | None = None,\n _proxy: Url | None = None,\n _proxy_headers: typing.Mapping[str, 
str] | None = None,\n key_file: str | None = None,\n cert_file: str | None = None,\n cert_reqs: int | str | None = None,\n key_password: str | None = None,\n ca_certs: str | None = None,\n ssl_version: int | str | None = None,\n ssl_minimum_version: ssl.TLSVersion | None = None,\n ssl_maximum_version: ssl.TLSVersion | None = None,\n assert_hostname: str | Literal[False] | None = None,\n assert_fingerprint: str | None = None,\n ca_cert_dir: str | None = None,\n **conn_kw: typing.Any,\n ) -> None:\n\n super().__init__(\n host,\n port,\n timeout,\n maxsize,\n block,\n headers,\n retries,\n _proxy,\n _proxy_headers,\n **conn_kw,\n )\n\n self.key_file = key_file\n self.cert_file = cert_file\n self.cert_reqs = cert_reqs\n self.key_password = key_password\n self.ca_certs = ca_certs\n self.ca_cert_dir = ca_cert_dir\n self.ssl_version = ssl_version\n self.ssl_minimum_version = ssl_minimum_version\n self.ssl_maximum_version = ssl_maximum_version\n self.assert_hostname = assert_hostname\n self.assert_fingerprint = assert_fingerprint\n\n def _prepare_proxy(self, conn: HTTPSConnection) -> None: # type: ignore[override]\n \"\"\"Establishes a tunnel connection through HTTP CONNECT.\"\"\"\n if self.proxy and self.proxy.scheme == \"https\":\n tunnel_scheme = \"https\"\n else:\n tunnel_scheme = \"http\"\n\n conn.set_tunnel(\n scheme=tunnel_scheme,\n host=self._tunnel_host,\n port=self.port,\n headers=self.proxy_headers,\n )\n conn.connect()\n\n def _new_conn(self) -> BaseHTTPSConnection:\n \"\"\"\n Return a fresh :class:`urllib3.connection.HTTPConnection`.\n \"\"\"\n self.num_connections += 1\n log.debug(\n \"Starting new HTTPS connection (%d): %s:%s\",\n self.num_connections,\n self.host,\n self.port or \"443\",\n )\n\n if not self.ConnectionCls or self.ConnectionCls is DummyConnection: # type: ignore[comparison-overlap]\n raise ImportError(\n \"Can't connect to HTTPS URL because the SSL module is not available.\"\n )\n\n actual_host: str = self.host\n actual_port = self.port\n if self.proxy is not None and self.proxy.host is not None:\n actual_host = self.proxy.host\n actual_port = self.proxy.port\n\n return self.ConnectionCls(\n host=actual_host,\n port=actual_port,\n timeout=self.timeout.connect_timeout,\n cert_file=self.cert_file,\n key_file=self.key_file,\n key_password=self.key_password,\n cert_reqs=self.cert_reqs,\n ca_certs=self.ca_certs,\n ca_cert_dir=self.ca_cert_dir,\n assert_hostname=self.assert_hostname,\n assert_fingerprint=self.assert_fingerprint,\n ssl_version=self.ssl_version,\n ssl_minimum_version=self.ssl_minimum_version,\n ssl_maximum_version=self.ssl_maximum_version,\n **self.conn_kw,\n )\n\n def _validate_conn(self, conn: BaseHTTPConnection) -> None:\n \"\"\"\n Called right before a request is made, after the socket is created.\n \"\"\"\n super()._validate_conn(conn)\n\n # Force connect early to allow us to validate the connection.\n if conn.is_closed:\n conn.connect()\n\n if not conn.is_verified:\n warnings.warn(\n (\n f\"Unverified HTTPS request is being made to host '{conn.host}'. \"\n \"Adding certificate verification is strongly advised. 
See: \"\n \"https://urllib3.readthedocs.io/en/latest/advanced-usage.html\"\n \"#tls-warnings\"\n ),\n InsecureRequestWarning,\n )\n\n\ndef connection_from_url(url: str, **kw: typing.Any) -> HTTPConnectionPool:\n \"\"\"\n Given a url, return an :class:`.ConnectionPool` instance of its host.\n\n This is a shortcut for not having to parse out the scheme, host, and port\n of the url before creating an :class:`.ConnectionPool` instance.\n\n :param url:\n Absolute URL string that must include the scheme. Port is optional.\n\n :param \\\\**kw:\n Passes additional parameters to the constructor of the appropriate\n :class:`.ConnectionPool`. Useful for specifying things like\n timeout, maxsize, headers, etc.\n\n Example::\n\n >>> conn = connection_from_url('http://google.com/')\n >>> r = conn.request('GET', '/')\n \"\"\"\n scheme, _, host, port, *_ = parse_url(url)\n scheme = scheme or \"http\"\n port = port or port_by_scheme.get(scheme, 80)\n if scheme == \"https\":\n return HTTPSConnectionPool(host, port=port, **kw) # type: ignore[arg-type]\n else:\n return HTTPConnectionPool(host, port=port, **kw) # type: ignore[arg-type]\n\n\[email protected]\ndef _normalize_host(host: None, scheme: str | None) -> None:\n ...\n\n\[email protected]\ndef _normalize_host(host: str, scheme: str | None) -> str:\n ...\n\n\ndef _normalize_host(host: str | None, scheme: str | None) -> str | None:\n \"\"\"\n Normalize hosts for comparisons and use with sockets.\n \"\"\"\n\n host = normalize_host(host, scheme)\n\n # httplib doesn't like it when we include brackets in IPv6 addresses\n # Specifically, if we include brackets but also pass the port then\n # httplib crazily doubles up the square brackets on the Host header.\n # Instead, we need to make sure we never pass ``None`` as the port.\n # However, for backward compatibility reasons we can't actually\n # *assert* that. See http://bugs.python.org/issue28539\n if host and host.startswith(\"[\") and host.endswith(\"]\"):\n host = host[1:-1]\n return host\n\n\ndef _url_from_pool(\n pool: HTTPConnectionPool | HTTPSConnectionPool, path: str | None = None\n) -> str:\n \"\"\"Returns the URL from a given connection pool. This is mainly used for testing and logging.\"\"\"\n return Url(scheme=pool.scheme, host=pool.host, port=pool.port, path=path).url\n\n\ndef _close_pool_connections(pool: queue.LifoQueue[typing.Any]) -> None:\n \"\"\"Drains a queue of connections and closes each one.\"\"\"\n try:\n while True:\n conn = pool.get(block=False)\n if conn:\n conn.close()\n except queue.Empty:\n pass # Done.\n", "path": "src/urllib3/connectionpool.py" } ]
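For orientation before the patched file below: the failing call is the `log.debug(...)` in `HTTPConnectionPool._make_request` above, which passes eight values (the last being `response.length_remaining`) to a format string with only seven `%s` placeholders. The corrected call, as it appears in the patched file, simply adds the missing placeholder:

```python
log.debug(
    '%s://%s:%s "%s %s %s" %s %s',
    self.scheme,
    self.host,
    self.port,
    method,
    url,
    # HTTP version
    conn._http_vsn_str,  # type: ignore[attr-defined]
    response.status,
    response.length_remaining,  # type: ignore[attr-defined]
)
```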
[ { "content": "from __future__ import annotations\n\nimport errno\nimport logging\nimport queue\nimport sys\nimport typing\nimport warnings\nimport weakref\nfrom socket import timeout as SocketTimeout\nfrom types import TracebackType\n\nfrom ._base_connection import _TYPE_BODY\nfrom ._request_methods import RequestMethods\nfrom .connection import (\n BaseSSLError,\n BrokenPipeError,\n DummyConnection,\n HTTPConnection,\n HTTPException,\n HTTPSConnection,\n ProxyConfig,\n _wrap_proxy_error,\n)\nfrom .connection import port_by_scheme as port_by_scheme\nfrom .exceptions import (\n ClosedPoolError,\n EmptyPoolError,\n FullPoolError,\n HostChangedError,\n InsecureRequestWarning,\n LocationValueError,\n MaxRetryError,\n NewConnectionError,\n ProtocolError,\n ProxyError,\n ReadTimeoutError,\n SSLError,\n TimeoutError,\n)\nfrom .response import BaseHTTPResponse\nfrom .util.connection import is_connection_dropped\nfrom .util.proxy import connection_requires_http_tunnel\nfrom .util.request import _TYPE_BODY_POSITION, set_file_position\nfrom .util.retry import Retry\nfrom .util.ssl_match_hostname import CertificateError\nfrom .util.timeout import _DEFAULT_TIMEOUT, _TYPE_DEFAULT, Timeout\nfrom .util.url import Url, _encode_target\nfrom .util.url import _normalize_host as normalize_host\nfrom .util.url import parse_url\nfrom .util.util import to_str\n\nif typing.TYPE_CHECKING:\n import ssl\n\n from typing_extensions import Literal\n\n from ._base_connection import BaseHTTPConnection, BaseHTTPSConnection\n\nlog = logging.getLogger(__name__)\n\n_TYPE_TIMEOUT = typing.Union[Timeout, float, _TYPE_DEFAULT, None]\n\n_SelfT = typing.TypeVar(\"_SelfT\")\n\n\n# Pool objects\nclass ConnectionPool:\n \"\"\"\n Base class for all connection pools, such as\n :class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`.\n\n .. note::\n ConnectionPool.urlopen() does not normalize or percent-encode target URIs\n which is useful if your target server doesn't support percent-encoded\n target URIs.\n \"\"\"\n\n scheme: str | None = None\n QueueCls = queue.LifoQueue\n\n def __init__(self, host: str, port: int | None = None) -> None:\n if not host:\n raise LocationValueError(\"No host specified.\")\n\n self.host = _normalize_host(host, scheme=self.scheme)\n self.port = port\n\n # This property uses 'normalize_host()' (not '_normalize_host()')\n # to avoid removing square braces around IPv6 addresses.\n # This value is sent to `HTTPConnection.set_tunnel()` if called\n # because square braces are required for HTTP CONNECT tunneling.\n self._tunnel_host = normalize_host(host, scheme=self.scheme).lower()\n\n def __str__(self) -> str:\n return f\"{type(self).__name__}(host={self.host!r}, port={self.port!r})\"\n\n def __enter__(self: _SelfT) -> _SelfT:\n return self\n\n def __exit__(\n self,\n exc_type: type[BaseException] | None,\n exc_val: BaseException | None,\n exc_tb: TracebackType | None,\n ) -> Literal[False]:\n self.close()\n # Return False to re-raise any potential exceptions\n return False\n\n def close(self) -> None:\n \"\"\"\n Close all pooled connections and disable the pool.\n \"\"\"\n\n\n# This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252\n_blocking_errnos = {errno.EAGAIN, errno.EWOULDBLOCK}\n\n\nclass HTTPConnectionPool(ConnectionPool, RequestMethods):\n \"\"\"\n Thread-safe connection pool for one host.\n\n :param host:\n Host used for this HTTP Connection (e.g. 
\"localhost\"), passed into\n :class:`http.client.HTTPConnection`.\n\n :param port:\n Port used for this HTTP Connection (None is equivalent to 80), passed\n into :class:`http.client.HTTPConnection`.\n\n :param timeout:\n Socket timeout in seconds for each individual connection. This can\n be a float or integer, which sets the timeout for the HTTP request,\n or an instance of :class:`urllib3.util.Timeout` which gives you more\n fine-grained control over request timeouts. After the constructor has\n been parsed, this is always a `urllib3.util.Timeout` object.\n\n :param maxsize:\n Number of connections to save that can be reused. More than 1 is useful\n in multithreaded situations. If ``block`` is set to False, more\n connections will be created but they will not be saved once they've\n been used.\n\n :param block:\n If set to True, no more than ``maxsize`` connections will be used at\n a time. When no free connections are available, the call will block\n until a connection has been released. This is a useful side effect for\n particular multithreaded situations where one does not want to use more\n than maxsize connections per host to prevent flooding.\n\n :param headers:\n Headers to include with all requests, unless other headers are given\n explicitly.\n\n :param retries:\n Retry configuration to use by default with requests in this pool.\n\n :param _proxy:\n Parsed proxy URL, should not be used directly, instead, see\n :class:`urllib3.ProxyManager`\n\n :param _proxy_headers:\n A dictionary with proxy headers, should not be used directly,\n instead, see :class:`urllib3.ProxyManager`\n\n :param \\\\**conn_kw:\n Additional parameters are used to create fresh :class:`urllib3.connection.HTTPConnection`,\n :class:`urllib3.connection.HTTPSConnection` instances.\n \"\"\"\n\n scheme = \"http\"\n ConnectionCls: (\n type[BaseHTTPConnection] | type[BaseHTTPSConnection]\n ) = HTTPConnection\n\n def __init__(\n self,\n host: str,\n port: int | None = None,\n timeout: _TYPE_TIMEOUT | None = _DEFAULT_TIMEOUT,\n maxsize: int = 1,\n block: bool = False,\n headers: typing.Mapping[str, str] | None = None,\n retries: Retry | bool | int | None = None,\n _proxy: Url | None = None,\n _proxy_headers: typing.Mapping[str, str] | None = None,\n _proxy_config: ProxyConfig | None = None,\n **conn_kw: typing.Any,\n ):\n ConnectionPool.__init__(self, host, port)\n RequestMethods.__init__(self, headers)\n\n if not isinstance(timeout, Timeout):\n timeout = Timeout.from_float(timeout)\n\n if retries is None:\n retries = Retry.DEFAULT\n\n self.timeout = timeout\n self.retries = retries\n\n self.pool: queue.LifoQueue[typing.Any] | None = self.QueueCls(maxsize)\n self.block = block\n\n self.proxy = _proxy\n self.proxy_headers = _proxy_headers or {}\n self.proxy_config = _proxy_config\n\n # Fill the queue up so that doing get() on it will block properly\n for _ in range(maxsize):\n self.pool.put(None)\n\n # These are mostly for testing and debugging purposes.\n self.num_connections = 0\n self.num_requests = 0\n self.conn_kw = conn_kw\n\n if self.proxy:\n # Enable Nagle's algorithm for proxies, to avoid packet fragmentation.\n # We cannot know if the user has added default socket options, so we cannot replace the\n # list.\n self.conn_kw.setdefault(\"socket_options\", [])\n\n self.conn_kw[\"proxy\"] = self.proxy\n self.conn_kw[\"proxy_config\"] = self.proxy_config\n\n # Do not pass 'self' as callback to 'finalize'.\n # Then the 'finalize' would keep an endless living (leak) to self.\n # By just passing a reference to the 
pool allows the garbage collector\n # to free self if nobody else has a reference to it.\n pool = self.pool\n\n # Close all the HTTPConnections in the pool before the\n # HTTPConnectionPool object is garbage collected.\n weakref.finalize(self, _close_pool_connections, pool)\n\n def _new_conn(self) -> BaseHTTPConnection:\n \"\"\"\n Return a fresh :class:`HTTPConnection`.\n \"\"\"\n self.num_connections += 1\n log.debug(\n \"Starting new HTTP connection (%d): %s:%s\",\n self.num_connections,\n self.host,\n self.port or \"80\",\n )\n\n conn = self.ConnectionCls(\n host=self.host,\n port=self.port,\n timeout=self.timeout.connect_timeout,\n **self.conn_kw,\n )\n return conn\n\n def _get_conn(self, timeout: float | None = None) -> BaseHTTPConnection:\n \"\"\"\n Get a connection. Will return a pooled connection if one is available.\n\n If no connections are available and :prop:`.block` is ``False``, then a\n fresh connection is returned.\n\n :param timeout:\n Seconds to wait before giving up and raising\n :class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and\n :prop:`.block` is ``True``.\n \"\"\"\n conn = None\n\n if self.pool is None:\n raise ClosedPoolError(self, \"Pool is closed.\")\n\n try:\n conn = self.pool.get(block=self.block, timeout=timeout)\n\n except AttributeError: # self.pool is None\n raise ClosedPoolError(self, \"Pool is closed.\") from None # Defensive:\n\n except queue.Empty:\n if self.block:\n raise EmptyPoolError(\n self,\n \"Pool is empty and a new connection can't be opened due to blocking mode.\",\n ) from None\n pass # Oh well, we'll create a new connection then\n\n # If this is a persistent connection, check if it got disconnected\n if conn and is_connection_dropped(conn):\n log.debug(\"Resetting dropped connection: %s\", self.host)\n conn.close()\n\n return conn or self._new_conn()\n\n def _put_conn(self, conn: BaseHTTPConnection | None) -> None:\n \"\"\"\n Put a connection back into the pool.\n\n :param conn:\n Connection object for the current host and port as returned by\n :meth:`._new_conn` or :meth:`._get_conn`.\n\n If the pool is already full, the connection is closed and discarded\n because we exceeded maxsize. If connections are discarded frequently,\n then maxsize should be increased.\n\n If the pool is closed, then the connection will be closed and discarded.\n \"\"\"\n if self.pool is not None:\n try:\n self.pool.put(conn, block=False)\n return # Everything is dandy, done.\n except AttributeError:\n # self.pool is None.\n pass\n except queue.Full:\n\n # Connection never got put back into the pool, close it.\n if conn:\n conn.close()\n\n if self.block:\n # This should never happen if you got the conn from self._get_conn\n raise FullPoolError(\n self,\n \"Pool reached maximum size and no more connections are allowed.\",\n ) from None\n\n log.warning(\n \"Connection pool is full, discarding connection: %s. 
Connection pool size: %s\",\n self.host,\n self.pool.qsize(),\n )\n\n # Connection never got put back into the pool, close it.\n if conn:\n conn.close()\n\n def _validate_conn(self, conn: BaseHTTPConnection) -> None:\n \"\"\"\n Called right before a request is made, after the socket is created.\n \"\"\"\n\n def _prepare_proxy(self, conn: BaseHTTPConnection) -> None:\n # Nothing to do for HTTP connections.\n pass\n\n def _get_timeout(self, timeout: _TYPE_TIMEOUT) -> Timeout:\n \"\"\"Helper that always returns a :class:`urllib3.util.Timeout`\"\"\"\n if timeout is _DEFAULT_TIMEOUT:\n return self.timeout.clone()\n\n if isinstance(timeout, Timeout):\n return timeout.clone()\n else:\n # User passed us an int/float. This is for backwards compatibility,\n # can be removed later\n return Timeout.from_float(timeout)\n\n def _raise_timeout(\n self,\n err: BaseSSLError | OSError | SocketTimeout,\n url: str,\n timeout_value: _TYPE_TIMEOUT | None,\n ) -> None:\n \"\"\"Is the error actually a timeout? Will raise a ReadTimeout or pass\"\"\"\n\n if isinstance(err, SocketTimeout):\n raise ReadTimeoutError(\n self, url, f\"Read timed out. (read timeout={timeout_value})\"\n ) from err\n\n # See the above comment about EAGAIN in Python 3.\n if hasattr(err, \"errno\") and err.errno in _blocking_errnos:\n raise ReadTimeoutError(\n self, url, f\"Read timed out. (read timeout={timeout_value})\"\n ) from err\n\n def _make_request(\n self,\n conn: BaseHTTPConnection,\n method: str,\n url: str,\n body: _TYPE_BODY | None = None,\n headers: typing.Mapping[str, str] | None = None,\n retries: Retry | None = None,\n timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,\n chunked: bool = False,\n response_conn: BaseHTTPConnection | None = None,\n preload_content: bool = True,\n decode_content: bool = True,\n enforce_content_length: bool = True,\n ) -> BaseHTTPResponse:\n \"\"\"\n Perform a request on a given urllib connection object taken from our\n pool.\n\n :param conn:\n a connection from one of our connection pools\n\n :param method:\n HTTP request method (such as GET, POST, PUT, etc.)\n\n :param url:\n The URL to perform the request on.\n\n :param body:\n Data to send in the request body, either :class:`str`, :class:`bytes`,\n an iterable of :class:`str`/:class:`bytes`, or a file-like object.\n\n :param headers:\n Dictionary of custom headers to send, such as User-Agent,\n If-None-Match, etc. If None, pool headers are used. If provided,\n these headers completely replace any pool-specific headers.\n\n :param retries:\n Configure the number of retries to allow before raising a\n :class:`~urllib3.exceptions.MaxRetryError` exception.\n\n Pass ``None`` to retry until you receive a response. Pass a\n :class:`~urllib3.util.retry.Retry` object for fine-grained control\n over different types of retries.\n Pass an integer number to retry connection errors that many times,\n but no other types of errors. Pass zero to never retry.\n\n If ``False``, then retries are disabled and any exception is raised\n immediately. Also, instead of raising a MaxRetryError on redirects,\n the redirect response will be returned.\n\n :type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.\n\n :param timeout:\n If specified, overrides the default timeout for this one\n request. It may be a float (in seconds) or an instance of\n :class:`urllib3.util.Timeout`.\n\n :param chunked:\n If True, urllib3 will send the body using chunked transfer\n encoding. Otherwise, urllib3 will send the body using the standard\n content-length form. 
Defaults to False.\n\n :param response_conn:\n Set this to ``None`` if you will handle releasing the connection or\n set the connection to have the response release it.\n\n :param preload_content:\n If True, the response's body will be preloaded during construction.\n\n :param decode_content:\n If True, will attempt to decode the body based on the\n 'content-encoding' header.\n\n :param enforce_content_length:\n Enforce content length checking. Body returned by server must match\n value of Content-Length header, if present. Otherwise, raise error.\n \"\"\"\n self.num_requests += 1\n\n timeout_obj = self._get_timeout(timeout)\n timeout_obj.start_connect()\n conn.timeout = Timeout.resolve_default_timeout(timeout_obj.connect_timeout)\n\n try:\n # Trigger any extra validation we need to do.\n try:\n self._validate_conn(conn)\n except (SocketTimeout, BaseSSLError) as e:\n self._raise_timeout(err=e, url=url, timeout_value=conn.timeout)\n raise\n\n # _validate_conn() starts the connection to an HTTPS proxy\n # so we need to wrap errors with 'ProxyError' here too.\n except (\n OSError,\n NewConnectionError,\n TimeoutError,\n BaseSSLError,\n CertificateError,\n SSLError,\n ) as e:\n new_e: Exception = e\n if isinstance(e, (BaseSSLError, CertificateError)):\n new_e = SSLError(e)\n # If the connection didn't successfully connect to it's proxy\n # then there\n if isinstance(\n new_e, (OSError, NewConnectionError, TimeoutError, SSLError)\n ) and (conn and conn.proxy and not conn.has_connected_to_proxy):\n new_e = _wrap_proxy_error(new_e, conn.proxy.scheme)\n raise new_e\n\n # conn.request() calls http.client.*.request, not the method in\n # urllib3.request. It also calls makefile (recv) on the socket.\n try:\n conn.request(\n method,\n url,\n body=body,\n headers=headers,\n chunked=chunked,\n preload_content=preload_content,\n decode_content=decode_content,\n enforce_content_length=enforce_content_length,\n )\n\n # We are swallowing BrokenPipeError (errno.EPIPE) since the server is\n # legitimately able to close the connection after sending a valid response.\n # With this behaviour, the received response is still readable.\n except BrokenPipeError:\n pass\n except OSError as e:\n # MacOS/Linux\n # EPROTOTYPE is needed on macOS\n # https://erickt.github.io/blog/2014/11/19/adventures-in-debugging-a-potential-osx-kernel-bug/\n if e.errno != errno.EPROTOTYPE:\n raise\n\n # Reset the timeout for the recv() on the socket\n read_timeout = timeout_obj.read_timeout\n\n if not conn.is_closed:\n # In Python 3 socket.py will catch EAGAIN and return None when you\n # try and read into the file pointer created by http.client, which\n # instead raises a BadStatusLine exception. Instead of catching\n # the exception and assuming all BadStatusLine exceptions are read\n # timeouts, check for a zero timeout before making the request.\n if read_timeout == 0:\n raise ReadTimeoutError(\n self, url, f\"Read timed out. 
(read timeout={read_timeout})\"\n )\n conn.timeout = read_timeout\n\n # Receive the response from the server\n try:\n response = conn.getresponse()\n except (BaseSSLError, OSError) as e:\n self._raise_timeout(err=e, url=url, timeout_value=read_timeout)\n raise\n\n # Set properties that are used by the pooling layer.\n response.retries = retries\n response._connection = response_conn # type: ignore[attr-defined]\n response._pool = self # type: ignore[attr-defined]\n\n log.debug(\n '%s://%s:%s \"%s %s %s\" %s %s',\n self.scheme,\n self.host,\n self.port,\n method,\n url,\n # HTTP version\n conn._http_vsn_str, # type: ignore[attr-defined]\n response.status,\n response.length_remaining, # type: ignore[attr-defined]\n )\n\n return response\n\n def close(self) -> None:\n \"\"\"\n Close all pooled connections and disable the pool.\n \"\"\"\n if self.pool is None:\n return\n # Disable access to the pool\n old_pool, self.pool = self.pool, None\n\n # Close all the HTTPConnections in the pool.\n _close_pool_connections(old_pool)\n\n def is_same_host(self, url: str) -> bool:\n \"\"\"\n Check if the given ``url`` is a member of the same host as this\n connection pool.\n \"\"\"\n if url.startswith(\"/\"):\n return True\n\n # TODO: Add optional support for socket.gethostbyname checking.\n scheme, _, host, port, *_ = parse_url(url)\n scheme = scheme or \"http\"\n if host is not None:\n host = _normalize_host(host, scheme=scheme)\n\n # Use explicit default port for comparison when none is given\n if self.port and not port:\n port = port_by_scheme.get(scheme)\n elif not self.port and port == port_by_scheme.get(scheme):\n port = None\n\n return (scheme, host, port) == (self.scheme, self.host, self.port)\n\n def urlopen( # type: ignore[override]\n self,\n method: str,\n url: str,\n body: _TYPE_BODY | None = None,\n headers: typing.Mapping[str, str] | None = None,\n retries: Retry | bool | int | None = None,\n redirect: bool = True,\n assert_same_host: bool = True,\n timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,\n pool_timeout: int | None = None,\n release_conn: bool | None = None,\n chunked: bool = False,\n body_pos: _TYPE_BODY_POSITION | None = None,\n preload_content: bool = True,\n decode_content: bool = True,\n **response_kw: typing.Any,\n ) -> BaseHTTPResponse:\n \"\"\"\n Get a connection from the pool and perform an HTTP request. This is the\n lowest level call for making a request, so you'll need to specify all\n the raw details.\n\n .. note::\n\n More commonly, it's appropriate to use a convenience method\n such as :meth:`request`.\n\n .. note::\n\n `release_conn` will only behave as expected if\n `preload_content=False` because we want to make\n `preload_content=False` the default behaviour someday soon without\n breaking backwards compatibility.\n\n :param method:\n HTTP request method (such as GET, POST, PUT, etc.)\n\n :param url:\n The URL to perform the request on.\n\n :param body:\n Data to send in the request body, either :class:`str`, :class:`bytes`,\n an iterable of :class:`str`/:class:`bytes`, or a file-like object.\n\n :param headers:\n Dictionary of custom headers to send, such as User-Agent,\n If-None-Match, etc. If None, pool headers are used. If provided,\n these headers completely replace any pool-specific headers.\n\n :param retries:\n Configure the number of retries to allow before raising a\n :class:`~urllib3.exceptions.MaxRetryError` exception.\n\n Pass ``None`` to retry until you receive a response. 
Pass a\n :class:`~urllib3.util.retry.Retry` object for fine-grained control\n over different types of retries.\n Pass an integer number to retry connection errors that many times,\n but no other types of errors. Pass zero to never retry.\n\n If ``False``, then retries are disabled and any exception is raised\n immediately. Also, instead of raising a MaxRetryError on redirects,\n the redirect response will be returned.\n\n :type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.\n\n :param redirect:\n If True, automatically handle redirects (status codes 301, 302,\n 303, 307, 308). Each redirect counts as a retry. Disabling retries\n will disable redirect, too.\n\n :param assert_same_host:\n If ``True``, will make sure that the host of the pool requests is\n consistent else will raise HostChangedError. When ``False``, you can\n use the pool on an HTTP proxy and request foreign hosts.\n\n :param timeout:\n If specified, overrides the default timeout for this one\n request. It may be a float (in seconds) or an instance of\n :class:`urllib3.util.Timeout`.\n\n :param pool_timeout:\n If set and the pool is set to block=True, then this method will\n block for ``pool_timeout`` seconds and raise EmptyPoolError if no\n connection is available within the time period.\n\n :param bool preload_content:\n If True, the response's body will be preloaded into memory.\n\n :param bool decode_content:\n If True, will attempt to decode the body based on the\n 'content-encoding' header.\n\n :param release_conn:\n If False, then the urlopen call will not release the connection\n back into the pool once a response is received (but will release if\n you read the entire contents of the response such as when\n `preload_content=True`). This is useful if you're not preloading\n the response's content immediately. You will need to call\n ``r.release_conn()`` on the response ``r`` to return the connection\n back into the pool. If None, it takes the value of ``preload_content``\n which defaults to ``True``.\n\n :param bool chunked:\n If True, urllib3 will send the body using chunked transfer\n encoding. Otherwise, urllib3 will send the body using the standard\n content-length form. Defaults to False.\n\n :param int body_pos:\n Position to seek to in file-like body in the event of a retry or\n redirect. Typically this won't need to be set because urllib3 will\n auto-populate the value when needed.\n \"\"\"\n parsed_url = parse_url(url)\n destination_scheme = parsed_url.scheme\n\n if headers is None:\n headers = self.headers\n\n if not isinstance(retries, Retry):\n retries = Retry.from_int(retries, redirect=redirect, default=self.retries)\n\n if release_conn is None:\n release_conn = preload_content\n\n # Check host\n if assert_same_host and not self.is_same_host(url):\n raise HostChangedError(self, url, retries)\n\n # Ensure that the URL we're connecting to is properly encoded\n if url.startswith(\"/\"):\n url = to_str(_encode_target(url))\n else:\n url = to_str(parsed_url.url)\n\n conn = None\n\n # Track whether `conn` needs to be released before\n # returning/raising/recursing. Update this variable if necessary, and\n # leave `release_conn` constant throughout the function. 
That way, if\n # the function recurses, the original value of `release_conn` will be\n # passed down into the recursive call, and its value will be respected.\n #\n # See issue #651 [1] for details.\n #\n # [1] <https://github.com/urllib3/urllib3/issues/651>\n release_this_conn = release_conn\n\n http_tunnel_required = connection_requires_http_tunnel(\n self.proxy, self.proxy_config, destination_scheme\n )\n\n # Merge the proxy headers. Only done when not using HTTP CONNECT. We\n # have to copy the headers dict so we can safely change it without those\n # changes being reflected in anyone else's copy.\n if not http_tunnel_required:\n headers = headers.copy() # type: ignore[attr-defined]\n headers.update(self.proxy_headers) # type: ignore[union-attr]\n\n # Must keep the exception bound to a separate variable or else Python 3\n # complains about UnboundLocalError.\n err = None\n\n # Keep track of whether we cleanly exited the except block. This\n # ensures we do proper cleanup in finally.\n clean_exit = False\n\n # Rewind body position, if needed. Record current position\n # for future rewinds in the event of a redirect/retry.\n body_pos = set_file_position(body, body_pos)\n\n try:\n # Request a connection from the queue.\n timeout_obj = self._get_timeout(timeout)\n conn = self._get_conn(timeout=pool_timeout)\n\n conn.timeout = timeout_obj.connect_timeout # type: ignore[assignment]\n\n # Is this a closed/new connection that requires CONNECT tunnelling?\n if self.proxy is not None and http_tunnel_required and conn.is_closed:\n try:\n self._prepare_proxy(conn)\n except (BaseSSLError, OSError, SocketTimeout) as e:\n self._raise_timeout(\n err=e, url=self.proxy.url, timeout_value=conn.timeout\n )\n raise\n\n # If we're going to release the connection in ``finally:``, then\n # the response doesn't need to know about the connection. Otherwise\n # it will also try to release it and we'll have a double-release\n # mess.\n response_conn = conn if not release_conn else None\n\n # Make the request on the HTTPConnection object\n response = self._make_request(\n conn,\n method,\n url,\n timeout=timeout_obj,\n body=body,\n headers=headers,\n chunked=chunked,\n retries=retries,\n response_conn=response_conn,\n preload_content=preload_content,\n decode_content=decode_content,\n **response_kw,\n )\n\n # Everything went great!\n clean_exit = True\n\n except EmptyPoolError:\n # Didn't get a connection from the pool, no need to clean up\n clean_exit = True\n release_this_conn = False\n raise\n\n except (\n TimeoutError,\n HTTPException,\n OSError,\n ProtocolError,\n BaseSSLError,\n SSLError,\n CertificateError,\n ProxyError,\n ) as e:\n # Discard the connection for these exceptions. It will be\n # replaced during the next _get_conn() call.\n clean_exit = False\n new_e: Exception = e\n if isinstance(e, (BaseSSLError, CertificateError)):\n new_e = SSLError(e)\n if isinstance(\n new_e,\n (\n OSError,\n NewConnectionError,\n TimeoutError,\n SSLError,\n HTTPException,\n ),\n ) and (conn and conn.proxy and not conn.has_connected_to_proxy):\n new_e = _wrap_proxy_error(new_e, conn.proxy.scheme)\n elif isinstance(new_e, (OSError, HTTPException)):\n new_e = ProtocolError(\"Connection aborted.\", new_e)\n\n retries = retries.increment(\n method, url, error=new_e, _pool=self, _stacktrace=sys.exc_info()[2]\n )\n retries.sleep()\n\n # Keep track of the error for the retry warning.\n err = e\n\n finally:\n if not clean_exit:\n # We hit some kind of exception, handled or otherwise. 
We need\n # to throw the connection away unless explicitly told not to.\n # Close the connection, set the variable to None, and make sure\n # we put the None back in the pool to avoid leaking it.\n if conn:\n conn.close()\n conn = None\n release_this_conn = True\n\n if release_this_conn:\n # Put the connection back to be reused. If the connection is\n # expired then it will be None, which will get replaced with a\n # fresh connection during _get_conn.\n self._put_conn(conn)\n\n if not conn:\n # Try again\n log.warning(\n \"Retrying (%r) after connection broken by '%r': %s\", retries, err, url\n )\n return self.urlopen(\n method,\n url,\n body,\n headers,\n retries,\n redirect,\n assert_same_host,\n timeout=timeout,\n pool_timeout=pool_timeout,\n release_conn=release_conn,\n chunked=chunked,\n body_pos=body_pos,\n preload_content=preload_content,\n decode_content=decode_content,\n **response_kw,\n )\n\n # Handle redirect?\n redirect_location = redirect and response.get_redirect_location()\n if redirect_location:\n if response.status == 303:\n method = \"GET\"\n\n try:\n retries = retries.increment(method, url, response=response, _pool=self)\n except MaxRetryError:\n if retries.raise_on_redirect:\n response.drain_conn()\n raise\n return response\n\n response.drain_conn()\n retries.sleep_for_retry(response)\n log.debug(\"Redirecting %s -> %s\", url, redirect_location)\n return self.urlopen(\n method,\n redirect_location,\n body,\n headers,\n retries=retries,\n redirect=redirect,\n assert_same_host=assert_same_host,\n timeout=timeout,\n pool_timeout=pool_timeout,\n release_conn=release_conn,\n chunked=chunked,\n body_pos=body_pos,\n preload_content=preload_content,\n decode_content=decode_content,\n **response_kw,\n )\n\n # Check if we should retry the HTTP response.\n has_retry_after = bool(response.headers.get(\"Retry-After\"))\n if retries.is_retry(method, response.status, has_retry_after):\n try:\n retries = retries.increment(method, url, response=response, _pool=self)\n except MaxRetryError:\n if retries.raise_on_status:\n response.drain_conn()\n raise\n return response\n\n response.drain_conn()\n retries.sleep(response)\n log.debug(\"Retry: %s\", url)\n return self.urlopen(\n method,\n url,\n body,\n headers,\n retries=retries,\n redirect=redirect,\n assert_same_host=assert_same_host,\n timeout=timeout,\n pool_timeout=pool_timeout,\n release_conn=release_conn,\n chunked=chunked,\n body_pos=body_pos,\n preload_content=preload_content,\n decode_content=decode_content,\n **response_kw,\n )\n\n return response\n\n\nclass HTTPSConnectionPool(HTTPConnectionPool):\n \"\"\"\n Same as :class:`.HTTPConnectionPool`, but HTTPS.\n\n :class:`.HTTPSConnection` uses one of ``assert_fingerprint``,\n ``assert_hostname`` and ``host`` in this order to verify connections.\n If ``assert_hostname`` is False, no verification is done.\n\n The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs``,\n ``ca_cert_dir``, ``ssl_version``, ``key_password`` are only used if :mod:`ssl`\n is available and are fed into :meth:`urllib3.util.ssl_wrap_socket` to upgrade\n the connection socket into an SSL socket.\n \"\"\"\n\n scheme = \"https\"\n ConnectionCls: type[BaseHTTPSConnection] = HTTPSConnection\n\n def __init__(\n self,\n host: str,\n port: int | None = None,\n timeout: _TYPE_TIMEOUT | None = _DEFAULT_TIMEOUT,\n maxsize: int = 1,\n block: bool = False,\n headers: typing.Mapping[str, str] | None = None,\n retries: Retry | bool | int | None = None,\n _proxy: Url | None = None,\n _proxy_headers: typing.Mapping[str, 
str] | None = None,\n key_file: str | None = None,\n cert_file: str | None = None,\n cert_reqs: int | str | None = None,\n key_password: str | None = None,\n ca_certs: str | None = None,\n ssl_version: int | str | None = None,\n ssl_minimum_version: ssl.TLSVersion | None = None,\n ssl_maximum_version: ssl.TLSVersion | None = None,\n assert_hostname: str | Literal[False] | None = None,\n assert_fingerprint: str | None = None,\n ca_cert_dir: str | None = None,\n **conn_kw: typing.Any,\n ) -> None:\n\n super().__init__(\n host,\n port,\n timeout,\n maxsize,\n block,\n headers,\n retries,\n _proxy,\n _proxy_headers,\n **conn_kw,\n )\n\n self.key_file = key_file\n self.cert_file = cert_file\n self.cert_reqs = cert_reqs\n self.key_password = key_password\n self.ca_certs = ca_certs\n self.ca_cert_dir = ca_cert_dir\n self.ssl_version = ssl_version\n self.ssl_minimum_version = ssl_minimum_version\n self.ssl_maximum_version = ssl_maximum_version\n self.assert_hostname = assert_hostname\n self.assert_fingerprint = assert_fingerprint\n\n def _prepare_proxy(self, conn: HTTPSConnection) -> None: # type: ignore[override]\n \"\"\"Establishes a tunnel connection through HTTP CONNECT.\"\"\"\n if self.proxy and self.proxy.scheme == \"https\":\n tunnel_scheme = \"https\"\n else:\n tunnel_scheme = \"http\"\n\n conn.set_tunnel(\n scheme=tunnel_scheme,\n host=self._tunnel_host,\n port=self.port,\n headers=self.proxy_headers,\n )\n conn.connect()\n\n def _new_conn(self) -> BaseHTTPSConnection:\n \"\"\"\n Return a fresh :class:`urllib3.connection.HTTPConnection`.\n \"\"\"\n self.num_connections += 1\n log.debug(\n \"Starting new HTTPS connection (%d): %s:%s\",\n self.num_connections,\n self.host,\n self.port or \"443\",\n )\n\n if not self.ConnectionCls or self.ConnectionCls is DummyConnection: # type: ignore[comparison-overlap]\n raise ImportError(\n \"Can't connect to HTTPS URL because the SSL module is not available.\"\n )\n\n actual_host: str = self.host\n actual_port = self.port\n if self.proxy is not None and self.proxy.host is not None:\n actual_host = self.proxy.host\n actual_port = self.proxy.port\n\n return self.ConnectionCls(\n host=actual_host,\n port=actual_port,\n timeout=self.timeout.connect_timeout,\n cert_file=self.cert_file,\n key_file=self.key_file,\n key_password=self.key_password,\n cert_reqs=self.cert_reqs,\n ca_certs=self.ca_certs,\n ca_cert_dir=self.ca_cert_dir,\n assert_hostname=self.assert_hostname,\n assert_fingerprint=self.assert_fingerprint,\n ssl_version=self.ssl_version,\n ssl_minimum_version=self.ssl_minimum_version,\n ssl_maximum_version=self.ssl_maximum_version,\n **self.conn_kw,\n )\n\n def _validate_conn(self, conn: BaseHTTPConnection) -> None:\n \"\"\"\n Called right before a request is made, after the socket is created.\n \"\"\"\n super()._validate_conn(conn)\n\n # Force connect early to allow us to validate the connection.\n if conn.is_closed:\n conn.connect()\n\n if not conn.is_verified:\n warnings.warn(\n (\n f\"Unverified HTTPS request is being made to host '{conn.host}'. \"\n \"Adding certificate verification is strongly advised. 
See: \"\n \"https://urllib3.readthedocs.io/en/latest/advanced-usage.html\"\n \"#tls-warnings\"\n ),\n InsecureRequestWarning,\n )\n\n\ndef connection_from_url(url: str, **kw: typing.Any) -> HTTPConnectionPool:\n \"\"\"\n Given a url, return an :class:`.ConnectionPool` instance of its host.\n\n This is a shortcut for not having to parse out the scheme, host, and port\n of the url before creating an :class:`.ConnectionPool` instance.\n\n :param url:\n Absolute URL string that must include the scheme. Port is optional.\n\n :param \\\\**kw:\n Passes additional parameters to the constructor of the appropriate\n :class:`.ConnectionPool`. Useful for specifying things like\n timeout, maxsize, headers, etc.\n\n Example::\n\n >>> conn = connection_from_url('http://google.com/')\n >>> r = conn.request('GET', '/')\n \"\"\"\n scheme, _, host, port, *_ = parse_url(url)\n scheme = scheme or \"http\"\n port = port or port_by_scheme.get(scheme, 80)\n if scheme == \"https\":\n return HTTPSConnectionPool(host, port=port, **kw) # type: ignore[arg-type]\n else:\n return HTTPConnectionPool(host, port=port, **kw) # type: ignore[arg-type]\n\n\[email protected]\ndef _normalize_host(host: None, scheme: str | None) -> None:\n ...\n\n\[email protected]\ndef _normalize_host(host: str, scheme: str | None) -> str:\n ...\n\n\ndef _normalize_host(host: str | None, scheme: str | None) -> str | None:\n \"\"\"\n Normalize hosts for comparisons and use with sockets.\n \"\"\"\n\n host = normalize_host(host, scheme)\n\n # httplib doesn't like it when we include brackets in IPv6 addresses\n # Specifically, if we include brackets but also pass the port then\n # httplib crazily doubles up the square brackets on the Host header.\n # Instead, we need to make sure we never pass ``None`` as the port.\n # However, for backward compatibility reasons we can't actually\n # *assert* that. See http://bugs.python.org/issue28539\n if host and host.startswith(\"[\") and host.endswith(\"]\"):\n host = host[1:-1]\n return host\n\n\ndef _url_from_pool(\n pool: HTTPConnectionPool | HTTPSConnectionPool, path: str | None = None\n) -> str:\n \"\"\"Returns the URL from a given connection pool. This is mainly used for testing and logging.\"\"\"\n return Url(scheme=pool.scheme, host=pool.host, port=pool.port, path=path).url\n\n\ndef _close_pool_connections(pool: queue.LifoQueue[typing.Any]) -> None:\n \"\"\"Drains a queue of connections and closes each one.\"\"\"\n try:\n while True:\n conn = pool.get(block=False)\n if conn:\n conn.close()\n except queue.Empty:\n pass # Done.\n", "path": "src/urllib3/connectionpool.py" } ]
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 7458b12591..e9c0bf3af5 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -54,7 +54,7 @@ jobs: experimental: false nox-session: test-pypy - python-version: "2.7" - os: ubuntu-latest + os: ubuntu-20.04 # CPython 2.7 is not available for ubuntu-22.04 experimental: false nox-session: unsupported_setup_py - python-version: "3.9" diff --git a/changelog/2839.bugfix.rst b/changelog/2839.bugfix.rst new file mode 100644 index 0000000000..9c3ff7091d --- /dev/null +++ b/changelog/2839.bugfix.rst @@ -0,0 +1 @@ +Fix logging error when using ``add_stderr_logger``. diff --git a/pyproject.toml b/pyproject.toml index 4df6f6d9da..bf7ba371bd 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -81,6 +81,7 @@ include = [ xfail_strict = true python_classes = ["Test", "*TestCase"] markers = ["limit_memory"] +log_level = "DEBUG" filterwarnings = [ "error", '''default:'urllib3\[secure\]' extra is deprecated and will be removed in urllib3 v2\.1\.0.*:DeprecationWarning''', diff --git a/src/urllib3/connectionpool.py b/src/urllib3/connectionpool.py index 02724a378a..7dfb846ee2 100644 --- a/src/urllib3/connectionpool.py +++ b/src/urllib3/connectionpool.py @@ -545,7 +545,7 @@ def _make_request( response._pool = self # type: ignore[attr-defined] log.debug( - '%s://%s:%s "%s %s %s" %s', + '%s://%s:%s "%s %s %s" %s %s', self.scheme, self.host, self.port, diff --git a/test/with_dummyserver/test_connectionpool.py b/test/with_dummyserver/test_connectionpool.py index 6aea007a21..aea46c8935 100644 --- a/test/with_dummyserver/test_connectionpool.py +++ b/test/with_dummyserver/test_connectionpool.py @@ -1,9 +1,7 @@ from __future__ import annotations import io -import logging import socket -import sys import time import typing import warnings @@ -39,10 +37,6 @@ pytestmark = pytest.mark.flaky -log = logging.getLogger("urllib3.connectionpool") -log.setLevel(logging.NOTSET) -log.addHandler(logging.StreamHandler(sys.stdout)) - def wait_for_socket(ready_event: Event) -> None: ready_event.wait() diff --git a/test/with_dummyserver/test_https.py b/test/with_dummyserver/test_https.py index 440a83d906..ac0fa9419c 100644 --- a/test/with_dummyserver/test_https.py +++ b/test/with_dummyserver/test_https.py @@ -1,7 +1,6 @@ from __future__ import annotations import datetime -import logging import os.path import shutil import ssl @@ -52,11 +51,6 @@ pytestmark = pytest.mark.flaky -log = logging.getLogger("urllib3.connectionpool") -log.setLevel(logging.NOTSET) -log.addHandler(logging.StreamHandler(sys.stdout)) - - TLSv1_CERTS = DEFAULT_CERTS.copy() TLSv1_CERTS["ssl_version"] = getattr(ssl, "PROTOCOL_TLSv1", None)
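The functional change in the diff above adds an eighth %s to the log.debug format string so it matches the eight arguments passed at the call site; the changelog entry ties this to urllib3's add_stderr_logger, which attaches a handler and therefore forces the record to actually be formatted. A minimal reproduction of that failure mode, independent of urllib3 (logger name and values are illustrative only):

```python
import logging

logging.basicConfig(level=logging.DEBUG)  # attach a handler so records really get formatted
log = logging.getLogger("demo")

# Seven %s placeholders but eight arguments, mirroring the bug the diff fixes.
# The logging module does not raise here; the handler's formatting attempt fails
# and a "--- Logging error ---" traceback is printed to stderr instead.
log.debug(
    '%s://%s:%s "%s %s %s" %s',
    "https", "example.com", 443, "GET", "/", "HTTP/1.1", 200, 1234,
)
```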
pwr-Solaar__Solaar-1425
Add support for G733 LIGHTSPEED headphones It would be great to support the G733 LIGHTSPEED headphones. I have a pair of these, so I can help gather any information needed to display details such as battery life. I'm not sure where to start adding support myself, though...
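For reference, the record's accompanying change shows where support starts: a descriptor entry in lib/logitech_receiver/descriptors.py keyed by the headset's USB id, mirroring the existing PRO X Wireless headset line. A sketch of that entry, with the values (usbid 0x0ab5, HID++ 2.0 over interface 3) taken from the record's diff:

```python
# Entry for lib/logitech_receiver/descriptors.py: registers the G733 so Solaar
# can enumerate it; battery status is then read via the usual HID++ 2.0
# run-time feature discovery described in that file's comments.
_D(
    'Logitech G733 Gaming Headset',
    codename='G733 Headset',
    protocol=2.0,
    interface=3,
    kind=_DK.headset,
    usbid=0x0ab5,
)
```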
[ { "content": "# -*- python-mode -*-\n\n## Copyright (C) 2012-2013 Daniel Pavel\n##\n## This program is free software; you can redistribute it and/or modify\n## it under the terms of the GNU General Public License as published by\n## the Free Software Foundation; either version 2 of the License, or\n## (at your option) any later version.\n##\n## This program is distributed in the hope that it will be useful,\n## but WITHOUT ANY WARRANTY; without even the implied warranty of\n## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n## GNU General Public License for more details.\n##\n## You should have received a copy of the GNU General Public License along\n## with this program; if not, write to the Free Software Foundation, Inc.,\n## 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n\nfrom collections import namedtuple\n\nfrom . import settings_templates as _ST\nfrom .common import NamedInts as _NamedInts\nfrom .hidpp10 import DEVICE_KIND as _DK\nfrom .hidpp10 import REGISTERS as _R\n\n#\n#\n#\n\n_DeviceDescriptor = namedtuple(\n '_DeviceDescriptor',\n ('name', 'kind', 'wpid', 'codename', 'protocol', 'registers', 'settings', 'persister', 'usbid', 'interface', 'btid')\n)\ndel namedtuple\n\nDEVICES_WPID = {}\nDEVICES = {}\n\n\ndef _D(\n name,\n codename=None,\n kind=None,\n wpid=None,\n protocol=None,\n registers=None,\n settings=None,\n persister=None,\n usbid=None,\n interface=None,\n btid=None,\n):\n assert name\n\n if kind is None:\n kind = (\n _DK.mouse if 'Mouse' in name else _DK.keyboard if 'Keyboard' in name else _DK.numpad\n if 'Number Pad' in name else _DK.touchpad if 'Touchpad' in name else _DK.trackball if 'Trackball' in name else None\n )\n assert kind is not None, 'descriptor for %s does not have kind set' % name\n\n # heuristic: the codename is the last word in the device name\n if codename is None and ' ' in name:\n codename = name.split(' ')[-1]\n assert codename is not None, 'descriptor for %s does not have codename set' % name\n\n if protocol is not None:\n\n if wpid:\n for w in wpid if isinstance(wpid, tuple) else (wpid, ):\n if protocol > 1.0:\n assert w[0:1] == '4', '%s has protocol %0.1f, wpid %s' % (name, protocol, w)\n else:\n if w[0:1] == '1':\n assert kind == _DK.mouse, '%s has protocol %0.1f, wpid %s' % (name, protocol, w)\n elif w[0:1] == '2':\n assert kind in (_DK.keyboard, _DK.numpad), '%s has protocol %0.1f, wpid %s' % (name, protocol, w)\n\n device_descriptor = _DeviceDescriptor(\n name=name,\n kind=kind,\n wpid=wpid,\n codename=codename,\n protocol=protocol,\n registers=registers,\n settings=settings,\n persister=persister,\n usbid=usbid,\n interface=interface,\n btid=btid\n )\n\n if usbid:\n found = get_usbid(usbid)\n assert found is None, 'duplicate usbid in device descriptors: %s' % (found, )\n if btid:\n found = get_btid(btid)\n assert found is None, 'duplicate btid in device descriptors: %s' % (found, )\n\n assert codename not in DEVICES, 'duplicate codename in device descriptors: %s' % (DEVICES[codename], )\n DEVICES[codename] = device_descriptor\n\n if wpid:\n for w in wpid if isinstance(wpid, tuple) else (wpid, ):\n assert w not in DEVICES_WPID, 'duplicate wpid in device descriptors: %s' % (DEVICES_WPID[w], )\n DEVICES_WPID[w] = device_descriptor\n\n\ndef get_wpid(wpid):\n return DEVICES_WPID.get(wpid)\n\n\ndef get_codename(codename):\n return DEVICES.get(codename)\n\n\ndef get_usbid(usbid):\n if isinstance(usbid, str):\n usbid = int(usbid, 16)\n found = next((x for x in DEVICES.values() if x.usbid == usbid), None)\n return 
found\n\n\ndef get_btid(btid):\n if isinstance(btid, str):\n btid = int(btid, 16)\n found = next((x for x in DEVICES.values() if x.btid == btid), None)\n return found\n\n\n#\n#\n#\n\n# Some HID++1.0 registers and HID++2.0 features can be discovered at run-time,\n# so they are not specified here.\n#\n# For known registers, however, please do specify them here -- avoids\n# unnecessary communication with the device and makes it easier to make certain\n# decisions when querying the device's state.\n#\n# Specify a negative value to blacklist a certain register for a device.\n#\n# Usually, state registers (battery, leds, some features, etc) are only used by\n# HID++ 1.0 devices, while HID++ 2.0 devices use features for the same\n# functionalities. This is a rule that's been discovered by trial-and-error,\n# so it may change in the future.\n\n# Well-known registers (in hex):\n# * 00 - notification flags (all devices)\n# 01 - mice: smooth scrolling\n# 07 - battery status\n# 09 - keyboards: FN swap (if it has the FN key)\n# 0D - battery charge\n# a device may have either the 07 or 0D register available;\n# no known device uses both\n# 51 - leds\n# 63 - mice: DPI\n# * F1 - firmware info\n# Some registers appear to be universally supported, no matter the HID++ version\n# (marked with *). The rest may or may not be supported, and their values may or\n# may not mean the same thing across different devices.\n\n# The 'codename' and 'kind' fields are usually guessed from the device name,\n# but in some cases (like the Logitech Cube) that heuristic fails and they have\n# to be specified.\n#\n# The 'protocol' and 'wpid' fields are optional (they can be discovered at\n# runtime), but specifying them here speeds up device discovery and reduces the\n# USB traffic Solaar has to do to fully identify peripherals.\n# Same goes for HID++ 2.0 feature settings (like _feature_fn_swap).\n#\n# The 'registers' field indicates read-only registers, specifying a state. These\n# are valid (AFAIK) only to HID++ 1.0 devices.\n# The 'settings' field indicates a read/write register; based on them Solaar\n# generates, at runtime, the settings controls in the device panel. 
HID++ 1.0\n# devices may only have register-based settings; HID++ 2.0 devices may only have\n# feature-based settings.\n\n# Devices are organized by kind\n# Within kind devices are sorted by wpid, then by usbid, then by btid, with missing values sorted later\n\n# Keyboards\n\n_D('Wireless Keyboard S510', codename='S510', protocol=1.0, wpid='0056', registers=(_R.battery_status, ))\n_D('Wireless Keyboard EX100', codename='EX100', protocol=1.0, wpid='0065', registers=(_R.battery_status, ))\n_D('Wireless Keyboard MK300', protocol=1.0, wpid='0068', registers=(_R.battery_status, ))\n_D('Number Pad N545', protocol=1.0, wpid='2006', registers=(_R.battery_status, ))\n_D('Wireless Compact Keyboard K340', protocol=1.0, wpid='2007', registers=(_R.battery_status, ))\n_D('Wireless Keyboard MK700', protocol=1.0, wpid='2008', registers=(_R.battery_status, ), settings=[_ST.RegisterFnSwap])\n_D('Wireless Wave Keyboard K350', protocol=1.0, wpid='200A', registers=(_R.battery_status, ))\n_D('Wireless Keyboard MK320', protocol=1.0, wpid='200F', registers=(_R.battery_status, ))\n_D(\n 'Wireless Illuminated Keyboard K800',\n protocol=1.0,\n wpid='2010',\n registers=(\n _R.battery_status,\n _R.three_leds,\n ),\n settings=[\n _ST.RegisterFnSwap,\n _ST.RegisterHandDetection,\n ],\n)\n_D('Wireless Keyboard K520', protocol=1.0, wpid='2011', registers=(_R.battery_status, ), settings=[_ST.RegisterFnSwap])\n_D('Wireless Solar Keyboard K750', protocol=2.0, wpid='4002', settings=[_ST.FnSwap])\n_D('Wireless Keyboard K270 (unifying)', protocol=2.0, wpid='4003')\n_D('Wireless Keyboard K360', protocol=2.0, wpid='4004', settings=[_ST.FnSwap])\n_D('Wireless Keyboard K230', protocol=2.0, wpid='400D')\n_D('Wireless Touch Keyboard K400', protocol=2.0, wpid=('400E', '4024'), settings=[_ST.FnSwap])\n_D('Wireless Keyboard MK270', protocol=2.0, wpid='4023', settings=[_ST.FnSwap])\n_D('Illuminated Living-Room Keyboard K830', protocol=2.0, wpid='4032', settings=[_ST.NewFnSwap])\n_D('Wireless Touch Keyboard K400 Plus', codename='K400 Plus', protocol=2.0, wpid='404D')\n_D('Wireless Multi-Device Keyboard K780', protocol=4.5, wpid='405B', settings=[_ST.NewFnSwap])\n_D('Wireless Keyboard K375s', protocol=2.0, wpid='4061', settings=[_ST.K375sFnSwap])\n_D('Craft Advanced Keyboard', codename='Craft', protocol=4.5, wpid='4066', btid=0xB350)\n_D('Wireless Illuminated Keyboard K800 new', codename='K800 new', protocol=4.5, wpid='406E', settings=[_ST.FnSwap])\n_D('MX Keys Keyboard', codename='MX Keys', protocol=4.5, wpid='408A', btid=0xB35B)\n_D('G915 TKL LIGHTSPEED Wireless RGB Mechanical Gaming Keyboard', codename='G915 TKL', protocol=4.2, wpid='408E', usbid=0xC343)\n_D('G512 RGB Mechanical Gaming Keyboard', codename='G512', usbid=0xc33c, interface=1)\n\n# Mice\n\n_D('LX5 Cordless Mouse', codename='LX5', protocol=1.0, wpid='0036', registers=(_R.battery_status, ))\n_D('Wireless Mouse EX100', codename='EX100m', protocol=1.0, wpid='003F', registers=(_R.battery_status, ))\n_D('Wireless Mouse M30', codename='M30', protocol=1.0, wpid='0085', registers=(_R.battery_status, ))\n_D('MX610 Laser Cordless Mouse', codename='MX610', protocol=1.0, wpid='1001', registers=(_R.battery_status, ))\n_D('G7 Cordless Laser Mouse', codename='G7', protocol=1.0, wpid='1002', registers=(_R.battery_status, ))\n_D('V400 Laser Cordless Mouse', codename='V400', protocol=1.0, wpid='1003', registers=(_R.battery_status, ))\n_D('MX610 Left-Handled Mouse', codename='MX610L', protocol=1.0, wpid='1004', registers=(_R.battery_status, ))\n_D('V450 Laser Cordless Mouse', codename='V450', 
protocol=1.0, wpid='1005', registers=(_R.battery_status, ))\n_D(\n 'VX Revolution',\n codename='VX Revolution',\n kind=_DK.mouse,\n protocol=1.0,\n wpid=('1006', '100D', '0612'),\n registers=(_R.battery_charge, )\n)\n_D('MX Air', codename='MX Air', protocol=1.0, kind=_DK.mouse, wpid=('1007', '100E'), registers=(_R.battery_charge, ))\n_D(\n 'MX Revolution',\n codename='MX Revolution',\n protocol=1.0,\n kind=_DK.mouse,\n wpid=('1008', '100C'),\n registers=(_R.battery_charge, )\n)\n_D('MX620 Laser Cordless Mouse', codename='MX620', protocol=1.0, wpid=('100A', '1016'), registers=(_R.battery_charge, ))\n_D(\n 'VX Nano Cordless Laser Mouse',\n codename='VX Nano',\n protocol=1.0,\n wpid=('100B', '100F'),\n registers=(_R.battery_charge, ),\n settings=[_ST.RegisterSmoothScroll, _ST.RegisterSideScroll]\n)\n_D('V450 Nano Cordless Laser Mouse', codename='V450 Nano', protocol=1.0, wpid='1011', registers=(_R.battery_charge, ))\n_D(\n 'V550 Nano Cordless Laser Mouse',\n codename='V550 Nano',\n protocol=1.0,\n wpid='1013',\n registers=(_R.battery_charge, ),\n settings=[\n _ST.RegisterSmoothScroll,\n _ST.RegisterSideScroll,\n ],\n)\n_D(\n 'MX 1100 Cordless Laser Mouse',\n codename='MX 1100',\n protocol=1.0,\n kind=_DK.mouse,\n wpid='1014',\n registers=(_R.battery_charge, ),\n settings=[\n _ST.RegisterSmoothScroll,\n _ST.RegisterSideScroll,\n ],\n)\n_D(\n 'Anywhere Mouse MX',\n codename='Anywhere MX',\n protocol=1.0,\n wpid='1017',\n registers=(_R.battery_charge, ),\n settings=[\n _ST.RegisterSmoothScroll,\n _ST.RegisterSideScroll,\n ],\n)\n\n\nclass _PerformanceMXDpi(_ST.RegisterDpi):\n choices_universe = _NamedInts.range(0x81, 0x8F, lambda x: str((x - 0x80) * 100))\n validator_options = {'choices': choices_universe}\n\n\n_D(\n 'Performance Mouse MX',\n codename='Performance MX',\n protocol=1.0,\n wpid='101A',\n registers=(\n _R.battery_status,\n _R.three_leds,\n ),\n settings=[\n _PerformanceMXDpi,\n _ST.RegisterSmoothScroll,\n _ST.RegisterSideScroll,\n ],\n)\n_D(\n 'Marathon Mouse M705 (M-R0009)',\n codename='M705 (M-R0009)',\n protocol=1.0,\n wpid='101B',\n registers=(_R.battery_charge, ),\n settings=[\n _ST.RegisterSmoothScroll,\n _ST.RegisterSideScroll,\n ],\n)\n_D('Wireless Mouse M350', protocol=1.0, wpid='101C', registers=(_R.battery_charge, ))\n_D(\n 'Wireless Mouse M505',\n codename='M505/B605',\n protocol=1.0,\n wpid='101D',\n registers=(_R.battery_charge, ),\n settings=[\n _ST.RegisterSmoothScroll,\n _ST.RegisterSideScroll,\n ],\n)\n_D('Wireless Mouse M305', protocol=1.0, wpid='101F', registers=(_R.battery_status, ), settings=[_ST.RegisterSideScroll])\n_D('Wireless Mouse M215', protocol=1.0, wpid='1020')\n_D(\n 'G700 Gaming Mouse',\n codename='G700',\n protocol=1.0,\n wpid='1023',\n usbid=0xc06b,\n interface=1,\n registers=(\n _R.battery_status,\n _R.three_leds,\n ),\n settings=[\n _ST.RegisterSmoothScroll,\n _ST.RegisterSideScroll,\n ],\n)\n_D('Wireless Mouse M310', protocol=1.0, wpid='1024', registers=(_R.battery_status, ))\n_D('Wireless Mouse M510', protocol=1.0, wpid='1025', registers=(_R.battery_status, ), settings=[_ST.RegisterSideScroll])\n_D('Fujitsu Sonic Mouse', codename='Sonic', protocol=1.0, wpid='1029')\n_D(\n 'G700s Gaming Mouse',\n codename='G700s',\n protocol=1.0,\n wpid='102A',\n usbid=0xc07c,\n interface=1,\n registers=(\n _R.battery_status,\n _R.three_leds,\n ),\n settings=[\n _ST.RegisterSmoothScroll,\n _ST.RegisterSideScroll,\n ],\n)\n\n_D('Couch Mouse M515', protocol=2.0, wpid='4007')\n_D('Wireless Mouse M175', protocol=2.0, wpid='4008')\n_D('Wireless Mouse M325', 
protocol=2.0, wpid='400A', settings=[_ST.HiResScroll])\n_D('Wireless Mouse M525', protocol=2.0, wpid='4013')\n_D('Wireless Mouse M345', protocol=2.0, wpid='4017')\n_D('Wireless Mouse M187', protocol=2.0, wpid='4019')\n_D('Touch Mouse M600', protocol=2.0, wpid='401A')\n_D('Wireless Mouse M150', protocol=2.0, wpid='4022')\n_D('Wireless Mouse M185', protocol=2.0, wpid='4038')\n_D('Wireless Mouse MX Master', codename='MX Master', protocol=4.5, wpid='4041', btid=0xb012)\n_D('Anywhere Mouse MX 2', codename='Anywhere MX 2', protocol=4.5, wpid='404A', settings=[_ST.HiresSmoothInvert])\n_D('Wireless Mouse M510', protocol=2.0, wpid='4051', codename='M510v2', settings=[_ST.LowresSmoothScroll])\n_D(\n 'Wireless Mouse M185 new',\n codename='M185n',\n protocol=4.5,\n wpid='4054',\n settings=[\n _ST.LowresSmoothScroll,\n _ST.PointerSpeed,\n ]\n)\n_D(\n 'Wireless Mouse M185/M235/M310',\n codename='M185/M235/M310',\n protocol=4.5,\n wpid='4055',\n settings=[\n _ST.LowresSmoothScroll,\n _ST.PointerSpeed,\n ]\n)\n_D(\n 'Wireless Mouse MX Master 2S',\n codename='MX Master 2S',\n protocol=4.5,\n wpid='4069',\n btid=0xb019,\n settings=[\n _ST.HiresSmoothInvert,\n ],\n)\n_D(\n 'Multi Device Silent Mouse M585/M590',\n codename='M585/M590',\n protocol=4.5,\n wpid='406B',\n settings=[\n _ST.LowresSmoothScroll,\n _ST.PointerSpeed,\n ],\n)\n_D(\n 'Marathon Mouse M705 (M-R0073)',\n codename='M705 (M-R0073)',\n protocol=4.5,\n wpid='406D',\n settings=[\n _ST.HiresSmoothInvert,\n _ST.PointerSpeed,\n ]\n)\n_D('MX Vertical Wireless Mouse', codename='MX Vertical', protocol=4.5, wpid='407B', btid=0xb020, usbid=0xc08a)\n_D('Wireless Mouse Pebble M350', protocol=2.0, wpid='4080', codename='Pebble')\n_D('MX Master 3 Wireless Mouse', codename='MX Master 3', protocol=4.5, wpid='4082', btid=0xb023)\n_D('PRO X Wireless', kind='mouse', codename='PRO X', wpid='4093', usbid=0xc094)\n_D('G402 Gaming Mouse', codename='G402', usbid=0xc07e, interface=1)\n_D('G900 Chaos Spectrum Gaming Mouse', codename='G900', usbid=0xc081)\n_D('G403 Gaming Mouse', codename='G403', usbid=0xc082)\n_D('G903 Lightspeed Gaming Mouse', codename='G903', usbid=0xc086)\n_D('G703 Lightspeed Gaming Mouse', codename='G703', usbid=0xc087)\n_D('GPro Gaming Mouse', codename='GPro', usbid=0xc088)\n_D('G502 Hero Gaming Mouse', codename='G502 Hero', usbid=0xc08d)\n_D('G703 Hero Gaming Mouse', codename='G703 Hero', usbid=0xc090)\n_D('G903 Hero Gaming Mouse', codename='G903 Hero', usbid=0xc091)\n_D('G102 Lightsync Mouse', codename='G102', usbid=0xc092, interface=1)\n_D('M500S Mouse', codename='M500S', usbid=0xc093, interface=1)\n\n# Trackballs\n\n_D('Wireless Trackball M570')\n\n# Touchpads\n\n_D('Wireless Touchpad', codename='Wireless Touch', protocol=2.0, wpid='4011')\n_D('Wireless Rechargeable Touchpad T650', protocol=2.0, wpid='4101')\n\n# Headset\n\n_D('Logitech PRO X Wireless Gaming Headset', codename='PRO Headset', protocol=2.0, interface=3, kind=_DK.headset, usbid=0x0aba)\n", "path": "lib/logitech_receiver/descriptors.py" } ]
[ { "content": "# -*- python-mode -*-\n\n## Copyright (C) 2012-2013 Daniel Pavel\n##\n## This program is free software; you can redistribute it and/or modify\n## it under the terms of the GNU General Public License as published by\n## the Free Software Foundation; either version 2 of the License, or\n## (at your option) any later version.\n##\n## This program is distributed in the hope that it will be useful,\n## but WITHOUT ANY WARRANTY; without even the implied warranty of\n## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n## GNU General Public License for more details.\n##\n## You should have received a copy of the GNU General Public License along\n## with this program; if not, write to the Free Software Foundation, Inc.,\n## 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n\nfrom collections import namedtuple\n\nfrom . import settings_templates as _ST\nfrom .common import NamedInts as _NamedInts\nfrom .hidpp10 import DEVICE_KIND as _DK\nfrom .hidpp10 import REGISTERS as _R\n\n#\n#\n#\n\n_DeviceDescriptor = namedtuple(\n '_DeviceDescriptor',\n ('name', 'kind', 'wpid', 'codename', 'protocol', 'registers', 'settings', 'persister', 'usbid', 'interface', 'btid')\n)\ndel namedtuple\n\nDEVICES_WPID = {}\nDEVICES = {}\n\n\ndef _D(\n name,\n codename=None,\n kind=None,\n wpid=None,\n protocol=None,\n registers=None,\n settings=None,\n persister=None,\n usbid=None,\n interface=None,\n btid=None,\n):\n assert name\n\n if kind is None:\n kind = (\n _DK.mouse if 'Mouse' in name else _DK.keyboard if 'Keyboard' in name else _DK.numpad\n if 'Number Pad' in name else _DK.touchpad if 'Touchpad' in name else _DK.trackball if 'Trackball' in name else None\n )\n assert kind is not None, 'descriptor for %s does not have kind set' % name\n\n # heuristic: the codename is the last word in the device name\n if codename is None and ' ' in name:\n codename = name.split(' ')[-1]\n assert codename is not None, 'descriptor for %s does not have codename set' % name\n\n if protocol is not None:\n\n if wpid:\n for w in wpid if isinstance(wpid, tuple) else (wpid, ):\n if protocol > 1.0:\n assert w[0:1] == '4', '%s has protocol %0.1f, wpid %s' % (name, protocol, w)\n else:\n if w[0:1] == '1':\n assert kind == _DK.mouse, '%s has protocol %0.1f, wpid %s' % (name, protocol, w)\n elif w[0:1] == '2':\n assert kind in (_DK.keyboard, _DK.numpad), '%s has protocol %0.1f, wpid %s' % (name, protocol, w)\n\n device_descriptor = _DeviceDescriptor(\n name=name,\n kind=kind,\n wpid=wpid,\n codename=codename,\n protocol=protocol,\n registers=registers,\n settings=settings,\n persister=persister,\n usbid=usbid,\n interface=interface,\n btid=btid\n )\n\n if usbid:\n found = get_usbid(usbid)\n assert found is None, 'duplicate usbid in device descriptors: %s' % (found, )\n if btid:\n found = get_btid(btid)\n assert found is None, 'duplicate btid in device descriptors: %s' % (found, )\n\n assert codename not in DEVICES, 'duplicate codename in device descriptors: %s' % (DEVICES[codename], )\n DEVICES[codename] = device_descriptor\n\n if wpid:\n for w in wpid if isinstance(wpid, tuple) else (wpid, ):\n assert w not in DEVICES_WPID, 'duplicate wpid in device descriptors: %s' % (DEVICES_WPID[w], )\n DEVICES_WPID[w] = device_descriptor\n\n\ndef get_wpid(wpid):\n return DEVICES_WPID.get(wpid)\n\n\ndef get_codename(codename):\n return DEVICES.get(codename)\n\n\ndef get_usbid(usbid):\n if isinstance(usbid, str):\n usbid = int(usbid, 16)\n found = next((x for x in DEVICES.values() if x.usbid == usbid), None)\n return 
found\n\n\ndef get_btid(btid):\n if isinstance(btid, str):\n btid = int(btid, 16)\n found = next((x for x in DEVICES.values() if x.btid == btid), None)\n return found\n\n\n#\n#\n#\n\n# Some HID++1.0 registers and HID++2.0 features can be discovered at run-time,\n# so they are not specified here.\n#\n# For known registers, however, please do specify them here -- avoids\n# unnecessary communication with the device and makes it easier to make certain\n# decisions when querying the device's state.\n#\n# Specify a negative value to blacklist a certain register for a device.\n#\n# Usually, state registers (battery, leds, some features, etc) are only used by\n# HID++ 1.0 devices, while HID++ 2.0 devices use features for the same\n# functionalities. This is a rule that's been discovered by trial-and-error,\n# so it may change in the future.\n\n# Well-known registers (in hex):\n# * 00 - notification flags (all devices)\n# 01 - mice: smooth scrolling\n# 07 - battery status\n# 09 - keyboards: FN swap (if it has the FN key)\n# 0D - battery charge\n# a device may have either the 07 or 0D register available;\n# no known device uses both\n# 51 - leds\n# 63 - mice: DPI\n# * F1 - firmware info\n# Some registers appear to be universally supported, no matter the HID++ version\n# (marked with *). The rest may or may not be supported, and their values may or\n# may not mean the same thing across different devices.\n\n# The 'codename' and 'kind' fields are usually guessed from the device name,\n# but in some cases (like the Logitech Cube) that heuristic fails and they have\n# to be specified.\n#\n# The 'protocol' and 'wpid' fields are optional (they can be discovered at\n# runtime), but specifying them here speeds up device discovery and reduces the\n# USB traffic Solaar has to do to fully identify peripherals.\n# Same goes for HID++ 2.0 feature settings (like _feature_fn_swap).\n#\n# The 'registers' field indicates read-only registers, specifying a state. These\n# are valid (AFAIK) only to HID++ 1.0 devices.\n# The 'settings' field indicates a read/write register; based on them Solaar\n# generates, at runtime, the settings controls in the device panel. 
HID++ 1.0\n# devices may only have register-based settings; HID++ 2.0 devices may only have\n# feature-based settings.\n\n# Devices are organized by kind\n# Within kind devices are sorted by wpid, then by usbid, then by btid, with missing values sorted later\n\n# Keyboards\n\n_D('Wireless Keyboard S510', codename='S510', protocol=1.0, wpid='0056', registers=(_R.battery_status, ))\n_D('Wireless Keyboard EX100', codename='EX100', protocol=1.0, wpid='0065', registers=(_R.battery_status, ))\n_D('Wireless Keyboard MK300', protocol=1.0, wpid='0068', registers=(_R.battery_status, ))\n_D('Number Pad N545', protocol=1.0, wpid='2006', registers=(_R.battery_status, ))\n_D('Wireless Compact Keyboard K340', protocol=1.0, wpid='2007', registers=(_R.battery_status, ))\n_D('Wireless Keyboard MK700', protocol=1.0, wpid='2008', registers=(_R.battery_status, ), settings=[_ST.RegisterFnSwap])\n_D('Wireless Wave Keyboard K350', protocol=1.0, wpid='200A', registers=(_R.battery_status, ))\n_D('Wireless Keyboard MK320', protocol=1.0, wpid='200F', registers=(_R.battery_status, ))\n_D(\n 'Wireless Illuminated Keyboard K800',\n protocol=1.0,\n wpid='2010',\n registers=(\n _R.battery_status,\n _R.three_leds,\n ),\n settings=[\n _ST.RegisterFnSwap,\n _ST.RegisterHandDetection,\n ],\n)\n_D('Wireless Keyboard K520', protocol=1.0, wpid='2011', registers=(_R.battery_status, ), settings=[_ST.RegisterFnSwap])\n_D('Wireless Solar Keyboard K750', protocol=2.0, wpid='4002', settings=[_ST.FnSwap])\n_D('Wireless Keyboard K270 (unifying)', protocol=2.0, wpid='4003')\n_D('Wireless Keyboard K360', protocol=2.0, wpid='4004', settings=[_ST.FnSwap])\n_D('Wireless Keyboard K230', protocol=2.0, wpid='400D')\n_D('Wireless Touch Keyboard K400', protocol=2.0, wpid=('400E', '4024'), settings=[_ST.FnSwap])\n_D('Wireless Keyboard MK270', protocol=2.0, wpid='4023', settings=[_ST.FnSwap])\n_D('Illuminated Living-Room Keyboard K830', protocol=2.0, wpid='4032', settings=[_ST.NewFnSwap])\n_D('Wireless Touch Keyboard K400 Plus', codename='K400 Plus', protocol=2.0, wpid='404D')\n_D('Wireless Multi-Device Keyboard K780', protocol=4.5, wpid='405B', settings=[_ST.NewFnSwap])\n_D('Wireless Keyboard K375s', protocol=2.0, wpid='4061', settings=[_ST.K375sFnSwap])\n_D('Craft Advanced Keyboard', codename='Craft', protocol=4.5, wpid='4066', btid=0xB350)\n_D('Wireless Illuminated Keyboard K800 new', codename='K800 new', protocol=4.5, wpid='406E', settings=[_ST.FnSwap])\n_D('MX Keys Keyboard', codename='MX Keys', protocol=4.5, wpid='408A', btid=0xB35B)\n_D('G915 TKL LIGHTSPEED Wireless RGB Mechanical Gaming Keyboard', codename='G915 TKL', protocol=4.2, wpid='408E', usbid=0xC343)\n_D('G512 RGB Mechanical Gaming Keyboard', codename='G512', usbid=0xc33c, interface=1)\n\n# Mice\n\n_D('LX5 Cordless Mouse', codename='LX5', protocol=1.0, wpid='0036', registers=(_R.battery_status, ))\n_D('Wireless Mouse EX100', codename='EX100m', protocol=1.0, wpid='003F', registers=(_R.battery_status, ))\n_D('Wireless Mouse M30', codename='M30', protocol=1.0, wpid='0085', registers=(_R.battery_status, ))\n_D('MX610 Laser Cordless Mouse', codename='MX610', protocol=1.0, wpid='1001', registers=(_R.battery_status, ))\n_D('G7 Cordless Laser Mouse', codename='G7', protocol=1.0, wpid='1002', registers=(_R.battery_status, ))\n_D('V400 Laser Cordless Mouse', codename='V400', protocol=1.0, wpid='1003', registers=(_R.battery_status, ))\n_D('MX610 Left-Handled Mouse', codename='MX610L', protocol=1.0, wpid='1004', registers=(_R.battery_status, ))\n_D('V450 Laser Cordless Mouse', codename='V450', 
protocol=1.0, wpid='1005', registers=(_R.battery_status, ))\n_D(\n 'VX Revolution',\n codename='VX Revolution',\n kind=_DK.mouse,\n protocol=1.0,\n wpid=('1006', '100D', '0612'),\n registers=(_R.battery_charge, )\n)\n_D('MX Air', codename='MX Air', protocol=1.0, kind=_DK.mouse, wpid=('1007', '100E'), registers=(_R.battery_charge, ))\n_D(\n 'MX Revolution',\n codename='MX Revolution',\n protocol=1.0,\n kind=_DK.mouse,\n wpid=('1008', '100C'),\n registers=(_R.battery_charge, )\n)\n_D('MX620 Laser Cordless Mouse', codename='MX620', protocol=1.0, wpid=('100A', '1016'), registers=(_R.battery_charge, ))\n_D(\n 'VX Nano Cordless Laser Mouse',\n codename='VX Nano',\n protocol=1.0,\n wpid=('100B', '100F'),\n registers=(_R.battery_charge, ),\n settings=[_ST.RegisterSmoothScroll, _ST.RegisterSideScroll]\n)\n_D('V450 Nano Cordless Laser Mouse', codename='V450 Nano', protocol=1.0, wpid='1011', registers=(_R.battery_charge, ))\n_D(\n 'V550 Nano Cordless Laser Mouse',\n codename='V550 Nano',\n protocol=1.0,\n wpid='1013',\n registers=(_R.battery_charge, ),\n settings=[\n _ST.RegisterSmoothScroll,\n _ST.RegisterSideScroll,\n ],\n)\n_D(\n 'MX 1100 Cordless Laser Mouse',\n codename='MX 1100',\n protocol=1.0,\n kind=_DK.mouse,\n wpid='1014',\n registers=(_R.battery_charge, ),\n settings=[\n _ST.RegisterSmoothScroll,\n _ST.RegisterSideScroll,\n ],\n)\n_D(\n 'Anywhere Mouse MX',\n codename='Anywhere MX',\n protocol=1.0,\n wpid='1017',\n registers=(_R.battery_charge, ),\n settings=[\n _ST.RegisterSmoothScroll,\n _ST.RegisterSideScroll,\n ],\n)\n\n\nclass _PerformanceMXDpi(_ST.RegisterDpi):\n choices_universe = _NamedInts.range(0x81, 0x8F, lambda x: str((x - 0x80) * 100))\n validator_options = {'choices': choices_universe}\n\n\n_D(\n 'Performance Mouse MX',\n codename='Performance MX',\n protocol=1.0,\n wpid='101A',\n registers=(\n _R.battery_status,\n _R.three_leds,\n ),\n settings=[\n _PerformanceMXDpi,\n _ST.RegisterSmoothScroll,\n _ST.RegisterSideScroll,\n ],\n)\n_D(\n 'Marathon Mouse M705 (M-R0009)',\n codename='M705 (M-R0009)',\n protocol=1.0,\n wpid='101B',\n registers=(_R.battery_charge, ),\n settings=[\n _ST.RegisterSmoothScroll,\n _ST.RegisterSideScroll,\n ],\n)\n_D('Wireless Mouse M350', protocol=1.0, wpid='101C', registers=(_R.battery_charge, ))\n_D(\n 'Wireless Mouse M505',\n codename='M505/B605',\n protocol=1.0,\n wpid='101D',\n registers=(_R.battery_charge, ),\n settings=[\n _ST.RegisterSmoothScroll,\n _ST.RegisterSideScroll,\n ],\n)\n_D('Wireless Mouse M305', protocol=1.0, wpid='101F', registers=(_R.battery_status, ), settings=[_ST.RegisterSideScroll])\n_D('Wireless Mouse M215', protocol=1.0, wpid='1020')\n_D(\n 'G700 Gaming Mouse',\n codename='G700',\n protocol=1.0,\n wpid='1023',\n usbid=0xc06b,\n interface=1,\n registers=(\n _R.battery_status,\n _R.three_leds,\n ),\n settings=[\n _ST.RegisterSmoothScroll,\n _ST.RegisterSideScroll,\n ],\n)\n_D('Wireless Mouse M310', protocol=1.0, wpid='1024', registers=(_R.battery_status, ))\n_D('Wireless Mouse M510', protocol=1.0, wpid='1025', registers=(_R.battery_status, ), settings=[_ST.RegisterSideScroll])\n_D('Fujitsu Sonic Mouse', codename='Sonic', protocol=1.0, wpid='1029')\n_D(\n 'G700s Gaming Mouse',\n codename='G700s',\n protocol=1.0,\n wpid='102A',\n usbid=0xc07c,\n interface=1,\n registers=(\n _R.battery_status,\n _R.three_leds,\n ),\n settings=[\n _ST.RegisterSmoothScroll,\n _ST.RegisterSideScroll,\n ],\n)\n\n_D('Couch Mouse M515', protocol=2.0, wpid='4007')\n_D('Wireless Mouse M175', protocol=2.0, wpid='4008')\n_D('Wireless Mouse M325', 
protocol=2.0, wpid='400A', settings=[_ST.HiResScroll])\n_D('Wireless Mouse M525', protocol=2.0, wpid='4013')\n_D('Wireless Mouse M345', protocol=2.0, wpid='4017')\n_D('Wireless Mouse M187', protocol=2.0, wpid='4019')\n_D('Touch Mouse M600', protocol=2.0, wpid='401A')\n_D('Wireless Mouse M150', protocol=2.0, wpid='4022')\n_D('Wireless Mouse M185', protocol=2.0, wpid='4038')\n_D('Wireless Mouse MX Master', codename='MX Master', protocol=4.5, wpid='4041', btid=0xb012)\n_D('Anywhere Mouse MX 2', codename='Anywhere MX 2', protocol=4.5, wpid='404A', settings=[_ST.HiresSmoothInvert])\n_D('Wireless Mouse M510', protocol=2.0, wpid='4051', codename='M510v2', settings=[_ST.LowresSmoothScroll])\n_D(\n 'Wireless Mouse M185 new',\n codename='M185n',\n protocol=4.5,\n wpid='4054',\n settings=[\n _ST.LowresSmoothScroll,\n _ST.PointerSpeed,\n ]\n)\n_D(\n 'Wireless Mouse M185/M235/M310',\n codename='M185/M235/M310',\n protocol=4.5,\n wpid='4055',\n settings=[\n _ST.LowresSmoothScroll,\n _ST.PointerSpeed,\n ]\n)\n_D(\n 'Wireless Mouse MX Master 2S',\n codename='MX Master 2S',\n protocol=4.5,\n wpid='4069',\n btid=0xb019,\n settings=[\n _ST.HiresSmoothInvert,\n ],\n)\n_D(\n 'Multi Device Silent Mouse M585/M590',\n codename='M585/M590',\n protocol=4.5,\n wpid='406B',\n settings=[\n _ST.LowresSmoothScroll,\n _ST.PointerSpeed,\n ],\n)\n_D(\n 'Marathon Mouse M705 (M-R0073)',\n codename='M705 (M-R0073)',\n protocol=4.5,\n wpid='406D',\n settings=[\n _ST.HiresSmoothInvert,\n _ST.PointerSpeed,\n ]\n)\n_D('MX Vertical Wireless Mouse', codename='MX Vertical', protocol=4.5, wpid='407B', btid=0xb020, usbid=0xc08a)\n_D('Wireless Mouse Pebble M350', protocol=2.0, wpid='4080', codename='Pebble')\n_D('MX Master 3 Wireless Mouse', codename='MX Master 3', protocol=4.5, wpid='4082', btid=0xb023)\n_D('PRO X Wireless', kind='mouse', codename='PRO X', wpid='4093', usbid=0xc094)\n_D('G402 Gaming Mouse', codename='G402', usbid=0xc07e, interface=1)\n_D('G900 Chaos Spectrum Gaming Mouse', codename='G900', usbid=0xc081)\n_D('G403 Gaming Mouse', codename='G403', usbid=0xc082)\n_D('G903 Lightspeed Gaming Mouse', codename='G903', usbid=0xc086)\n_D('G703 Lightspeed Gaming Mouse', codename='G703', usbid=0xc087)\n_D('GPro Gaming Mouse', codename='GPro', usbid=0xc088)\n_D('G502 Hero Gaming Mouse', codename='G502 Hero', usbid=0xc08d)\n_D('G703 Hero Gaming Mouse', codename='G703 Hero', usbid=0xc090)\n_D('G903 Hero Gaming Mouse', codename='G903 Hero', usbid=0xc091)\n_D('G102 Lightsync Mouse', codename='G102', usbid=0xc092, interface=1)\n_D('M500S Mouse', codename='M500S', usbid=0xc093, interface=1)\n\n# Trackballs\n\n_D('Wireless Trackball M570')\n\n# Touchpads\n\n_D('Wireless Touchpad', codename='Wireless Touch', protocol=2.0, wpid='4011')\n_D('Wireless Rechargeable Touchpad T650', protocol=2.0, wpid='4101')\n\n# Headset\n\n_D('Logitech G733 Gaming Headset', codename='G733 Headset', protocol=2.0, interface=3, kind=_DK.headset, usbid=0x0ab5)\n_D('Logitech PRO X Wireless Gaming Headset', codename='PRO Headset', protocol=2.0, interface=3, kind=_DK.headset, usbid=0x0aba)\n", "path": "lib/logitech_receiver/descriptors.py" } ]
diff --git a/lib/logitech_receiver/descriptors.py b/lib/logitech_receiver/descriptors.py index 0793ab8859..8af064baca 100644 --- a/lib/logitech_receiver/descriptors.py +++ b/lib/logitech_receiver/descriptors.py @@ -463,4 +463,5 @@ class _PerformanceMXDpi(_ST.RegisterDpi): # Headset +_D('Logitech G733 Gaming Headset', codename='G733 Headset', protocol=2.0, interface=3, kind=_DK.headset, usbid=0x0ab5) _D('Logitech PRO X Wireless Gaming Headset', codename='PRO Headset', protocol=2.0, interface=3, kind=_DK.headset, usbid=0x0aba)
rasterio__rasterio-1754
Proposal: have CRS.from_user_input check whether the input object has a to_wkt method https://github.com/mapbox/rasterio/blob/b9f34ee559039239c7c0c97bd911b466701a39cd/rasterio/crs.py#L427-L456 This would improve interoperability between the rasterio, fiona, and pyproj CRS classes. Thoughts?
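The crs.py content in this record shows the duck-typed branch the proposal describes: from_user_input dispatches on a callable to_wkt attribute before trying EPSG codes, dicts, and strings. A minimal usage sketch, assuming rasterio is installed and its from_user_input includes that branch; MyCRS is a hypothetical stand-in for a fiona or pyproj CRS object:

```python
from rasterio.crs import CRS

class MyCRS:
    """Hypothetical stand-in for any CRS-like object exposing a callable to_wkt()."""
    def to_wkt(self):
        return CRS.from_epsg(4326).to_wkt()

# from_user_input detects the to_wkt() method and routes through CRS.from_wkt().
crs = CRS.from_user_input(MyCRS())
print(crs.to_epsg())  # expected: 4326
```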
[ { "content": "\"\"\"Coordinate Reference Systems\n\nNotes\n-----\n\nIn Rasterio versions <= 1.0.13, coordinate reference system support was limited\nto the CRS that can be described by PROJ parameters. This limitation is gone in\nversions >= 1.0.14. Any CRS that can be defined using WKT (version 1) may be\nused.\n\n\"\"\"\n\nimport collections\nimport json\nimport pickle\n\nfrom rasterio._crs import _CRS, all_proj_keys\nfrom rasterio.compat import Mapping, string_types\nfrom rasterio.errors import CRSError\n\n\nclass CRS(Mapping):\n \"\"\"A geographic or projected coordinate reference system\n\n CRS objects may be created by passing PROJ parameters as keyword\n arguments to the standard constructor or by passing EPSG codes, PROJ\n mappings, PROJ strings, or WKT strings to the from_epsg, from_dict,\n from_string, or from_wkt class methods or static methods.\n\n Examples\n --------\n\n The from_dict method takes PROJ parameters as keyword arguments.\n\n >>> crs = CRS.from_dict(init='epsg:3005')\n\n EPSG codes may be used with the from_epsg method.\n\n >>> crs = CRS.from_epsg(3005)\n\n The from_string method takes a variety of input.\n\n >>> crs = CRS.from_string('EPSG:3005')\n\n \"\"\"\n def __init__(self, initialdata=None, **kwargs):\n \"\"\"Make a CRS from a PROJ dict or mapping\n\n Parameters\n ----------\n initialdata : mapping, optional\n A dictionary or other mapping\n kwargs : mapping, optional\n Another mapping. Will be overlaid on the initialdata.\n\n Returns\n -------\n CRS\n\n \"\"\"\n self._wkt = None\n self._data = None\n self._crs = None\n\n if initialdata or kwargs:\n data = dict(initialdata or {})\n data.update(**kwargs)\n data = {k: v for k, v in data.items() if k in all_proj_keys}\n self._crs = _CRS.from_dict(data)\n\n else:\n self._crs = _CRS()\n\n def __getitem__(self, item):\n return self.data[item]\n\n def __iter__(self):\n return iter(self.data)\n\n def __len__(self):\n return len(self.data)\n\n def __bool__(self):\n return bool(self.wkt)\n\n __nonzero__ = __bool__\n\n def __eq__(self, other):\n other = CRS.from_user_input(other)\n return (self._crs == other._crs)\n\n def __getstate__(self):\n return self.to_wkt()\n\n def __setstate__(self, state):\n self._wkt = None\n self._data = None\n self._crs = _CRS.from_wkt(state)\n\n def __copy__(self):\n return pickle.loads(pickle.dumps(self))\n\n def __hash__(self):\n return hash(self.to_wkt())\n\n def to_proj4(self):\n \"\"\"Convert CRS to a PROJ4 string\n\n Returns\n -------\n str\n\n \"\"\"\n return ' '.join(['+{}={}'.format(key, val) for key, val in self.data.items()])\n\n def to_wkt(self, morph_to_esri_dialect=False):\n \"\"\"Convert CRS to its OGC WKT representation\n\n Parameters\n ----------\n morph_to_esri_dialect : bool, optional\n Whether or not to morph to the Esri dialect of WKT\n\n Returns\n -------\n str\n\n \"\"\"\n return self._crs.to_wkt(morph_to_esri_dialect=morph_to_esri_dialect)\n\n @property\n def wkt(self):\n \"\"\"An OGC WKT representation of the CRS\n\n Returns\n -------\n str\n\n \"\"\"\n if not self._wkt:\n self._wkt = self.to_wkt()\n return self._wkt\n\n def to_epsg(self):\n \"\"\"The epsg code of the CRS\n\n Returns None if there is no corresponding EPSG code.\n\n Returns\n -------\n int\n\n \"\"\"\n return self._crs.to_epsg()\n\n def to_dict(self):\n \"\"\"Convert CRS to a PROJ4 dict\n\n Notes\n -----\n If there is a corresponding EPSG code, it will be used.\n\n Returns\n -------\n dict\n\n \"\"\"\n if self._crs is None:\n raise CRSError(\"Undefined CRS has no dict representation\")\n\n else:\n 
epsg_code = self.to_epsg()\n if epsg_code:\n return {'init': 'epsg:{}'.format(epsg_code)}\n else:\n try:\n return self._crs.to_dict()\n except CRSError:\n return {}\n\n @property\n def data(self):\n \"\"\"A PROJ4 dict representation of the CRS\"\"\"\n if not self._data:\n self._data = self.to_dict()\n return self._data\n\n @property\n def is_geographic(self):\n \"\"\"Test that the CRS is a geographic CRS\n\n Returns\n -------\n bool\n\n \"\"\"\n return self._crs.is_geographic\n\n @property\n def is_projected(self):\n \"\"\"Test that the CRS is a projected CRS\n\n Returns\n -------\n bool\n\n \"\"\"\n return self._crs.is_projected\n\n @property\n def is_valid(self):\n \"\"\"Test that the CRS is a geographic or projected CRS\n\n Notes\n -----\n There are other types of CRS, such as compound or local or\n engineering CRS, but these are not supported in Rasterio 1.0.\n\n Returns\n -------\n bool\n\n \"\"\"\n return self.is_geographic or self.is_projected\n\n @property\n def is_epsg_code(self):\n \"\"\"Test if the CRS is defined by an EPSG code\n\n Returns\n -------\n bool\n\n \"\"\"\n try:\n return bool(self.to_epsg())\n except CRSError:\n return False\n\n @property\n def linear_units(self):\n \"\"\"The linear units of the CRS\n\n Possible values include \"metre\" and \"US survey foot\".\n\n Returns\n -------\n str\n\n \"\"\"\n return self._crs.linear_units\n\n def to_string(self):\n \"\"\"Convert CRS to a PROJ4 or WKT string\n\n Notes\n -----\n\n Mapping keys are tested against the ``all_proj_keys`` list.\n Values of ``True`` are omitted, leaving the key bare:\n {'no_defs': True} -> \"+no_defs\" and items where the value is\n otherwise not a str, int, or float are omitted.\n\n Returns\n -------\n str\n\n \"\"\"\n epsg_code = self.to_epsg()\n if epsg_code:\n return 'EPSG:{}'.format(epsg_code)\n else:\n return self.to_wkt() or self.to_proj4()\n\n __str__ = to_string\n\n def __repr__(self):\n epsg_code = self.to_epsg()\n if epsg_code:\n return \"CRS.from_epsg({})\".format(epsg_code)\n else:\n return \"CRS.from_wkt('{}')\".format(self.wkt)\n\n @classmethod\n def from_epsg(cls, code):\n \"\"\"Make a CRS from an EPSG code\n\n Parameters\n ----------\n code : int or str\n An EPSG code. 
Strings will be converted to integers.\n\n Notes\n -----\n The input code is not validated against an EPSG database.\n\n Returns\n -------\n CRS\n\n \"\"\"\n obj = cls()\n obj._crs = _CRS.from_epsg(code)\n return obj\n\n @classmethod\n def from_string(cls, string, morph_from_esri_dialect=False):\n \"\"\"Make a CRS from an EPSG, PROJ, or WKT string\n\n Parameters\n ----------\n string : str\n An EPSG, PROJ, or WKT string.\n morph_from_esri_dialect : bool, optional\n If True, items in the input using Esri's dialect of WKT\n will be replaced by OGC standard equivalents.\n\n Returns\n -------\n CRS\n\n \"\"\"\n if not string:\n raise CRSError(\"CRS is empty or invalid: {!r}\".format(string))\n\n elif string.strip().upper().startswith('EPSG:'):\n auth, val = string.strip().split(':')\n if not val:\n raise CRSError(\"Invalid CRS: {!r}\".format(string))\n return cls.from_epsg(val)\n\n elif string.startswith('{') or string.startswith('['):\n # may be json, try to decode it\n try:\n val = json.loads(string, strict=False)\n except ValueError:\n raise CRSError('CRS appears to be JSON but is not valid')\n\n if not val:\n raise CRSError(\"CRS is empty JSON\")\n else:\n return cls.from_dict(**val)\n\n elif '+' in string and '=' in string:\n return cls.from_proj4(string)\n\n else:\n return cls.from_wkt(string, morph_from_esri_dialect=morph_from_esri_dialect)\n\n @classmethod\n def from_proj4(cls, proj):\n \"\"\"Make a CRS from a PROJ4 string\n\n Parameters\n ----------\n proj : str\n A PROJ4 string like \"+proj=longlat ...\"\n\n Returns\n -------\n CRS\n\n \"\"\"\n obj = cls()\n obj._crs = _CRS.from_proj4(proj)\n return obj\n\n @classmethod\n def from_dict(cls, initialdata=None, **kwargs):\n \"\"\"Make a CRS from a PROJ dict\n\n Parameters\n ----------\n initialdata : mapping, optional\n A dictionary or other mapping\n kwargs : mapping, optional\n Another mapping. Will be overlaid on the initialdata.\n\n Returns\n -------\n CRS\n\n \"\"\"\n obj = cls()\n obj._crs = _CRS.from_dict(initialdata, **kwargs)\n return obj\n\n @classmethod\n def from_wkt(cls, wkt, morph_from_esri_dialect=False):\n \"\"\"Make a CRS from a WKT string\n\n Parameters\n ----------\n wkt : str\n A WKT string.\n morph_from_esri_dialect : bool, optional\n If True, items in the input using Esri's dialect of WKT\n will be replaced by OGC standard equivalents.\n\n Returns\n -------\n CRS\n\n \"\"\"\n obj = cls()\n obj._crs = _CRS.from_wkt(wkt, morph_from_esri_dialect=morph_from_esri_dialect)\n return obj\n\n @classmethod\n def from_user_input(cls, value, morph_from_esri_dialect=False):\n \"\"\"Make a CRS from various input\n\n Dispatches to from_epsg, from_proj, or from_string\n\n Parameters\n ----------\n value : obj\n A Python int, dict, or str.\n morph_from_esri_dialect : bool, optional\n If True, items in the input using Esri's dialect of WKT\n will be replaced by OGC standard equivalents.\n\n Returns\n -------\n CRS\n\n \"\"\"\n if isinstance(value, cls):\n return value\n elif hasattr(value, \"to_wkt\") and callable(value.to_wkt):\n return cls.from_wkt(\n value.to_wkt(),\n morph_from_esri_dialect=morph_from_esri_dialect,\n )\n elif isinstance(value, int):\n return cls.from_epsg(value)\n elif isinstance(value, dict):\n return cls(**value)\n elif isinstance(value, string_types):\n obj = cls()\n obj._crs = _CRS.from_user_input(value, morph_from_esri_dialect=morph_from_esri_dialect)\n return obj\n else:\n raise CRSError(\"CRS is invalid: {!r}\".format(value))\n", "path": "rasterio/crs.py" } ]
[ { "content": "\"\"\"Coordinate Reference Systems\n\nNotes\n-----\n\nIn Rasterio versions <= 1.0.13, coordinate reference system support was limited\nto the CRS that can be described by PROJ parameters. This limitation is gone in\nversions >= 1.0.14. Any CRS that can be defined using WKT (version 1) may be\nused.\n\n\"\"\"\n\nimport collections\nimport json\nimport pickle\n\nfrom rasterio._crs import _CRS, all_proj_keys\nfrom rasterio.compat import Mapping, string_types\nfrom rasterio.errors import CRSError\n\n\nclass CRS(Mapping):\n \"\"\"A geographic or projected coordinate reference system\n\n CRS objects may be created by passing PROJ parameters as keyword\n arguments to the standard constructor or by passing EPSG codes, PROJ\n mappings, PROJ strings, or WKT strings to the from_epsg, from_dict,\n from_string, or from_wkt class methods or static methods.\n\n Examples\n --------\n\n The from_dict method takes PROJ parameters as keyword arguments.\n\n >>> crs = CRS.from_dict(init='epsg:3005')\n\n EPSG codes may be used with the from_epsg method.\n\n >>> crs = CRS.from_epsg(3005)\n\n The from_string method takes a variety of input.\n\n >>> crs = CRS.from_string('EPSG:3005')\n\n \"\"\"\n def __init__(self, initialdata=None, **kwargs):\n \"\"\"Make a CRS from a PROJ dict or mapping\n\n Parameters\n ----------\n initialdata : mapping, optional\n A dictionary or other mapping\n kwargs : mapping, optional\n Another mapping. Will be overlaid on the initialdata.\n\n Returns\n -------\n CRS\n\n \"\"\"\n self._wkt = None\n self._data = None\n self._crs = None\n\n if initialdata or kwargs:\n data = dict(initialdata or {})\n data.update(**kwargs)\n data = {k: v for k, v in data.items() if k in all_proj_keys}\n self._crs = _CRS.from_dict(data)\n\n else:\n self._crs = _CRS()\n\n def __getitem__(self, item):\n return self.data[item]\n\n def __iter__(self):\n return iter(self.data)\n\n def __len__(self):\n return len(self.data)\n\n def __bool__(self):\n return bool(self.wkt)\n\n __nonzero__ = __bool__\n\n def __eq__(self, other):\n try:\n other = CRS.from_user_input(other)\n except CRSError:\n return False\n return (self._crs == other._crs)\n\n def __getstate__(self):\n return self.to_wkt()\n\n def __setstate__(self, state):\n self._wkt = None\n self._data = None\n self._crs = _CRS.from_wkt(state)\n\n def __copy__(self):\n return pickle.loads(pickle.dumps(self))\n\n def __hash__(self):\n return hash(self.to_wkt())\n\n def to_proj4(self):\n \"\"\"Convert CRS to a PROJ4 string\n\n Returns\n -------\n str\n\n \"\"\"\n return ' '.join(['+{}={}'.format(key, val) for key, val in self.data.items()])\n\n def to_wkt(self, morph_to_esri_dialect=False):\n \"\"\"Convert CRS to its OGC WKT representation\n\n Parameters\n ----------\n morph_to_esri_dialect : bool, optional\n Whether or not to morph to the Esri dialect of WKT\n\n Returns\n -------\n str\n\n \"\"\"\n return self._crs.to_wkt(morph_to_esri_dialect=morph_to_esri_dialect)\n\n @property\n def wkt(self):\n \"\"\"An OGC WKT representation of the CRS\n\n Returns\n -------\n str\n\n \"\"\"\n if not self._wkt:\n self._wkt = self.to_wkt()\n return self._wkt\n\n def to_epsg(self):\n \"\"\"The epsg code of the CRS\n\n Returns None if there is no corresponding EPSG code.\n\n Returns\n -------\n int\n\n \"\"\"\n return self._crs.to_epsg()\n\n def to_dict(self):\n \"\"\"Convert CRS to a PROJ4 dict\n\n Notes\n -----\n If there is a corresponding EPSG code, it will be used.\n\n Returns\n -------\n dict\n\n \"\"\"\n if self._crs is None:\n raise CRSError(\"Undefined CRS 
has no dict representation\")\n\n else:\n epsg_code = self.to_epsg()\n if epsg_code:\n return {'init': 'epsg:{}'.format(epsg_code)}\n else:\n try:\n return self._crs.to_dict()\n except CRSError:\n return {}\n\n @property\n def data(self):\n \"\"\"A PROJ4 dict representation of the CRS\"\"\"\n if not self._data:\n self._data = self.to_dict()\n return self._data\n\n @property\n def is_geographic(self):\n \"\"\"Test that the CRS is a geographic CRS\n\n Returns\n -------\n bool\n\n \"\"\"\n return self._crs.is_geographic\n\n @property\n def is_projected(self):\n \"\"\"Test that the CRS is a projected CRS\n\n Returns\n -------\n bool\n\n \"\"\"\n return self._crs.is_projected\n\n @property\n def is_valid(self):\n \"\"\"Test that the CRS is a geographic or projected CRS\n\n Notes\n -----\n There are other types of CRS, such as compound or local or\n engineering CRS, but these are not supported in Rasterio 1.0.\n\n Returns\n -------\n bool\n\n \"\"\"\n return self.is_geographic or self.is_projected\n\n @property\n def is_epsg_code(self):\n \"\"\"Test if the CRS is defined by an EPSG code\n\n Returns\n -------\n bool\n\n \"\"\"\n try:\n return bool(self.to_epsg())\n except CRSError:\n return False\n\n @property\n def linear_units(self):\n \"\"\"The linear units of the CRS\n\n Possible values include \"metre\" and \"US survey foot\".\n\n Returns\n -------\n str\n\n \"\"\"\n return self._crs.linear_units\n\n def to_string(self):\n \"\"\"Convert CRS to a PROJ4 or WKT string\n\n Notes\n -----\n\n Mapping keys are tested against the ``all_proj_keys`` list.\n Values of ``True`` are omitted, leaving the key bare:\n {'no_defs': True} -> \"+no_defs\" and items where the value is\n otherwise not a str, int, or float are omitted.\n\n Returns\n -------\n str\n\n \"\"\"\n epsg_code = self.to_epsg()\n if epsg_code:\n return 'EPSG:{}'.format(epsg_code)\n else:\n return self.to_wkt() or self.to_proj4()\n\n __str__ = to_string\n\n def __repr__(self):\n epsg_code = self.to_epsg()\n if epsg_code:\n return \"CRS.from_epsg({})\".format(epsg_code)\n else:\n return \"CRS.from_wkt('{}')\".format(self.wkt)\n\n @classmethod\n def from_epsg(cls, code):\n \"\"\"Make a CRS from an EPSG code\n\n Parameters\n ----------\n code : int or str\n An EPSG code. 
Strings will be converted to integers.\n\n Notes\n -----\n The input code is not validated against an EPSG database.\n\n Returns\n -------\n CRS\n\n \"\"\"\n obj = cls()\n obj._crs = _CRS.from_epsg(code)\n return obj\n\n @classmethod\n def from_string(cls, string, morph_from_esri_dialect=False):\n \"\"\"Make a CRS from an EPSG, PROJ, or WKT string\n\n Parameters\n ----------\n string : str\n An EPSG, PROJ, or WKT string.\n morph_from_esri_dialect : bool, optional\n If True, items in the input using Esri's dialect of WKT\n will be replaced by OGC standard equivalents.\n\n Returns\n -------\n CRS\n\n \"\"\"\n if not string:\n raise CRSError(\"CRS is empty or invalid: {!r}\".format(string))\n\n elif string.strip().upper().startswith('EPSG:'):\n auth, val = string.strip().split(':')\n if not val:\n raise CRSError(\"Invalid CRS: {!r}\".format(string))\n return cls.from_epsg(val)\n\n elif string.startswith('{') or string.startswith('['):\n # may be json, try to decode it\n try:\n val = json.loads(string, strict=False)\n except ValueError:\n raise CRSError('CRS appears to be JSON but is not valid')\n\n if not val:\n raise CRSError(\"CRS is empty JSON\")\n else:\n return cls.from_dict(**val)\n\n elif '+' in string and '=' in string:\n return cls.from_proj4(string)\n\n else:\n return cls.from_wkt(string, morph_from_esri_dialect=morph_from_esri_dialect)\n\n @classmethod\n def from_proj4(cls, proj):\n \"\"\"Make a CRS from a PROJ4 string\n\n Parameters\n ----------\n proj : str\n A PROJ4 string like \"+proj=longlat ...\"\n\n Returns\n -------\n CRS\n\n \"\"\"\n obj = cls()\n obj._crs = _CRS.from_proj4(proj)\n return obj\n\n @classmethod\n def from_dict(cls, initialdata=None, **kwargs):\n \"\"\"Make a CRS from a PROJ dict\n\n Parameters\n ----------\n initialdata : mapping, optional\n A dictionary or other mapping\n kwargs : mapping, optional\n Another mapping. Will be overlaid on the initialdata.\n\n Returns\n -------\n CRS\n\n \"\"\"\n obj = cls()\n obj._crs = _CRS.from_dict(initialdata, **kwargs)\n return obj\n\n @classmethod\n def from_wkt(cls, wkt, morph_from_esri_dialect=False):\n \"\"\"Make a CRS from a WKT string\n\n Parameters\n ----------\n wkt : str\n A WKT string.\n morph_from_esri_dialect : bool, optional\n If True, items in the input using Esri's dialect of WKT\n will be replaced by OGC standard equivalents.\n\n Returns\n -------\n CRS\n\n \"\"\"\n obj = cls()\n obj._crs = _CRS.from_wkt(wkt, morph_from_esri_dialect=morph_from_esri_dialect)\n return obj\n\n @classmethod\n def from_user_input(cls, value, morph_from_esri_dialect=False):\n \"\"\"Make a CRS from various input\n\n Dispatches to from_epsg, from_proj, or from_string\n\n Parameters\n ----------\n value : obj\n A Python int, dict, or str.\n morph_from_esri_dialect : bool, optional\n If True, items in the input using Esri's dialect of WKT\n will be replaced by OGC standard equivalents.\n\n Returns\n -------\n CRS\n\n \"\"\"\n if isinstance(value, cls):\n return value\n elif hasattr(value, \"to_wkt\") and callable(value.to_wkt):\n return cls.from_wkt(\n value.to_wkt(),\n morph_from_esri_dialect=morph_from_esri_dialect,\n )\n elif isinstance(value, int):\n return cls.from_epsg(value)\n elif isinstance(value, dict):\n return cls(**value)\n elif isinstance(value, string_types):\n obj = cls()\n obj._crs = _CRS.from_user_input(value, morph_from_esri_dialect=morph_from_esri_dialect)\n return obj\n else:\n raise CRSError(\"CRS is invalid: {!r}\".format(value))\n", "path": "rasterio/crs.py" } ]
diff --git a/CHANGES.txt b/CHANGES.txt index cfc3bba79..715d4975e 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -12,7 +12,10 @@ Changes - Several Python deprecation warnings have been eliminated (#1742). - When a MemoryFile is opened in write mode, a TypeError will be raised if integer width and height are not given, fixing #1748. -- Support CRS.from_user_input with other CRS object with `to_wkt()` method (#1718) +- Return False when checking equality with objects incompatible with + ``CRS.from_user_input()`` (#1719) +- Support CRS.from_user_input with other CRS object with ``to_wkt()`` method + (#1718) 1.0.25 (2019-08-06) ------------------- diff --git a/rasterio/crs.py b/rasterio/crs.py index a6b7869df..b027a4f2f 100644 --- a/rasterio/crs.py +++ b/rasterio/crs.py @@ -86,7 +86,10 @@ def __bool__(self): __nonzero__ = __bool__ def __eq__(self, other): - other = CRS.from_user_input(other) + try: + other = CRS.from_user_input(other) + except CRSError: + return False return (self._crs == other._crs) def __getstate__(self): diff --git a/tests/test_crs.py b/tests/test_crs.py index 86cc7497e..b3f3fcd64 100644 --- a/tests/test_crs.py +++ b/tests/test_crs.py @@ -478,5 +478,12 @@ def test_crs84(): assert "WGS 84" in CRS.from_user_input("urn:ogc:def:crs:OGC::CRS84").wkt +@pytest.mark.parametrize("other", ["", 4.2, 0]) +def test_equals_different_type(other): + """Comparison to non-CRS objects is False""" + assert CRS.from_epsg(4326) != other + + def test_from_user_input_custom_crs_class(): - assert CRS.from_user_input(CustomCRS()) == CRS.from_epsg(4326) + """Support comparison to foreign objects that provide to_wkt()""" + assert CRS.from_user_input(CustomCRS()) == CRS.from_epsg(4326) \ No newline at end of file
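A small usage sketch of the behavior introduced by the diff above (illustrative only; it mirrors the tests added in `tests/test_crs.py`, and the `CustomCRS` class here is a stand-in defined just for the example):

```python
# Sketch of the patched __eq__: objects that CRS.from_user_input() cannot
# parse now compare unequal instead of raising CRSError.
from rasterio.crs import CRS

wgs84 = CRS.from_epsg(4326)

print(wgs84 == "")    # False (previously this raised CRSError)
print(wgs84 == 4.2)   # False
print(wgs84 == 0)     # False

# Foreign objects exposing a callable to_wkt() are still dispatched through
# CRS.from_wkt(), so they can compare equal to a real CRS.
class CustomCRS:
    def to_wkt(self):
        return wgs84.to_wkt()

print(wgs84 == CustomCRS())  # True
```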
Qiskit__qiskit-1940
rzz gate ### Information - **Qiskit Terra version**: 0.7.2 - **Python version**: 3.6.6 - **Operating system**: Windows 10 ### What is the current behavior? The rzz gate appears to give incorrect results. ### Steps to reproduce the problem The rzz gate rule defined in https://github.com/Qiskit/qiskit-terra/blob/master/qiskit/extensions/standard/rzz.py is ``` CnotGate(q[0], q[1]), U1Gate(self.params[0], q[0]), CnotGate(q[0], q[1]) ``` ### What is the expected behavior? I think it should be ``` CnotGate(q[0], q[1]), U1Gate(self.params[0], q[1]), CnotGate(q[0], q[1]) ``` The u1 phase should be on the target instead of the control. ### Suggested solutions Modify the rzz gate definition to give the right behavior.
[ { "content": "# -*- coding: utf-8 -*-\n\n# Copyright 2017, IBM.\n#\n# This source code is licensed under the Apache License, Version 2.0 found in\n# the LICENSE.txt file in the root directory of this source tree.\n\n\"\"\"\ntwo-qubit ZZ-rotation gate.\n\"\"\"\nfrom qiskit.circuit import CompositeGate\nfrom qiskit.circuit import Gate\nfrom qiskit.circuit import QuantumCircuit\nfrom qiskit.circuit import QuantumRegister\nfrom qiskit.circuit.decorators import _op_expand\nfrom qiskit.dagcircuit import DAGCircuit\nfrom qiskit.extensions.standard.u1 import U1Gate\nfrom qiskit.extensions.standard.cx import CnotGate\n\n\nclass RZZGate(Gate):\n \"\"\"Two-qubit ZZ-rotation gate.\"\"\"\n\n def __init__(self, theta, ctl, tgt, circ=None):\n \"\"\"Create new rzz gate.\"\"\"\n super().__init__(\"rzz\", [theta], [ctl, tgt], circ)\n\n def _define_decompositions(self):\n \"\"\"\n gate rzz(theta) a, b { cx a, b; u1(theta) b; cx a, b; }\n \"\"\"\n decomposition = DAGCircuit()\n q = QuantumRegister(2, \"q\")\n decomposition.add_qreg(q)\n rule = [\n CnotGate(q[0], q[1]),\n U1Gate(self.params[0], q[0]),\n CnotGate(q[0], q[1])\n ]\n for inst in rule:\n decomposition.apply_operation_back(inst)\n self._decompositions = [decomposition]\n\n def inverse(self):\n \"\"\"Invert this gate.\"\"\"\n self.params[0] = -self.params[0]\n self._decompositions = None\n return self\n\n def reapply(self, circ):\n \"\"\"Reapply this gate to corresponding qubits in circ.\"\"\"\n self._modifiers(circ.rzz(self.params[0], self.qargs[0], self.qargs[1]))\n\n\n@_op_expand(2, broadcastable=[False, False])\ndef rzz(self, theta, qubit1, qubit2):\n \"\"\"Apply RZZ to circuit.\"\"\"\n self._check_qubit(qubit1)\n self._check_qubit(qubit2)\n self._check_dups([qubit1, qubit2])\n return self._attach(RZZGate(theta, qubit1, qubit2, self))\n\n\n# Add to QuantumCircuit and CompositeGate classes\nQuantumCircuit.rzz = rzz\nCompositeGate.rzz = rzz\n", "path": "qiskit/extensions/standard/rzz.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\n\n# Copyright 2017, IBM.\n#\n# This source code is licensed under the Apache License, Version 2.0 found in\n# the LICENSE.txt file in the root directory of this source tree.\n\n\"\"\"\ntwo-qubit ZZ-rotation gate.\n\"\"\"\nfrom qiskit.circuit import CompositeGate\nfrom qiskit.circuit import Gate\nfrom qiskit.circuit import QuantumCircuit\nfrom qiskit.circuit import QuantumRegister\nfrom qiskit.circuit.decorators import _op_expand\nfrom qiskit.dagcircuit import DAGCircuit\nfrom qiskit.extensions.standard.u1 import U1Gate\nfrom qiskit.extensions.standard.cx import CnotGate\n\n\nclass RZZGate(Gate):\n \"\"\"Two-qubit ZZ-rotation gate.\"\"\"\n\n def __init__(self, theta, ctl, tgt, circ=None):\n \"\"\"Create new rzz gate.\"\"\"\n super().__init__(\"rzz\", [theta], [ctl, tgt], circ)\n\n def _define_decompositions(self):\n \"\"\"\n gate rzz(theta) a, b { cx a, b; u1(theta) b; cx a, b; }\n \"\"\"\n decomposition = DAGCircuit()\n q = QuantumRegister(2, \"q\")\n decomposition.add_qreg(q)\n rule = [\n CnotGate(q[0], q[1]),\n U1Gate(self.params[0], q[1]),\n CnotGate(q[0], q[1])\n ]\n for inst in rule:\n decomposition.apply_operation_back(inst)\n self._decompositions = [decomposition]\n\n def inverse(self):\n \"\"\"Invert this gate.\"\"\"\n self.params[0] = -self.params[0]\n self._decompositions = None\n return self\n\n def reapply(self, circ):\n \"\"\"Reapply this gate to corresponding qubits in circ.\"\"\"\n self._modifiers(circ.rzz(self.params[0], self.qargs[0], self.qargs[1]))\n\n\n@_op_expand(2, broadcastable=[False, False])\ndef rzz(self, theta, qubit1, qubit2):\n \"\"\"Apply RZZ to circuit.\"\"\"\n self._check_qubit(qubit1)\n self._check_qubit(qubit2)\n self._check_dups([qubit1, qubit2])\n return self._attach(RZZGate(theta, qubit1, qubit2, self))\n\n\n# Add to QuantumCircuit and CompositeGate classes\nQuantumCircuit.rzz = rzz\nCompositeGate.rzz = rzz\n", "path": "qiskit/extensions/standard/rzz.py" } ]
diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 36b89565b366..1d0058015624 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -95,6 +95,7 @@ Fixed used in conditional operations (#1705). - Fixed a bug that with transpile ignoring initial layout when coupling map is provided (#1711). +- Fixed a bug in the definition of the rzz gate (#1940). Removed ------- diff --git a/qiskit/extensions/standard/rzz.py b/qiskit/extensions/standard/rzz.py index bba403cfef8e..d51147b140ae 100644 --- a/qiskit/extensions/standard/rzz.py +++ b/qiskit/extensions/standard/rzz.py @@ -34,7 +34,7 @@ def _define_decompositions(self): decomposition.add_qreg(q) rule = [ CnotGate(q[0], q[1]), - U1Gate(self.params[0], q[0]), + U1Gate(self.params[0], q[1]), CnotGate(q[0], q[1]) ] for inst in rule:
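A quick linear-algebra check of the claim in the issue above (illustrative NumPy only, not part of the patch; it uses the convention that `q[0]` is the control, the basis is ordered |q0 q1>, and `np.kron(A, B)` puts `A` on `q[0]`):

```python
import numpy as np

theta = 0.37
e = np.exp(1j * theta)

cx = np.array([[1, 0, 0, 0],           # CNOT with q[0] as control, q[1] as target
               [0, 1, 0, 0],
               [0, 0, 0, 1],
               [0, 0, 1, 0]])
u1 = np.diag([1, e])                    # u1(theta)
eye = np.eye(2)

on_target = cx @ np.kron(eye, u1) @ cx   # cx; u1(theta) q[1]; cx  (proposed rule)
on_control = cx @ np.kron(u1, eye) @ cx  # cx; u1(theta) q[0]; cx  (current rule)

print(np.round(np.diag(on_target), 3))   # [1, e^{i theta}, e^{i theta}, 1]
print(np.round(np.diag(on_control), 3))  # [1, 1, e^{i theta}, e^{i theta}]
```

The first diagonal equals exp(-i theta/2 Z⊗Z) up to a global phase, i.e. a genuine ZZ rotation, while the second is only a single-qubit phase on the control, which is consistent with the fix moving the `U1Gate` onto `q[1]`.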
localstack__localstack-5793
bug: KMS_PROVIDER=local-kms not honored ### Is there an existing issue for this? - [X] I have searched the existing issues ### Current Behavior When updating from 0.14.1 to 0.14.2 it appears that the `KMS_PROVIDER` environment variable is no longer honored, and `local-kms` is not used. ### Expected Behavior Setting `KMS_PROVIDER=local-kms` and using the KMS service should launch `local-kms` as it did previously. ### How are you starting LocalStack? With a `docker run` command ### Steps To Reproduce #### How are you starting localstack (e.g., `bin/localstack` command, arguments, or `docker-compose.yml`) docker run -e KMS_PROVIDER=local-kms localstack/localstack:0.14.1 docker run -e KMS_PROVIDER=local-kms localstack/localstack:0.14.2 #### Client commands (e.g., AWS SDK code snippet, or sequence of "awslocal" commands) ##### 0.14.1 awslocal kms list-keys # { # "Keys": [] # } ps x|grep kms # 164 ? Ss 0:00 /bin/sh -c /opt/code/localstack/localstack/infra/kms/local-kms.linux-arm64.bin # 165 ? Sl 0:00 /opt/code/localstack/localstack/infra/kms/local-kms.linux-arm64.bin # 179 pts/0 S+ 0:00 grep kms ##### 0.14.2 awslocal kms list-keys # { # "Keys": [] # } ps x|grep kms # 151 pts/0 S+ 0:00 grep kms ### Environment ```markdown - OS: macOS 12.3 - LocalStack: 0.14.2 - Docker: 20.10.13, build a224086 ``` ### Anything else? _No response_
[ { "content": "from localstack import config\nfrom localstack.aws.proxy import AwsApiListener\nfrom localstack.services.moto import MotoFallbackDispatcher\nfrom localstack.services.plugins import Service, aws_provider\n\n\n@aws_provider()\ndef acm():\n from localstack.services.acm.provider import AcmProvider\n from localstack.services.moto import MotoFallbackDispatcher\n\n provider = AcmProvider()\n\n return Service(\"acm\", listener=AwsApiListener(\"acm\", MotoFallbackDispatcher(provider)))\n\n\n@aws_provider()\ndef apigateway():\n from localstack.services.apigateway import apigateway_listener, apigateway_starter\n\n return Service(\n \"apigateway\",\n listener=apigateway_listener.UPDATE_APIGATEWAY,\n start=apigateway_starter.start_apigateway,\n )\n\n\n@aws_provider()\ndef cloudformation():\n from localstack.services.cloudformation import cloudformation_starter\n\n return Service(\"cloudformation\", start=cloudformation_starter.start_cloudformation)\n\n\n@aws_provider(api=\"config\")\ndef awsconfig():\n from localstack.services.configservice.provider import ConfigProvider\n from localstack.services.moto import MotoFallbackDispatcher\n\n provider = ConfigProvider()\n return Service(\"config\", listener=AwsApiListener(\"config\", MotoFallbackDispatcher(provider)))\n\n\n@aws_provider()\ndef cloudwatch():\n from localstack.services.cloudwatch.provider import CloudwatchProvider\n from localstack.services.moto import MotoFallbackDispatcher\n\n provider = CloudwatchProvider()\n listener = AwsApiListener(\"cloudwatch\", MotoFallbackDispatcher(provider))\n\n return Service(\n \"cloudwatch\",\n listener=listener,\n lifecycle_hook=provider,\n )\n\n\n@aws_provider()\ndef dynamodb():\n from localstack.services.dynamodb import dynamodb_listener, dynamodb_starter\n\n return Service(\n \"dynamodb\",\n listener=dynamodb_listener.UPDATE_DYNAMODB,\n start=dynamodb_starter.start_dynamodb,\n check=dynamodb_starter.check_dynamodb,\n )\n\n\n@aws_provider()\ndef dynamodbstreams():\n from localstack.aws.proxy import AwsApiListener\n from localstack.services.dynamodbstreams.provider import DynamoDBStreamsProvider\n\n provider = DynamoDBStreamsProvider()\n return Service(\n \"dynamodbstreams\",\n listener=AwsApiListener(\"dynamodbstreams\", provider),\n lifecycle_hook=provider,\n )\n\n\n@aws_provider()\ndef ec2():\n from localstack.services.ec2.provider import Ec2Provider\n from localstack.services.moto import MotoFallbackDispatcher\n\n provider = Ec2Provider()\n return Service(\n \"ec2\",\n listener=AwsApiListener(\"ec2\", MotoFallbackDispatcher(provider)),\n )\n\n\n@aws_provider()\ndef es():\n from localstack.aws.proxy import AwsApiListener\n from localstack.services.es.provider import EsProvider\n\n provider = EsProvider()\n return Service(\"es\", listener=AwsApiListener(\"es\", provider))\n\n\n@aws_provider()\ndef firehose():\n from localstack.aws.proxy import AwsApiListener\n from localstack.services.firehose.provider import FirehoseProvider\n\n provider = FirehoseProvider()\n return Service(\"firehose\", listener=AwsApiListener(\"firehose\", provider))\n\n\n@aws_provider()\ndef iam():\n from localstack.services.iam import iam_listener, iam_starter\n\n return Service(\"iam\", listener=iam_listener.UPDATE_IAM, start=iam_starter.start_iam)\n\n\n@aws_provider()\ndef sts():\n from localstack.services.sts.provider import StsAwsApiListener\n\n listener = StsAwsApiListener()\n return Service(\"sts\", listener=listener)\n\n\n@aws_provider()\ndef kinesis():\n from localstack.services.kinesis import kinesis_listener, 
kinesis_starter\n\n return Service(\n \"kinesis\",\n listener=kinesis_listener.UPDATE_KINESIS,\n start=kinesis_starter.start_kinesis,\n check=kinesis_starter.check_kinesis,\n )\n\n\n@aws_provider()\ndef kms():\n if config.KMS_PROVIDER == \"kms-local\":\n from localstack.services.kms import kms_starter\n\n return Service(\"kms\", start=kms_starter.start_kms_local)\n\n # fall back to default provider\n from localstack.services.kms.provider import KmsProvider\n\n provider = KmsProvider()\n return Service(\"kms\", listener=AwsApiListener(\"kms\", MotoFallbackDispatcher(provider)))\n\n\n@aws_provider(api=\"lambda\")\ndef awslambda():\n from localstack.services.awslambda import lambda_starter\n\n return Service(\n \"lambda\",\n start=lambda_starter.start_lambda,\n stop=lambda_starter.stop_lambda,\n check=lambda_starter.check_lambda,\n )\n\n\n@aws_provider()\ndef logs():\n from localstack.services.logs.provider import LogsAwsApiListener\n\n listener = LogsAwsApiListener()\n return Service(\"logs\", listener=listener)\n\n\n@aws_provider()\ndef opensearch():\n from localstack.aws.proxy import AwsApiListener\n from localstack.services.opensearch.provider import OpensearchProvider\n\n provider = OpensearchProvider()\n return Service(\"opensearch\", listener=AwsApiListener(\"opensearch\", provider))\n\n\n@aws_provider()\ndef redshift():\n from localstack.services.redshift.provider import RedshiftProvider\n\n provider = RedshiftProvider()\n listener = AwsApiListener(\"redshift\", MotoFallbackDispatcher(provider))\n\n return Service(\"redshift\", listener=listener)\n\n\n@aws_provider()\ndef route53():\n from localstack.services.route53.provider import Route53Provider\n\n provider = Route53Provider()\n\n return Service(\"route53\", listener=AwsApiListener(\"route53\", MotoFallbackDispatcher(provider)))\n\n\n@aws_provider()\ndef route53resolver():\n from localstack.services.route53.provider import Route53ResolverApi\n\n provider = Route53ResolverApi()\n\n return Service(\n \"route53resolver\",\n listener=AwsApiListener(\"route53resolver\", MotoFallbackDispatcher(provider)),\n )\n\n\n@aws_provider()\ndef s3():\n from localstack.services.s3 import s3_listener, s3_starter\n\n return Service(\n \"s3\", listener=s3_listener.UPDATE_S3, start=s3_starter.start_s3, check=s3_starter.check_s3\n )\n\n\n@aws_provider()\ndef s3control():\n from localstack.services.moto import MotoFallbackDispatcher\n from localstack.services.s3control.provider import S3ControlProvider\n\n provider = S3ControlProvider()\n return Service(\n \"s3control\", listener=AwsApiListener(\"s3control\", MotoFallbackDispatcher(provider))\n )\n\n\n@aws_provider()\ndef secretsmanager():\n from localstack.services.moto import MotoFallbackDispatcher\n from localstack.services.secretsmanager.provider import SecretsmanagerProvider\n\n provider = SecretsmanagerProvider()\n return Service(\n \"secretsmanager\",\n listener=AwsApiListener(\"secretsmanager\", MotoFallbackDispatcher(provider)),\n )\n\n\n@aws_provider()\ndef ses():\n from localstack.services.ses import ses_listener, ses_starter\n\n return Service(\"ses\", listener=ses_listener.UPDATE_SES, start=ses_starter.start_ses)\n\n\n@aws_provider()\ndef sns():\n from localstack.services.sns import sns_listener, sns_starter\n\n return Service(\"sns\", listener=sns_listener.UPDATE_SNS, start=sns_starter.start_sns)\n\n\n@aws_provider()\ndef sqs():\n from localstack.services.sqs import sqs_listener, sqs_starter\n\n return Service(\n \"sqs\",\n listener=sqs_listener.UPDATE_SQS,\n start=sqs_starter.start_sqs,\n 
check=sqs_starter.check_sqs,\n )\n\n\n@aws_provider(api=\"sqs\", name=\"asf\")\ndef sqs_asf():\n from localstack.aws.proxy import AwsApiListener\n from localstack.services.sqs.provider import SqsProvider\n\n provider = SqsProvider()\n\n return Service(\"sqs\", listener=AwsApiListener(\"sqs\", provider), lifecycle_hook=provider)\n\n\n@aws_provider()\ndef ssm():\n from localstack.services.moto import MotoFallbackDispatcher\n from localstack.services.ssm.provider import SsmProvider\n\n provider = SsmProvider()\n return Service(\n \"ssm\",\n listener=AwsApiListener(\"ssm\", MotoFallbackDispatcher(provider)),\n )\n\n\n@aws_provider()\ndef events():\n from localstack.services.events import events_listener, events_starter\n\n return Service(\n \"events\", listener=events_listener.UPDATE_EVENTS, start=events_starter.start_events\n )\n\n\n@aws_provider()\ndef stepfunctions():\n from localstack.services.stepfunctions import stepfunctions_listener, stepfunctions_starter\n\n return Service(\n \"stepfunctions\",\n listener=stepfunctions_listener.UPDATE_STEPFUNCTIONS,\n start=stepfunctions_starter.start_stepfunctions,\n check=stepfunctions_starter.check_stepfunctions,\n )\n\n\n@aws_provider()\ndef swf():\n from localstack.services.moto import MotoFallbackDispatcher\n from localstack.services.swf.provider import SWFProvider\n\n provider = SWFProvider()\n return Service(\n \"swf\",\n listener=AwsApiListener(\"swf\", MotoFallbackDispatcher(provider)),\n )\n\n\n@aws_provider()\ndef resourcegroupstaggingapi():\n from localstack.services.moto import MotoFallbackDispatcher\n from localstack.services.resourcegroupstaggingapi.provider import (\n ResourcegroupstaggingapiProvider,\n )\n\n provider = ResourcegroupstaggingapiProvider()\n return Service(\n \"resourcegroupstaggingapi\",\n listener=AwsApiListener(\"resourcegroupstaggingapi\", MotoFallbackDispatcher(provider)),\n )\n\n\n@aws_provider(api=\"resource-groups\")\ndef resource_groups():\n from localstack.services.moto import MotoFallbackDispatcher\n from localstack.services.resourcegroups.provider import ResourceGroupsProvider\n\n provider = ResourceGroupsProvider()\n return Service(\n \"resource-groups\",\n listener=AwsApiListener(\"resource-groups\", MotoFallbackDispatcher(provider)),\n )\n\n\n@aws_provider()\ndef support():\n from localstack.services.moto import MotoFallbackDispatcher\n from localstack.services.support.provider import SupportProvider\n\n provider = SupportProvider()\n return Service(\n \"support\",\n listener=AwsApiListener(\"support\", MotoFallbackDispatcher(provider)),\n )\n", "path": "localstack/services/providers.py" } ]
[ { "content": "from localstack import config\nfrom localstack.aws.proxy import AwsApiListener\nfrom localstack.services.moto import MotoFallbackDispatcher\nfrom localstack.services.plugins import Service, aws_provider\n\n\n@aws_provider()\ndef acm():\n from localstack.services.acm.provider import AcmProvider\n from localstack.services.moto import MotoFallbackDispatcher\n\n provider = AcmProvider()\n\n return Service(\"acm\", listener=AwsApiListener(\"acm\", MotoFallbackDispatcher(provider)))\n\n\n@aws_provider()\ndef apigateway():\n from localstack.services.apigateway import apigateway_listener, apigateway_starter\n\n return Service(\n \"apigateway\",\n listener=apigateway_listener.UPDATE_APIGATEWAY,\n start=apigateway_starter.start_apigateway,\n )\n\n\n@aws_provider()\ndef cloudformation():\n from localstack.services.cloudformation import cloudformation_starter\n\n return Service(\"cloudformation\", start=cloudformation_starter.start_cloudformation)\n\n\n@aws_provider(api=\"config\")\ndef awsconfig():\n from localstack.services.configservice.provider import ConfigProvider\n from localstack.services.moto import MotoFallbackDispatcher\n\n provider = ConfigProvider()\n return Service(\"config\", listener=AwsApiListener(\"config\", MotoFallbackDispatcher(provider)))\n\n\n@aws_provider()\ndef cloudwatch():\n from localstack.services.cloudwatch.provider import CloudwatchProvider\n from localstack.services.moto import MotoFallbackDispatcher\n\n provider = CloudwatchProvider()\n listener = AwsApiListener(\"cloudwatch\", MotoFallbackDispatcher(provider))\n\n return Service(\n \"cloudwatch\",\n listener=listener,\n lifecycle_hook=provider,\n )\n\n\n@aws_provider()\ndef dynamodb():\n from localstack.services.dynamodb import dynamodb_listener, dynamodb_starter\n\n return Service(\n \"dynamodb\",\n listener=dynamodb_listener.UPDATE_DYNAMODB,\n start=dynamodb_starter.start_dynamodb,\n check=dynamodb_starter.check_dynamodb,\n )\n\n\n@aws_provider()\ndef dynamodbstreams():\n from localstack.aws.proxy import AwsApiListener\n from localstack.services.dynamodbstreams.provider import DynamoDBStreamsProvider\n\n provider = DynamoDBStreamsProvider()\n return Service(\n \"dynamodbstreams\",\n listener=AwsApiListener(\"dynamodbstreams\", provider),\n lifecycle_hook=provider,\n )\n\n\n@aws_provider()\ndef ec2():\n from localstack.services.ec2.provider import Ec2Provider\n from localstack.services.moto import MotoFallbackDispatcher\n\n provider = Ec2Provider()\n return Service(\n \"ec2\",\n listener=AwsApiListener(\"ec2\", MotoFallbackDispatcher(provider)),\n )\n\n\n@aws_provider()\ndef es():\n from localstack.aws.proxy import AwsApiListener\n from localstack.services.es.provider import EsProvider\n\n provider = EsProvider()\n return Service(\"es\", listener=AwsApiListener(\"es\", provider))\n\n\n@aws_provider()\ndef firehose():\n from localstack.aws.proxy import AwsApiListener\n from localstack.services.firehose.provider import FirehoseProvider\n\n provider = FirehoseProvider()\n return Service(\"firehose\", listener=AwsApiListener(\"firehose\", provider))\n\n\n@aws_provider()\ndef iam():\n from localstack.services.iam import iam_listener, iam_starter\n\n return Service(\"iam\", listener=iam_listener.UPDATE_IAM, start=iam_starter.start_iam)\n\n\n@aws_provider()\ndef sts():\n from localstack.services.sts.provider import StsAwsApiListener\n\n listener = StsAwsApiListener()\n return Service(\"sts\", listener=listener)\n\n\n@aws_provider()\ndef kinesis():\n from localstack.services.kinesis import kinesis_listener, 
kinesis_starter\n\n return Service(\n \"kinesis\",\n listener=kinesis_listener.UPDATE_KINESIS,\n start=kinesis_starter.start_kinesis,\n check=kinesis_starter.check_kinesis,\n )\n\n\n@aws_provider()\ndef kms():\n if config.KMS_PROVIDER == \"local-kms\":\n from localstack.services.kms import kms_starter\n\n return Service(\"kms\", start=kms_starter.start_kms_local)\n\n # fall back to default provider\n from localstack.services.kms.provider import KmsProvider\n\n provider = KmsProvider()\n return Service(\"kms\", listener=AwsApiListener(\"kms\", MotoFallbackDispatcher(provider)))\n\n\n@aws_provider(api=\"lambda\")\ndef awslambda():\n from localstack.services.awslambda import lambda_starter\n\n return Service(\n \"lambda\",\n start=lambda_starter.start_lambda,\n stop=lambda_starter.stop_lambda,\n check=lambda_starter.check_lambda,\n )\n\n\n@aws_provider()\ndef logs():\n from localstack.services.logs.provider import LogsAwsApiListener\n\n listener = LogsAwsApiListener()\n return Service(\"logs\", listener=listener)\n\n\n@aws_provider()\ndef opensearch():\n from localstack.aws.proxy import AwsApiListener\n from localstack.services.opensearch.provider import OpensearchProvider\n\n provider = OpensearchProvider()\n return Service(\"opensearch\", listener=AwsApiListener(\"opensearch\", provider))\n\n\n@aws_provider()\ndef redshift():\n from localstack.services.redshift.provider import RedshiftProvider\n\n provider = RedshiftProvider()\n listener = AwsApiListener(\"redshift\", MotoFallbackDispatcher(provider))\n\n return Service(\"redshift\", listener=listener)\n\n\n@aws_provider()\ndef route53():\n from localstack.services.route53.provider import Route53Provider\n\n provider = Route53Provider()\n\n return Service(\"route53\", listener=AwsApiListener(\"route53\", MotoFallbackDispatcher(provider)))\n\n\n@aws_provider()\ndef route53resolver():\n from localstack.services.route53.provider import Route53ResolverApi\n\n provider = Route53ResolverApi()\n\n return Service(\n \"route53resolver\",\n listener=AwsApiListener(\"route53resolver\", MotoFallbackDispatcher(provider)),\n )\n\n\n@aws_provider()\ndef s3():\n from localstack.services.s3 import s3_listener, s3_starter\n\n return Service(\n \"s3\", listener=s3_listener.UPDATE_S3, start=s3_starter.start_s3, check=s3_starter.check_s3\n )\n\n\n@aws_provider()\ndef s3control():\n from localstack.services.moto import MotoFallbackDispatcher\n from localstack.services.s3control.provider import S3ControlProvider\n\n provider = S3ControlProvider()\n return Service(\n \"s3control\", listener=AwsApiListener(\"s3control\", MotoFallbackDispatcher(provider))\n )\n\n\n@aws_provider()\ndef secretsmanager():\n from localstack.services.moto import MotoFallbackDispatcher\n from localstack.services.secretsmanager.provider import SecretsmanagerProvider\n\n provider = SecretsmanagerProvider()\n return Service(\n \"secretsmanager\",\n listener=AwsApiListener(\"secretsmanager\", MotoFallbackDispatcher(provider)),\n )\n\n\n@aws_provider()\ndef ses():\n from localstack.services.ses import ses_listener, ses_starter\n\n return Service(\"ses\", listener=ses_listener.UPDATE_SES, start=ses_starter.start_ses)\n\n\n@aws_provider()\ndef sns():\n from localstack.services.sns import sns_listener, sns_starter\n\n return Service(\"sns\", listener=sns_listener.UPDATE_SNS, start=sns_starter.start_sns)\n\n\n@aws_provider()\ndef sqs():\n from localstack.services.sqs import sqs_listener, sqs_starter\n\n return Service(\n \"sqs\",\n listener=sqs_listener.UPDATE_SQS,\n start=sqs_starter.start_sqs,\n 
check=sqs_starter.check_sqs,\n )\n\n\n@aws_provider(api=\"sqs\", name=\"asf\")\ndef sqs_asf():\n from localstack.aws.proxy import AwsApiListener\n from localstack.services.sqs.provider import SqsProvider\n\n provider = SqsProvider()\n\n return Service(\"sqs\", listener=AwsApiListener(\"sqs\", provider), lifecycle_hook=provider)\n\n\n@aws_provider()\ndef ssm():\n from localstack.services.moto import MotoFallbackDispatcher\n from localstack.services.ssm.provider import SsmProvider\n\n provider = SsmProvider()\n return Service(\n \"ssm\",\n listener=AwsApiListener(\"ssm\", MotoFallbackDispatcher(provider)),\n )\n\n\n@aws_provider()\ndef events():\n from localstack.services.events import events_listener, events_starter\n\n return Service(\n \"events\", listener=events_listener.UPDATE_EVENTS, start=events_starter.start_events\n )\n\n\n@aws_provider()\ndef stepfunctions():\n from localstack.services.stepfunctions import stepfunctions_listener, stepfunctions_starter\n\n return Service(\n \"stepfunctions\",\n listener=stepfunctions_listener.UPDATE_STEPFUNCTIONS,\n start=stepfunctions_starter.start_stepfunctions,\n check=stepfunctions_starter.check_stepfunctions,\n )\n\n\n@aws_provider()\ndef swf():\n from localstack.services.moto import MotoFallbackDispatcher\n from localstack.services.swf.provider import SWFProvider\n\n provider = SWFProvider()\n return Service(\n \"swf\",\n listener=AwsApiListener(\"swf\", MotoFallbackDispatcher(provider)),\n )\n\n\n@aws_provider()\ndef resourcegroupstaggingapi():\n from localstack.services.moto import MotoFallbackDispatcher\n from localstack.services.resourcegroupstaggingapi.provider import (\n ResourcegroupstaggingapiProvider,\n )\n\n provider = ResourcegroupstaggingapiProvider()\n return Service(\n \"resourcegroupstaggingapi\",\n listener=AwsApiListener(\"resourcegroupstaggingapi\", MotoFallbackDispatcher(provider)),\n )\n\n\n@aws_provider(api=\"resource-groups\")\ndef resource_groups():\n from localstack.services.moto import MotoFallbackDispatcher\n from localstack.services.resourcegroups.provider import ResourceGroupsProvider\n\n provider = ResourceGroupsProvider()\n return Service(\n \"resource-groups\",\n listener=AwsApiListener(\"resource-groups\", MotoFallbackDispatcher(provider)),\n )\n\n\n@aws_provider()\ndef support():\n from localstack.services.moto import MotoFallbackDispatcher\n from localstack.services.support.provider import SupportProvider\n\n provider = SupportProvider()\n return Service(\n \"support\",\n listener=AwsApiListener(\"support\", MotoFallbackDispatcher(provider)),\n )\n", "path": "localstack/services/providers.py" } ]
diff --git a/localstack/services/providers.py b/localstack/services/providers.py index 1aa898198d429..936d9536036cd 100644 --- a/localstack/services/providers.py +++ b/localstack/services/providers.py @@ -140,7 +140,7 @@ def kinesis(): @aws_provider() def kms(): - if config.KMS_PROVIDER == "kms-local": + if config.KMS_PROVIDER == "local-kms": from localstack.services.kms import kms_starter return Service("kms", start=kms_starter.start_kms_local)
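The root cause in the diff above is a transposed string literal (`"kms-local"` instead of `"local-kms"`). A hedged sketch of one way to make the comparison harder to get wrong (a hypothetical refactoring, not LocalStack code; the constant name is invented for the example):

```python
# Hypothetical variant of the provider hook: spell the flag value out once as a
# module-level constant so the comparison cannot drift from the documented
# KMS_PROVIDER=local-kms setting.
from localstack import config
from localstack.aws.proxy import AwsApiListener
from localstack.services.moto import MotoFallbackDispatcher
from localstack.services.plugins import Service, aws_provider

KMS_PROVIDER_LOCAL_KMS = "local-kms"


@aws_provider()
def kms():
    if config.KMS_PROVIDER == KMS_PROVIDER_LOCAL_KMS:
        from localstack.services.kms import kms_starter

        return Service("kms", start=kms_starter.start_kms_local)

    # fall back to the default moto-backed provider
    from localstack.services.kms.provider import KmsProvider

    provider = KmsProvider()
    return Service("kms", listener=AwsApiListener("kms", MotoFallbackDispatcher(provider)))
```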
pytorch__vision-2201
VideoClips Assertion Error Hello, I'm trying to load a big video. Following https://github.com/pytorch/vision/issues/1446 I used a VideoClips object, but it's crashing when trying to get clips with certain ids with this error: > AssertionError Traceback (most recent call last) > <ipython-input-9-6e97949ad7f5> in <module>() > ----> 1 x = video_clips.get_clip(1) > > /usr/local/lib/python3.6/dist-packages/torchvision/datasets/video_utils.py in get_clip(self, idx) > 324 video = video[resampling_idx] > 325 info["video_fps"] = self.frame_rate > --> 326 assert len(video) == self.num_frames, "{} x {}".format(video.shape, self.num_frames) > 327 return video, audio, info, video_idx > > AssertionError: torch.Size([0, 1, 1, 3]) x 32 The code I use is just this: ``` from torchvision.datasets.video_utils import VideoClips video_clips = VideoClips(["test_video.mp4"], clip_length_in_frames=32, frames_between_clips=32) for i in range(video_clips.num_clips()): x = video_clips.get_clip(i) ``` `video_clips.num_clips()` is much bigger than the ids that are failing. Changing the clip_length or frames_between doesn't help. Checking the code I see [0,1,1,3] is returned by `read_video` when no vframes are read: https://github.com/pytorch/vision/blob/85b8fbfd31e9324e64e24ca25410284ef238bcb3/torchvision/io/video.py#L251-L254 But for some clip ids and clip_lengths it's just that the sizes don't match, as the assertion error is something like this `AssertionError: torch.Size([19, 360, 640, 3]) x 128` I followed the issue to `_read_from_stream` and checked that no AV exceptions were raised. And running this part of the function: https://github.com/pytorch/vision/blob/85b8fbfd31e9324e64e24ca25410284ef238bcb3/torchvision/io/video.py#L144-L150 I saw that for a `start_pts=32032`, `end_pts=63063` it returned just one frame in `frames` with `pts=237237`, which is later discarded as it's a lot bigger than `end_pts`. Also, the `stream.time_base` is `Fraction(1, 24000)` which doesn't match the start and end pts provided by VideoClips. So it seems there is a problem with the seeking on my video. But it has a standard h264 encoding and I have no problem reading it sequentially with pyav. I'm wondering if I'm doing something wrong or there might be an issue with the `read_video` seeking (as the warning says it should be using seconds?). This is the video info according to ffmpeg: > Metadata: > major_brand : mp42 > minor_version : 0 > compatible_brands: mp42isom > creation_time : 2016-10-10T15:36:46.000000Z > Duration: 00:21:24.37, start: 0.000000, bitrate: 1002 kb/s > Stream #0:0(und): Video: h264 (Main) (avc1 / 0x31637661), yuv420p, 640x360 [SAR 1:1 DAR 16:9], 900 kb/s, 23.98 fps, 23.98 tbr, 24k tbn, 47.95 tbc (default) > Metadata: > handler_name : Telestream Inc. Telestream Media Framework - Release TXGP 2016.42.192059 > encoder : AVC > Stream #0:1(eng): Audio: aac (LC) (mp4a / 0x6134706D), 48000 Hz, stereo, fltp, 93 kb/s (default) > Metadata: > handler_name : Telestream Inc. Telestream Media Framework - Release TXGP 2016.42.192059 Thanks!
[ { "content": "import gc\nimport math\nimport re\nimport warnings\nfrom typing import Tuple, List\n\nimport numpy as np\nimport torch\n\nfrom . import _video_opt\nfrom ._video_opt import VideoMetaData\n\n\ntry:\n import av\n\n av.logging.set_level(av.logging.ERROR)\n if not hasattr(av.video.frame.VideoFrame, \"pict_type\"):\n av = ImportError(\n \"\"\"\\\nYour version of PyAV is too old for the necessary video operations in torchvision.\nIf you are on Python 3.5, you will have to build from source (the conda-forge\npackages are not up-to-date). See\nhttps://github.com/mikeboers/PyAV#installation for instructions on how to\ninstall PyAV on your system.\n\"\"\"\n )\nexcept ImportError:\n av = ImportError(\n \"\"\"\\\nPyAV is not installed, and is necessary for the video operations in torchvision.\nSee https://github.com/mikeboers/PyAV#installation for instructions on how to\ninstall PyAV on your system.\n\"\"\"\n )\n\n\ndef _check_av_available():\n if isinstance(av, Exception):\n raise av\n\n\ndef _av_available():\n return not isinstance(av, Exception)\n\n\n# PyAV has some reference cycles\n_CALLED_TIMES = 0\n_GC_COLLECTION_INTERVAL = 10\n\n\ndef write_video(filename, video_array, fps, video_codec=\"libx264\", options=None):\n \"\"\"\n Writes a 4d tensor in [T, H, W, C] format in a video file\n\n Parameters\n ----------\n filename : str\n path where the video will be saved\n video_array : Tensor[T, H, W, C]\n tensor containing the individual frames, as a uint8 tensor in [T, H, W, C] format\n fps : Number\n frames per second\n \"\"\"\n _check_av_available()\n video_array = torch.as_tensor(video_array, dtype=torch.uint8).numpy()\n\n container = av.open(filename, mode=\"w\")\n\n stream = container.add_stream(video_codec, rate=fps)\n stream.width = video_array.shape[2]\n stream.height = video_array.shape[1]\n stream.pix_fmt = \"yuv420p\" if video_codec != \"libx264rgb\" else \"rgb24\"\n stream.options = options or {}\n\n for img in video_array:\n frame = av.VideoFrame.from_ndarray(img, format=\"rgb24\")\n frame.pict_type = \"NONE\"\n for packet in stream.encode(frame):\n container.mux(packet)\n\n # Flush stream\n for packet in stream.encode():\n container.mux(packet)\n\n # Close the file\n container.close()\n\n\ndef _read_from_stream(\n container, start_offset, end_offset, pts_unit, stream, stream_name\n):\n global _CALLED_TIMES, _GC_COLLECTION_INTERVAL\n _CALLED_TIMES += 1\n if _CALLED_TIMES % _GC_COLLECTION_INTERVAL == _GC_COLLECTION_INTERVAL - 1:\n gc.collect()\n\n if pts_unit == \"sec\":\n start_offset = int(math.floor(start_offset * (1 / stream.time_base)))\n if end_offset != float(\"inf\"):\n end_offset = int(math.ceil(end_offset * (1 / stream.time_base)))\n else:\n warnings.warn(\n \"The pts_unit 'pts' gives wrong results and will be removed in a \"\n + \"follow-up version. 
Please use pts_unit 'sec'.\"\n )\n\n frames = {}\n should_buffer = False\n max_buffer_size = 5\n if stream.type == \"video\":\n # DivX-style packed B-frames can have out-of-order pts (2 frames in a single pkt)\n # so need to buffer some extra frames to sort everything\n # properly\n extradata = stream.codec_context.extradata\n # overly complicated way of finding if `divx_packed` is set, following\n # https://github.com/FFmpeg/FFmpeg/commit/d5a21172283572af587b3d939eba0091484d3263\n if extradata and b\"DivX\" in extradata:\n # can't use regex directly because of some weird characters sometimes...\n pos = extradata.find(b\"DivX\")\n d = extradata[pos:]\n o = re.search(br\"DivX(\\d+)Build(\\d+)(\\w)\", d)\n if o is None:\n o = re.search(br\"DivX(\\d+)b(\\d+)(\\w)\", d)\n if o is not None:\n should_buffer = o.group(3) == b\"p\"\n seek_offset = start_offset\n # some files don't seek to the right location, so better be safe here\n seek_offset = max(seek_offset - 1, 0)\n if should_buffer:\n # FIXME this is kind of a hack, but we will jump to the previous keyframe\n # so this will be safe\n seek_offset = max(seek_offset - max_buffer_size, 0)\n try:\n # TODO check if stream needs to always be the video stream here or not\n container.seek(seek_offset, any_frame=False, backward=True, stream=stream)\n except av.AVError:\n # TODO add some warnings in this case\n # print(\"Corrupted file?\", container.name)\n return []\n buffer_count = 0\n try:\n for _idx, frame in enumerate(container.decode(**stream_name)):\n frames[frame.pts] = frame\n if frame.pts >= end_offset:\n if should_buffer and buffer_count < max_buffer_size:\n buffer_count += 1\n continue\n break\n except av.AVError:\n # TODO add a warning\n pass\n # ensure that the results are sorted wrt the pts\n result = [\n frames[i] for i in sorted(frames) if start_offset <= frames[i].pts <= end_offset\n ]\n if len(frames) > 0 and start_offset > 0 and start_offset not in frames:\n # if there is no frame that exactly matches the pts of start_offset\n # add the last frame smaller than start_offset, to guarantee that\n # we will have all the necessary data. This is most useful for audio\n preceding_frames = [i for i in frames if i < start_offset]\n if len(preceding_frames) > 0:\n first_frame_pts = max(preceding_frames)\n result.insert(0, frames[first_frame_pts])\n return result\n\n\ndef _align_audio_frames(aframes, audio_frames, ref_start, ref_end):\n start, end = audio_frames[0].pts, audio_frames[-1].pts\n total_aframes = aframes.shape[1]\n step_per_aframe = (end - start + 1) / total_aframes\n s_idx = 0\n e_idx = total_aframes\n if start < ref_start:\n s_idx = int((ref_start - start) / step_per_aframe)\n if end > ref_end:\n e_idx = int((ref_end - end) / step_per_aframe)\n return aframes[:, s_idx:e_idx]\n\n\ndef read_video(filename, start_pts=0, end_pts=None, pts_unit=\"pts\"):\n \"\"\"\n Reads a video from a file, returning both the video frames as well as\n the audio frames\n\n Parameters\n ----------\n filename : str\n path to the video file\n start_pts : int if pts_unit = 'pts', optional\n float / Fraction if pts_unit = 'sec', optional\n the start presentation time of the video\n end_pts : int if pts_unit = 'pts', optional\n float / Fraction if pts_unit = 'sec', optional\n the end presentation time\n pts_unit : str, optional\n unit in which start_pts and end_pts values will be interpreted, either 'pts' or 'sec'. 
Defaults to 'pts'.\n\n Returns\n -------\n vframes : Tensor[T, H, W, C]\n the `T` video frames\n aframes : Tensor[K, L]\n the audio frames, where `K` is the number of channels and `L` is the\n number of points\n info : Dict\n metadata for the video and audio. Can contain the fields video_fps (float)\n and audio_fps (int)\n \"\"\"\n\n from torchvision import get_video_backend\n\n if get_video_backend() != \"pyav\":\n return _video_opt._read_video(filename, start_pts, end_pts, pts_unit)\n\n _check_av_available()\n\n if end_pts is None:\n end_pts = float(\"inf\")\n\n if end_pts < start_pts:\n raise ValueError(\n \"end_pts should be larger than start_pts, got \"\n \"start_pts={} and end_pts={}\".format(start_pts, end_pts)\n )\n\n info = {}\n video_frames = []\n audio_frames = []\n\n try:\n container = av.open(filename, metadata_errors=\"ignore\")\n except av.AVError:\n # TODO raise a warning?\n pass\n else:\n if container.streams.video:\n video_frames = _read_from_stream(\n container,\n start_pts,\n end_pts,\n pts_unit,\n container.streams.video[0],\n {\"video\": 0},\n )\n video_fps = container.streams.video[0].average_rate\n # guard against potentially corrupted files\n if video_fps is not None:\n info[\"video_fps\"] = float(video_fps)\n\n if container.streams.audio:\n audio_frames = _read_from_stream(\n container,\n start_pts,\n end_pts,\n pts_unit,\n container.streams.audio[0],\n {\"audio\": 0},\n )\n info[\"audio_fps\"] = container.streams.audio[0].rate\n\n container.close()\n\n vframes = [frame.to_rgb().to_ndarray() for frame in video_frames]\n aframes = [frame.to_ndarray() for frame in audio_frames]\n\n if vframes:\n vframes = torch.as_tensor(np.stack(vframes))\n else:\n vframes = torch.empty((0, 1, 1, 3), dtype=torch.uint8)\n\n if aframes:\n aframes = np.concatenate(aframes, 1)\n aframes = torch.as_tensor(aframes)\n aframes = _align_audio_frames(aframes, audio_frames, start_pts, end_pts)\n else:\n aframes = torch.empty((1, 0), dtype=torch.float32)\n\n return vframes, aframes, info\n\n\ndef _can_read_timestamps_from_packets(container):\n extradata = container.streams[0].codec_context.extradata\n if extradata is None:\n return False\n if b\"Lavc\" in extradata:\n return True\n return False\n\n\ndef read_video_timestamps(filename, pts_unit=\"pts\"):\n \"\"\"\n List the video frames timestamps.\n\n Note that the function decodes the whole video frame-by-frame.\n\n Parameters\n ----------\n filename : str\n path to the video file\n pts_unit : str, optional\n unit in which timestamp values will be returned either 'pts' or 'sec'. 
Defaults to 'pts'.\n\n Returns\n -------\n pts : List[int] if pts_unit = 'pts'\n List[Fraction] if pts_unit = 'sec'\n presentation timestamps for each one of the frames in the video.\n video_fps : int\n the frame rate for the video\n\n \"\"\"\n from torchvision import get_video_backend\n\n if get_video_backend() != \"pyav\":\n return _video_opt._read_video_timestamps(filename, pts_unit)\n\n _check_av_available()\n\n video_frames = []\n video_fps = None\n\n try:\n container = av.open(filename, metadata_errors=\"ignore\")\n except av.AVError:\n # TODO add a warning\n pass\n else:\n if container.streams.video:\n video_stream = container.streams.video[0]\n video_time_base = video_stream.time_base\n if _can_read_timestamps_from_packets(container):\n # fast path\n video_frames = [\n x for x in container.demux(video=0) if x.pts is not None\n ]\n else:\n video_frames = _read_from_stream(\n container, 0, float(\"inf\"), pts_unit, video_stream, {\"video\": 0}\n )\n video_fps = float(video_stream.average_rate)\n container.close()\n\n pts = [x.pts for x in video_frames]\n\n if pts_unit == \"sec\":\n pts = [x * video_time_base for x in pts]\n\n return pts, video_fps\n", "path": "torchvision/io/video.py" } ]
[ { "content": "import gc\nimport math\nimport re\nimport warnings\nfrom typing import Tuple, List\n\nimport numpy as np\nimport torch\n\nfrom . import _video_opt\nfrom ._video_opt import VideoMetaData\n\n\ntry:\n import av\n\n av.logging.set_level(av.logging.ERROR)\n if not hasattr(av.video.frame.VideoFrame, \"pict_type\"):\n av = ImportError(\n \"\"\"\\\nYour version of PyAV is too old for the necessary video operations in torchvision.\nIf you are on Python 3.5, you will have to build from source (the conda-forge\npackages are not up-to-date). See\nhttps://github.com/mikeboers/PyAV#installation for instructions on how to\ninstall PyAV on your system.\n\"\"\"\n )\nexcept ImportError:\n av = ImportError(\n \"\"\"\\\nPyAV is not installed, and is necessary for the video operations in torchvision.\nSee https://github.com/mikeboers/PyAV#installation for instructions on how to\ninstall PyAV on your system.\n\"\"\"\n )\n\n\ndef _check_av_available():\n if isinstance(av, Exception):\n raise av\n\n\ndef _av_available():\n return not isinstance(av, Exception)\n\n\n# PyAV has some reference cycles\n_CALLED_TIMES = 0\n_GC_COLLECTION_INTERVAL = 10\n\n\ndef write_video(filename, video_array, fps, video_codec=\"libx264\", options=None):\n \"\"\"\n Writes a 4d tensor in [T, H, W, C] format in a video file\n\n Parameters\n ----------\n filename : str\n path where the video will be saved\n video_array : Tensor[T, H, W, C]\n tensor containing the individual frames, as a uint8 tensor in [T, H, W, C] format\n fps : Number\n frames per second\n \"\"\"\n _check_av_available()\n video_array = torch.as_tensor(video_array, dtype=torch.uint8).numpy()\n\n container = av.open(filename, mode=\"w\")\n\n stream = container.add_stream(video_codec, rate=fps)\n stream.width = video_array.shape[2]\n stream.height = video_array.shape[1]\n stream.pix_fmt = \"yuv420p\" if video_codec != \"libx264rgb\" else \"rgb24\"\n stream.options = options or {}\n\n for img in video_array:\n frame = av.VideoFrame.from_ndarray(img, format=\"rgb24\")\n frame.pict_type = \"NONE\"\n for packet in stream.encode(frame):\n container.mux(packet)\n\n # Flush stream\n for packet in stream.encode():\n container.mux(packet)\n\n # Close the file\n container.close()\n\n\ndef _read_from_stream(\n container, start_offset, end_offset, pts_unit, stream, stream_name\n):\n global _CALLED_TIMES, _GC_COLLECTION_INTERVAL\n _CALLED_TIMES += 1\n if _CALLED_TIMES % _GC_COLLECTION_INTERVAL == _GC_COLLECTION_INTERVAL - 1:\n gc.collect()\n\n if pts_unit == \"sec\":\n start_offset = int(math.floor(start_offset * (1 / stream.time_base)))\n if end_offset != float(\"inf\"):\n end_offset = int(math.ceil(end_offset * (1 / stream.time_base)))\n else:\n warnings.warn(\n \"The pts_unit 'pts' gives wrong results and will be removed in a \"\n + \"follow-up version. 
Please use pts_unit 'sec'.\"\n )\n\n frames = {}\n should_buffer = True\n max_buffer_size = 5\n if stream.type == \"video\":\n # DivX-style packed B-frames can have out-of-order pts (2 frames in a single pkt)\n # so need to buffer some extra frames to sort everything\n # properly\n extradata = stream.codec_context.extradata\n # overly complicated way of finding if `divx_packed` is set, following\n # https://github.com/FFmpeg/FFmpeg/commit/d5a21172283572af587b3d939eba0091484d3263\n if extradata and b\"DivX\" in extradata:\n # can't use regex directly because of some weird characters sometimes...\n pos = extradata.find(b\"DivX\")\n d = extradata[pos:]\n o = re.search(br\"DivX(\\d+)Build(\\d+)(\\w)\", d)\n if o is None:\n o = re.search(br\"DivX(\\d+)b(\\d+)(\\w)\", d)\n if o is not None:\n should_buffer = o.group(3) == b\"p\"\n seek_offset = start_offset\n # some files don't seek to the right location, so better be safe here\n seek_offset = max(seek_offset - 1, 0)\n if should_buffer:\n # FIXME this is kind of a hack, but we will jump to the previous keyframe\n # so this will be safe\n seek_offset = max(seek_offset - max_buffer_size, 0)\n try:\n # TODO check if stream needs to always be the video stream here or not\n container.seek(seek_offset, any_frame=False, backward=True, stream=stream)\n except av.AVError:\n # TODO add some warnings in this case\n # print(\"Corrupted file?\", container.name)\n return []\n buffer_count = 0\n try:\n for _idx, frame in enumerate(container.decode(**stream_name)):\n frames[frame.pts] = frame\n if frame.pts >= end_offset:\n if should_buffer and buffer_count < max_buffer_size:\n buffer_count += 1\n continue\n break\n except av.AVError:\n # TODO add a warning\n pass\n # ensure that the results are sorted wrt the pts\n result = [\n frames[i] for i in sorted(frames) if start_offset <= frames[i].pts <= end_offset\n ]\n if len(frames) > 0 and start_offset > 0 and start_offset not in frames:\n # if there is no frame that exactly matches the pts of start_offset\n # add the last frame smaller than start_offset, to guarantee that\n # we will have all the necessary data. This is most useful for audio\n preceding_frames = [i for i in frames if i < start_offset]\n if len(preceding_frames) > 0:\n first_frame_pts = max(preceding_frames)\n result.insert(0, frames[first_frame_pts])\n return result\n\n\ndef _align_audio_frames(aframes, audio_frames, ref_start, ref_end):\n start, end = audio_frames[0].pts, audio_frames[-1].pts\n total_aframes = aframes.shape[1]\n step_per_aframe = (end - start + 1) / total_aframes\n s_idx = 0\n e_idx = total_aframes\n if start < ref_start:\n s_idx = int((ref_start - start) / step_per_aframe)\n if end > ref_end:\n e_idx = int((ref_end - end) / step_per_aframe)\n return aframes[:, s_idx:e_idx]\n\n\ndef read_video(filename, start_pts=0, end_pts=None, pts_unit=\"pts\"):\n \"\"\"\n Reads a video from a file, returning both the video frames as well as\n the audio frames\n\n Parameters\n ----------\n filename : str\n path to the video file\n start_pts : int if pts_unit = 'pts', optional\n float / Fraction if pts_unit = 'sec', optional\n the start presentation time of the video\n end_pts : int if pts_unit = 'pts', optional\n float / Fraction if pts_unit = 'sec', optional\n the end presentation time\n pts_unit : str, optional\n unit in which start_pts and end_pts values will be interpreted, either 'pts' or 'sec'. 
Defaults to 'pts'.\n\n Returns\n -------\n vframes : Tensor[T, H, W, C]\n the `T` video frames\n aframes : Tensor[K, L]\n the audio frames, where `K` is the number of channels and `L` is the\n number of points\n info : Dict\n metadata for the video and audio. Can contain the fields video_fps (float)\n and audio_fps (int)\n \"\"\"\n\n from torchvision import get_video_backend\n\n if get_video_backend() != \"pyav\":\n return _video_opt._read_video(filename, start_pts, end_pts, pts_unit)\n\n _check_av_available()\n\n if end_pts is None:\n end_pts = float(\"inf\")\n\n if end_pts < start_pts:\n raise ValueError(\n \"end_pts should be larger than start_pts, got \"\n \"start_pts={} and end_pts={}\".format(start_pts, end_pts)\n )\n\n info = {}\n video_frames = []\n audio_frames = []\n\n try:\n container = av.open(filename, metadata_errors=\"ignore\")\n except av.AVError:\n # TODO raise a warning?\n pass\n else:\n if container.streams.video:\n video_frames = _read_from_stream(\n container,\n start_pts,\n end_pts,\n pts_unit,\n container.streams.video[0],\n {\"video\": 0},\n )\n video_fps = container.streams.video[0].average_rate\n # guard against potentially corrupted files\n if video_fps is not None:\n info[\"video_fps\"] = float(video_fps)\n\n if container.streams.audio:\n audio_frames = _read_from_stream(\n container,\n start_pts,\n end_pts,\n pts_unit,\n container.streams.audio[0],\n {\"audio\": 0},\n )\n info[\"audio_fps\"] = container.streams.audio[0].rate\n\n container.close()\n\n vframes = [frame.to_rgb().to_ndarray() for frame in video_frames]\n aframes = [frame.to_ndarray() for frame in audio_frames]\n\n if vframes:\n vframes = torch.as_tensor(np.stack(vframes))\n else:\n vframes = torch.empty((0, 1, 1, 3), dtype=torch.uint8)\n\n if aframes:\n aframes = np.concatenate(aframes, 1)\n aframes = torch.as_tensor(aframes)\n aframes = _align_audio_frames(aframes, audio_frames, start_pts, end_pts)\n else:\n aframes = torch.empty((1, 0), dtype=torch.float32)\n\n return vframes, aframes, info\n\n\ndef _can_read_timestamps_from_packets(container):\n extradata = container.streams[0].codec_context.extradata\n if extradata is None:\n return False\n if b\"Lavc\" in extradata:\n return True\n return False\n\n\ndef read_video_timestamps(filename, pts_unit=\"pts\"):\n \"\"\"\n List the video frames timestamps.\n\n Note that the function decodes the whole video frame-by-frame.\n\n Parameters\n ----------\n filename : str\n path to the video file\n pts_unit : str, optional\n unit in which timestamp values will be returned either 'pts' or 'sec'. 
Defaults to 'pts'.\n\n Returns\n -------\n pts : List[int] if pts_unit = 'pts'\n List[Fraction] if pts_unit = 'sec'\n presentation timestamps for each one of the frames in the video.\n video_fps : int\n the frame rate for the video\n\n \"\"\"\n from torchvision import get_video_backend\n\n if get_video_backend() != \"pyav\":\n return _video_opt._read_video_timestamps(filename, pts_unit)\n\n _check_av_available()\n\n video_frames = []\n video_fps = None\n\n try:\n container = av.open(filename, metadata_errors=\"ignore\")\n except av.AVError:\n # TODO add a warning\n pass\n else:\n if container.streams.video:\n video_stream = container.streams.video[0]\n video_time_base = video_stream.time_base\n if _can_read_timestamps_from_packets(container):\n # fast path\n video_frames = [\n x for x in container.demux(video=0) if x.pts is not None\n ]\n else:\n video_frames = _read_from_stream(\n container, 0, float(\"inf\"), pts_unit, video_stream, {\"video\": 0}\n )\n video_fps = float(video_stream.average_rate)\n container.close()\n\n pts = [x.pts for x in video_frames]\n\n if pts_unit == \"sec\":\n pts = [x * video_time_base for x in pts]\n\n return pts, video_fps\n", "path": "torchvision/io/video.py" } ]
diff --git a/torchvision/io/video.py b/torchvision/io/video.py
index 40d1cfeed85..f0a47d49faf 100644
--- a/torchvision/io/video.py
+++ b/torchvision/io/video.py
@@ -106,7 +106,7 @@ def _read_from_stream(
         )

     frames = {}
-    should_buffer = False
+    should_buffer = True
     max_buffer_size = 5
     if stream.type == "video":
         # DivX-style packed B-frames can have out-of-order pts (2 frames in a single pkt)
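The one-line change above turns `should_buffer` on by default, so decoding continues a few frames past the requested end offset before the collected frames are sorted by pts. A rough, self-contained sketch of that idea (not the torchvision code itself; the `(pts, frame)` tuples and the helper name are invented for illustration):

```python
def collect_frames(decoded, start, end, max_buffer_size=5, should_buffer=True):
    """Gather frames whose pts fall in [start, end], tolerating out-of-order pts."""
    frames = {}
    buffered = 0
    for pts, frame in decoded:
        frames[pts] = frame
        if pts >= end:
            # keep decoding a little past `end` so late, out-of-order frames
            # (e.g. DivX-style packed B-frames) still land in the dict
            if should_buffer and buffered < max_buffer_size:
                buffered += 1
                continue
            break
    # sorting the collected pts restores presentation order
    return [frames[p] for p in sorted(frames) if start <= p <= end]


# tiny usage example with shuffled pts
stream = [(0, "f0"), (2, "f2"), (1, "f1"), (3, "f3"), (5, "f5"), (4, "f4")]
print(collect_frames(stream, 0, 4))  # ['f0', 'f1', 'f2', 'f3', 'f4']
```

With `should_buffer=False` the loop would stop at the first pts past `end` and silently drop the late frame with pts 4, which is the behaviour the new default avoids.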
angr__angr-2265
ZeroDivisionError when performing Propagator analysis

<!--
*Disclaimer:
The angr suite is maintained by a small team of volunteers. While we cannot guarantee any timeliness for fixes and enhancements, we will do our best. For more real-time help with angr, from us and the community, join our [Slack.](http://angr.io/invite/)*
-->

---

**Describe the bug.**
<!-- Please include a clear and concise description of what the bug is. -->

When I try to perform propagator analysis against a mips32el binary, it throws a ZeroDivisionError exception.

Here is the test script:

```python
import angr
import IPython

example = "./httpd"
proj = angr.Project(example, auto_load_libs=False)
cfg = proj.analyses.CFGFast(show_progressbar=True)
func = cfg.functions[0x43b26c]
prop = proj.analyses.Propagator(func=func, only_consts=True)
replacements=prop.replacements
IPython.embed()
```

And the traceback:

```
Traceback (most recent call last):
  File "/mnt/data/karonte_proj/angr-dev/angr/test.py", line 12, in <module>
    prop = proj.analyses.Propagator(func=func, only_consts=True)
  File "/mnt/data/karonte_proj/angr-dev/angr/angr/analyses/analysis.py", line 115, in __call__
    oself.__init__(*args, **kwargs)
  File "/mnt/data/karonte_proj/angr-dev/angr/angr/analyses/propagator/propagator.py", line 355, in __init__
    self._analyze()
  File "/mnt/data/karonte_proj/angr-dev/angr/angr/analyses/propagator/propagator.py", line 495, in _analyze
    self._analysis_core_graph()
  File "/mnt/data/karonte_proj/angr-dev/angr/angr/analyses/forward_analysis/forward_analysis.py", line 240, in _analysis_core_graph
    changed, output_state = self._run_on_node(n, job_state)
  File "/mnt/data/karonte_proj/angr-dev/angr/angr/analyses/propagator/propagator.py", line 403, in _run_on_node
    load_callback=self._load_callback, fail_fast=self._fail_fast)
  File "/mnt/data/karonte_proj/angr-dev/angr/angr/analyses/propagator/engine_base.py", line 27, in process
    self._process(state, None, block=kwargs.pop('block', None))
  File "/mnt/data/karonte_proj/angr-dev/angr/angr/analyses/propagator/engine_vex.py", line 25, in _process
    super()._process(state, successors, block=block, whitelist=whitelist, **kwargs)
  File "/mnt/data/karonte_proj/angr-dev/angr/angr/engines/light/engine.py", line 83, in _process
    self._process_Stmt(whitelist=whitelist)
  File "/mnt/data/karonte_proj/angr-dev/angr/angr/engines/light/engine.py", line 104, in _process_Stmt
    self._handle_Stmt(stmt)
  File "/mnt/data/karonte_proj/angr-dev/angr/angr/engines/light/engine.py", line 132, in _handle_Stmt
    getattr(self, handler)(stmt)
  File "/mnt/data/karonte_proj/angr-dev/angr/angr/analyses/propagator/engine_vex.py", line 102, in _handle_WrTmp
    super()._handle_WrTmp(stmt)
  File "/mnt/data/karonte_proj/angr-dev/angr/angr/engines/light/engine.py", line 138, in _handle_WrTmp
    data = self._expr(stmt.data)
  File "/mnt/data/karonte_proj/angr-dev/angr/angr/analyses/propagator/engine_vex.py", line 45, in _expr
    v = super()._expr(expr)
  File "/mnt/data/karonte_proj/angr-dev/angr/angr/engines/light/engine.py", line 170, in _expr
    return getattr(self, handler)(expr)
  File "/mnt/data/karonte_proj/angr-dev/angr/angr/engines/light/engine.py", line 263, in _handle_Binop
    return getattr(self, handler)(expr)
  File "/mnt/data/karonte_proj/angr-dev/angr/angr/engines/light/engine.py", line 422, in _handle_Div
    return expr_0 // expr_1
ZeroDivisionError: integer division or modulo by zero

Process finished with exit code 1
```

**Environment Information.**
<!-- Many common issues are caused by problems with the local Python environment.
Before submitting, double-check that your versions of all modules in the angr suite (angr, cle, pyvex, ...) are up to date.
Please include the output of `python -m angr.misc.bug_report` here. -->

```
angr environment report
=============================
Date: 2020-07-17 11:34:11.000442
Running in virtual environment at /home/test/.virtualenvs/karonte3
Platform: linux-x86_64
Python version: 3.6.9 (default, Apr 18 2020, 01:56:04) [GCC 8.4.0]
######## angr #########
Python found it in /mnt/data/karonte_proj/angr-dev/angr/angr
Pip version angr 8.20.6.8
Git info:
Current commit f44d989b05e5b6825e8d7d100868dc85a8356bef from branch master
Checked out from remote origin: https://github.com/angr/angr-dev
######## ailment #########
Python found it in /mnt/data/karonte_proj/angr-dev/ailment/ailment
Pip version ailment 8.20.7.6
Git info:
Current commit 3490c152766fe7df04dfa922fa58c03166676944 from branch master
Checked out from remote origin: https://github.com/angr/ailment
######## cle #########
Python found it in /mnt/data/karonte_proj/angr-dev/cle/cle
Pip version cle 8.20.7.6
Git info:
Current commit 36f7a0e140fc96bb644f21d0c13d60713d6ae025 from branch master
Checked out from remote origin: https://github.com/angr/cle
######## pyvex #########
Python found it in /home/test/.virtualenvs/karonte3/lib/python3.6/site-packages/pyvex
Pip version pyvex 8.20.7.6
Couldn't find git info
######## claripy #########
Python found it in /mnt/data/karonte_proj/angr-dev/claripy/claripy
Pip version claripy 8.20.7.6
Git info:
Current commit f2c1998731efca4838a4edb9dec77e0424c5f691 from branch master
Checked out from remote origin: https://github.com/angr/claripy
######## archinfo #########
Python found it in /mnt/data/karonte_proj/angr-dev/archinfo/archinfo
Pip version archinfo 8.20.7.6
Git info:
Current commit c48e4b40a6a1f7edab6bd0597fe3d5dda1d73e62 from branch master
Checked out from remote origin: https://github.com/angr/archinfo
######## z3 #########
Python found it in /home/test/.virtualenvs/karonte3/lib/python3.6/site-packages/z3
Pip version z3-solver 4.8.8.0
Couldn't find git info
######## unicorn #########
Python found it in /home/test/.virtualenvs/karonte3/lib/python3.6/site-packages/unicorn
Pip version unicorn 1.0.2rc4
Couldn't find git info
######### Native Module Info ##########
angr: <CDLL '/mnt/data/karonte_proj/angr-dev/angr/angr/lib/angr_native.so', handle 219b540 at 0x7fdbe4d63c88>
unicorn: <CDLL '/home/test/.virtualenvs/karonte3/lib/python3.6/site-packages/unicorn/lib/libunicorn.so', handle 1345d20 at 0x7fdbe3d19be0>
pyvex: <cffi.api._make_ffi_library.<locals>.FFILibrary object at 0x7fdbe4c84668>
```

**To Reproduce.**
<!-- Please include *both a script to reproduce the crash, and attach the binary used, if possible* -->

Here is the binary used in the testcase.
[httpd.zip](https://github.com/angr/angr/files/4935609/httpd.zip)

**Additional context.**
<!-- Add any other context about the problem here. -->
[ { "content": "# pylint:disable=no-self-use\nfrom typing import Tuple\n\nimport logging\n\nimport ailment\nimport pyvex\nimport archinfo\n\nfrom ...engines.vex.claripy.irop import operations as vex_operations\nfrom ...code_location import CodeLocation\nfrom ...utils.constants import DEFAULT_STATEMENT\nfrom ..engine import SimEngine\n\n\nclass SimEngineLight(SimEngine):\n def __init__(self):\n super(SimEngineLight, self).__init__()\n\n self.l = logging.getLogger(self.__module__ + \".\" + self.__class__.__name__)\n\n # local variables\n self.state = None\n self.arch = None\n self.block = None\n self._call_stack = None\n\n self.stmt_idx = None\n self.ins_addr = None\n self.tmps = None\n\n # for VEX blocks only\n self.tyenv = None\n\n def process(self, state, *args, **kwargs):\n # we are using a completely different state. Therefore, we directly call our _process() method before\n # SimEngine becomes flexible enough.\n self._process(state, None, block=kwargs.pop('block', None), whitelist=kwargs.pop('whitelist', None))\n\n def _process(self, new_state, successors, *args, **kwargs):\n raise NotImplementedError()\n\n def _check(self, state, *args, **kwargs):\n return True\n\n #\n # Helper methods\n #\n\n @property\n def _context(self) -> Tuple[int]:\n if not self._call_stack:\n return tuple()\n\n # Convert to Tuple to make `context` hashable if not None\n call_stack_addresses = tuple(self._call_stack)\n return call_stack_addresses\n\n def _codeloc(self, block_only=False):\n return CodeLocation(self.block.addr,\n None if block_only else self.stmt_idx,\n ins_addr=None if block_only else self.ins_addr,\n context=self._context\n )\n\n\nclass SimEngineLightVEXMixin:\n\n def _process(self, state, successors, *args, block=None, whitelist=None, **kwargs): # pylint:disable=arguments-differ,unused-argument\n\n assert block is not None\n\n # initialize local variables\n self.tmps = {}\n self.block = block\n self.state = state\n\n if state is not None:\n self.arch: archinfo.Arch = state.arch\n\n self.tyenv = block.vex.tyenv\n\n self._process_Stmt(whitelist=whitelist)\n\n self.stmt_idx = None\n self.ins_addr = None\n\n def _process_Stmt(self, whitelist=None):\n\n if whitelist is not None:\n # optimize whitelist lookups\n whitelist = set(whitelist)\n\n for stmt_idx, stmt in enumerate(self.block.vex.statements):\n if whitelist is not None and stmt_idx not in whitelist:\n continue\n self.stmt_idx = stmt_idx\n\n if type(stmt) is pyvex.IRStmt.IMark:\n # Note that we cannot skip IMarks as they are used later to trigger observation events\n # The bug caused by skipping IMarks is reported at https://github.com/angr/angr/pull/1150\n self.ins_addr = stmt.addr + stmt.delta\n\n self._handle_Stmt(stmt)\n\n self._process_block_end()\n\n def _process_block_end(self):\n # handle calls to another function\n # Note that without global information, we cannot handle cases where we *jump* to another function (jumpkind ==\n # \"Ijk_Boring\"). 
Users are supposed to overwrite this method, detect these cases with the help of global\n # information (such as CFG or symbol addresses), and handle them accordingly.\n if self.block.vex.jumpkind == 'Ijk_Call':\n self.stmt_idx = DEFAULT_STATEMENT\n handler = '_handle_function'\n if hasattr(self, handler):\n func_addr = self._expr(self.block.vex.next)\n if func_addr is not None:\n getattr(self, handler)(func_addr)\n else:\n self.l.debug('Cannot determine the callee address at %#x.', self.block.addr)\n else:\n self.l.warning('Function handler not implemented.')\n\n #\n # Statement handlers\n #\n\n def _handle_Stmt(self, stmt):\n handler = \"_handle_%s\" % type(stmt).__name__\n if hasattr(self, handler):\n getattr(self, handler)(stmt)\n elif type(stmt).__name__ not in ('IMark', 'AbiHint'):\n self.l.error('Unsupported statement type %s.', type(stmt).__name__)\n\n # synchronize with function _handle_WrTmpData()\n def _handle_WrTmp(self, stmt):\n data = self._expr(stmt.data)\n if data is None:\n return\n\n self.tmps[stmt.tmp] = data\n\n # invoked by LoadG\n def _handle_WrTmpData(self, tmp, data):\n if data is None:\n return\n self.tmps[tmp] = data\n\n def _handle_Put(self, stmt):\n raise NotImplementedError('Please implement the Put handler with your own logic.')\n\n def _handle_Store(self, stmt):\n raise NotImplementedError('Please implement the Store handler with your own logic.')\n\n def _handle_StoreG(self, stmt):\n raise NotImplementedError('Please implement the StoreG handler with your own logic.')\n\n def _handle_LLSC(self, stmt: pyvex.IRStmt.LLSC):\n raise NotImplementedError('Please implement the LLSC handler with your own logic.')\n\n #\n # Expression handlers\n #\n\n def _expr(self, expr):\n\n handler = \"_handle_%s\" % type(expr).__name__\n if hasattr(self, handler):\n return getattr(self, handler)(expr)\n else:\n self.l.error('Unsupported expression type %s.', type(expr).__name__)\n return None\n\n def _handle_RdTmp(self, expr):\n tmp = expr.tmp\n\n if tmp in self.tmps:\n return self.tmps[tmp]\n return None\n\n def _handle_Get(self, expr):\n raise NotImplementedError('Please implement the Get handler with your own logic.')\n\n def _handle_Load(self, expr):\n raise NotImplementedError('Please implement the Load handler with your own logic.')\n\n def _handle_LoadG(self, stmt):\n raise NotImplementedError('Please implement the LoadG handler with your own logic.')\n\n def _handle_Exit(self, stmt):\n self._expr(stmt.guard)\n self._expr(stmt.dst)\n\n def _handle_ITE(self, expr):\n # EDG says: Not sure how generic this is.\n cond = self._expr(expr.cond)\n if cond is True:\n return self._expr(expr.iftrue)\n elif cond is False:\n return self._expr(expr.iffalse)\n else:\n return None\n\n def _handle_Unop(self, expr):\n handler = None\n\n # All conversions are handled by the Conversion handler\n simop = vex_operations.get(expr.op)\n if simop is not None and simop.op_attrs.get('conversion', None):\n handler = '_handle_Conversion'\n # Notice order of \"Not\" comparisons\n elif expr.op == 'Iop_Not1':\n handler = '_handle_Not1'\n elif expr.op.startswith('Iop_Not'):\n handler = '_handle_Not'\n\n if handler is not None and hasattr(self, handler):\n return getattr(self, handler)(expr)\n else:\n self.l.error('Unsupported Unop %s.', expr.op)\n return None\n\n def _handle_Binop(self, expr):\n handler = None\n if expr.op.startswith('Iop_And'):\n handler = '_handle_And'\n elif expr.op.startswith('Iop_Or'):\n handler = '_handle_Or'\n elif expr.op.startswith('Iop_Add'):\n handler = '_handle_Add'\n elif 
expr.op.startswith('Iop_Sub'):\n handler = '_handle_Sub'\n elif expr.op.startswith('Iop_Mul'):\n handler = \"_handle_Mul\"\n elif expr.op.startswith('Iop_Div'):\n handler = \"_handle_Div\"\n elif expr.op.startswith('Iop_Xor'):\n handler = '_handle_Xor'\n elif expr.op.startswith('Iop_Shl'):\n handler = '_handle_Shl'\n elif expr.op.startswith('Iop_Shr'):\n handler = '_handle_Shr'\n elif expr.op.startswith('Iop_Sal'):\n # intended use of SHL\n handler = '_handle_Shl'\n elif expr.op.startswith('Iop_Sar'):\n handler = '_handle_Sar'\n elif expr.op.startswith('Iop_CmpEQ'):\n handler = '_handle_CmpEQ'\n elif expr.op.startswith('Iop_CmpNE'):\n handler = '_handle_CmpNE'\n elif expr.op.startswith('Iop_CmpLT'):\n handler = '_handle_CmpLT'\n elif expr.op.startswith('Iop_CmpLE'):\n handler = '_handle_CmpLE'\n elif expr.op.startswith('Iop_CmpORD'):\n handler = '_handle_CmpORD'\n elif expr.op.startswith('Const'):\n handler = '_handle_Const'\n\n if handler is not None and hasattr(self, handler):\n return getattr(self, handler)(expr)\n else:\n self.l.error('Unsupported Binop %s.', expr.op)\n\n return None\n\n def _handle_CCall(self, expr): # pylint:disable=useless-return\n self.l.warning('Unsupported expression type CCall with callee %s.', str(expr.cee))\n return None\n\n #\n # Unary operation handlers\n #\n\n def _handle_U32(self, expr):\n return expr.value\n\n def _handle_U64(self, expr):\n return expr.value\n\n def _handle_U16(self, expr):\n return expr.value\n\n def _handle_U8(self, expr):\n return expr.value\n\n def _handle_U1(self, expr):\n return expr.value\n\n def _handle_Const(self, expr): # pylint:disable=no-self-use\n return expr.con.value\n\n def _handle_Conversion(self, expr):\n expr = self._expr(expr.args[0])\n if expr is None:\n return None\n\n # FIXME: implement real conversion\n return expr\n\n #\n # Binary operation handlers\n #\n\n def _handle_And(self, expr):\n arg0, arg1 = expr.args\n expr_0 = self._expr(arg0)\n if expr_0 is None:\n return None\n expr_1 = self._expr(arg1)\n if expr_1 is None:\n return None\n\n try:\n return expr_0 & expr_1\n except TypeError as e:\n self.l.warning(e)\n return None\n\n def _handle_Or(self, expr):\n arg0, arg1 = expr.args\n expr_0 = self._expr(arg0)\n if expr_0 is None:\n return None\n expr_1 = self._expr(arg1)\n if expr_1 is None:\n return None\n\n try:\n return expr_0 | expr_1\n except TypeError as e:\n self.l.warning(e)\n return None\n\n def _handle_Not1(self, expr):\n return self._handle_Not(expr)\n\n def _handle_Not(self, expr):\n arg0 = expr.args[0]\n expr_0 = self._expr(arg0)\n if expr_0 is None:\n return None\n try:\n return ~expr_0 # pylint:disable=invalid-unary-operand-type\n except TypeError as e:\n self.l.exception(e)\n return None\n\n def _handle_Add(self, expr):\n arg0, arg1 = expr.args\n expr_0 = self._expr(arg0)\n if expr_0 is None:\n return None\n expr_1 = self._expr(arg1)\n if expr_1 is None:\n return None\n\n try:\n if isinstance(expr_0, int) and isinstance(expr_1, int):\n # self.tyenv is not used\n mask = (1 << expr.result_size(self.tyenv)) - 1\n return (expr_0 + expr_1) & mask\n else:\n return expr_0 + expr_1\n except TypeError as e:\n self.l.warning(e)\n return None\n\n def _handle_Sub(self, expr):\n arg0, arg1 = expr.args\n expr_0 = self._expr(arg0)\n if expr_0 is None:\n return None\n expr_1 = self._expr(arg1)\n if expr_1 is None:\n return None\n\n try:\n if isinstance(expr_0, int) and isinstance(expr_1, int):\n # self.tyenv is not used\n mask = (1 << expr.result_size(self.tyenv)) - 1\n return (expr_0 - expr_1) & mask\n else:\n 
return expr_0 - expr_1\n except TypeError as e:\n self.l.warning(e)\n return None\n\n def _handle_Mul(self, expr):\n arg0, arg1 = expr.args\n expr_0 = self._expr(arg0)\n if expr_0 is None:\n return None\n expr_1 = self._expr(arg1)\n if expr_1 is None:\n return None\n\n try:\n if isinstance(expr_0, int) and isinstance(expr_1, int):\n # self.tyenv is not used\n mask = (1 << expr.result_size(self.tyenv)) - 1\n return (expr_0 * expr_1) & mask\n else:\n return expr_0 * expr_1\n except TypeError as e:\n self.l.warning(e)\n return None\n\n def _handle_Div(self, expr):\n arg0, arg1 = expr.args\n expr_0 = self._expr(arg0)\n if expr_0 is None:\n return None\n expr_1 = self._expr(arg1)\n if expr_1 is None:\n return None\n\n try:\n # TODO: Probably should take care of the sign\n return expr_0 // expr_1\n except TypeError as e:\n self.l.warning(e)\n return None\n\n def _handle_Xor(self, expr):\n arg0, arg1 = expr.args\n expr_0 = self._expr(arg0)\n if expr_0 is None:\n return None\n expr_1 = self._expr(arg1)\n if expr_1 is None:\n return None\n\n try:\n return expr_0 ^ expr_1\n except TypeError as e:\n self.l.warning(e)\n return None\n\n def _handle_Shl(self, expr):\n arg0, arg1 = expr.args\n expr_0 = self._expr(arg0)\n if expr_0 is None:\n return None\n expr_1 = self._expr(arg1)\n if expr_1 is None:\n return None\n\n try:\n if isinstance(expr_0, int) and isinstance(expr_1, int):\n # self.tyenv is not used\n mask = (1 << expr.result_size(self.tyenv)) - 1\n return (expr_0 << expr_1) & mask\n else:\n return expr_0 << expr_1\n except TypeError as e:\n self.l.warning(e)\n return None\n\n def _handle_Shr(self, expr):\n arg0, arg1 = expr.args\n expr_0 = self._expr(arg0)\n if expr_0 is None:\n return None\n expr_1 = self._expr(arg1)\n if expr_1 is None:\n return None\n\n try:\n return expr_0 >> expr_1\n except TypeError as e:\n self.l.warning(e)\n return None\n\n def _handle_Sar(self, expr):\n # EDG asks: is this right?\n arg0, arg1 = expr.args\n expr_0 = self._expr(arg0)\n if expr_0 is None:\n return None\n expr_1 = self._expr(arg1)\n if expr_1 is None:\n return None\n try:\n return expr_0 >> expr_1\n except TypeError as e:\n self.l.warning(e)\n return None\n\n def _handle_CmpEQ(self, expr):\n arg0, arg1 = expr.args\n expr_0 = self._expr(arg0)\n if expr_0 is None:\n return None\n expr_1 = self._expr(arg1)\n if expr_1 is None:\n return None\n\n try:\n return expr_0 == expr_1\n except TypeError as ex:\n self.l.warning(ex)\n return None\n\n def _handle_CmpNE(self, expr):\n arg0, arg1 = expr.args\n expr_0 = self._expr(arg0)\n if expr_0 is None:\n return None\n expr_1 = self._expr(arg1)\n if expr_1 is None:\n return None\n\n try:\n return expr_0 != expr_1\n except TypeError as ex:\n self.l.warning(ex)\n return None\n\n def _handle_CmpLE(self, expr):\n arg0, arg1 = expr.args\n expr_0 = self._expr(arg0)\n if expr_0 is None:\n return None\n expr_1 = self._expr(arg1)\n if expr_1 is None:\n return None\n\n try:\n return expr_0 <= expr_1\n except TypeError as ex:\n self.l.warning(ex)\n return None\n\n def _handle_CmpLT(self, expr):\n arg0, arg1 = expr.args\n expr_0 = self._expr(arg0)\n if expr_0 is None:\n return None\n expr_1 = self._expr(arg1)\n if expr_1 is None:\n return None\n\n try:\n return expr_0 < expr_1\n except TypeError as ex:\n self.l.warning(ex)\n return None\n\n def _handle_MBE(self, expr): # pylint:disable=unused-argument\n # Yeah.... 
no.\n return None\n\nclass SimEngineLightAILMixin:\n\n def _process(self, state, successors, *args, block=None, whitelist=None, **kwargs): # pylint:disable=arguments-differ,unused-argument\n\n self.tmps = {}\n self.block = block\n self.state = state\n self.arch = state.arch\n\n self._process_Stmt(whitelist=whitelist)\n\n self.stmt_idx = None\n self.ins_addr = None\n\n def _process_Stmt(self, whitelist=None):\n\n if whitelist is not None:\n whitelist = set(whitelist)\n\n for stmt_idx, stmt in enumerate(self.block.statements):\n if whitelist is not None and stmt_idx not in whitelist:\n continue\n\n self.stmt_idx = stmt_idx\n self.ins_addr = stmt.ins_addr\n\n self._handle_Stmt(stmt)\n\n def _expr(self, expr):\n\n handler = \"_ail_handle_%s\" % type(expr).__name__\n if hasattr(self, handler):\n return getattr(self, handler)(expr)\n self.l.warning('Unsupported expression type %s.', type(expr).__name__)\n return None\n\n #\n # Helper methods\n #\n\n def _codeloc(self):\n return CodeLocation(self.block.addr, self.stmt_idx, ins_addr=self.ins_addr, context=self._context)\n\n #\n # Statement handlers\n #\n\n def _handle_Stmt(self, stmt):\n handler = \"_handle_%s\" % type(stmt).__name__\n if hasattr(self, handler):\n getattr(self, handler)(stmt)\n return\n\n # compatibility\n old_handler = \"_ail_handle_%s\" % type(stmt).__name__\n if hasattr(self, old_handler):\n getattr(self, old_handler)(stmt)\n return\n\n self.l.warning('Unsupported statement type %s.', type(stmt).__name__)\n\n def _ail_handle_Jump(self, stmt):\n raise NotImplementedError('Please implement the Jump handler with your own logic.')\n\n def _ail_handle_Call(self, stmt):\n raise NotImplementedError('Please implement the Call handler with your own logic.')\n\n #\n # Expression handlers\n #\n\n def _ail_handle_Const(self, expr): # pylint:disable=no-self-use\n return expr.value\n\n def _ail_handle_Tmp(self, expr):\n tmp_idx = expr.tmp_idx\n\n try:\n return self.tmps[tmp_idx]\n except KeyError:\n return None\n\n def _ail_handle_Load(self, expr):\n raise NotImplementedError('Please implement the Load handler with your own logic.')\n\n def _ail_handle_UnaryOp(self, expr):\n handler_name = '_ail_handle_%s' % expr.op\n try:\n handler = getattr(self, handler_name)\n except AttributeError:\n self.l.warning('Unsupported UnaryOp %s.', expr.op)\n return None\n\n return handler(expr)\n\n def _ail_handle_BinaryOp(self, expr):\n handler_name = '_ail_handle_%s' % expr.op\n try:\n handler = getattr(self, handler_name)\n except AttributeError:\n self.l.warning('Unsupported BinaryOp %s.', expr.op)\n return None\n\n return handler(expr)\n\n #\n # Binary operation handlers\n #\n\n def _ail_handle_CmpLT(self, expr):\n\n arg0, arg1 = expr.operands\n\n expr_0 = self._expr(arg0)\n expr_1 = self._expr(arg1)\n if expr_0 is None:\n expr_0 = arg0\n if expr_1 is None:\n expr_1 = arg1\n\n try:\n return expr_0 <= expr_1\n except TypeError:\n return ailment.Expr.BinaryOp(expr.idx, 'CmpLT', [expr_0, expr_1], expr.signed, **expr.tags)\n\n def _ail_handle_Add(self, expr):\n\n arg0, arg1 = expr.operands\n\n expr_0 = self._expr(arg0)\n expr_1 = self._expr(arg1)\n if expr_0 is None:\n expr_0 = arg0\n if expr_1 is None:\n expr_1 = arg1\n\n try:\n return expr_0 + expr_1\n except TypeError:\n return ailment.Expr.BinaryOp(expr.idx, 'Add', [expr_0, expr_1], expr.signed, **expr.tags)\n\n def _ail_handle_Sub(self, expr):\n\n arg0, arg1 = expr.operands\n\n expr_0 = self._expr(arg0)\n expr_1 = self._expr(arg1)\n\n if expr_0 is None:\n expr_0 = arg0\n if expr_1 is None:\n expr_1 = 
arg1\n\n try:\n return expr_0 - expr_1\n except TypeError:\n return ailment.Expr.BinaryOp(expr.idx, 'Sub', [expr_0, expr_1], expr.signed, **expr.tags)\n\n def _ail_handle_Div(self, expr):\n\n arg0, arg1 = expr.operands\n\n expr_0 = self._expr(arg0)\n expr_1 = self._expr(arg1)\n\n if expr_0 is None:\n expr_0 = arg0\n if expr_1 is None:\n expr_1 = arg1\n\n try:\n return expr_0 // expr_1\n except TypeError:\n return ailment.Expr.BinaryOp(expr.idx, 'Div', [expr_0, expr_1], expr.signed, **expr.tags)\n\n def _ail_handle_DivMod(self, expr):\n return self._ail_handle_Div(expr)\n\n def _ail_handle_Mul(self, expr):\n\n arg0, arg1 = expr.operands\n\n expr_0 = self._expr(arg0)\n expr_1 = self._expr(arg1)\n\n if expr_0 is None:\n expr_0 = arg0\n if expr_1 is None:\n expr_1 = arg1\n\n try:\n return expr_0 * expr_1\n except TypeError:\n return ailment.Expr.BinaryOp(expr.idx, 'Mul', [expr_0, expr_1], expr.signed, **expr.tags)\n\n def _ail_handle_Mull(self, expr):\n return self._ail_handle_Mul(expr)\n\n def _ail_handle_And(self, expr):\n\n arg0, arg1 = expr.operands\n\n expr_0 = self._expr(arg0)\n expr_1 = self._expr(arg1)\n\n if expr_0 is None:\n expr_0 = arg0\n if expr_1 is None:\n expr_1 = arg1\n\n try:\n return expr_0 & expr_1\n except TypeError:\n return ailment.Expr.BinaryOp(expr.idx, 'And', [expr_0, expr_1], expr.signed, **expr.tags)\n\n def _ail_handle_Or(self, expr):\n\n arg0, arg1 = expr.operands\n\n expr_0 = self._expr(arg0)\n expr_1 = self._expr(arg1)\n\n if expr_0 is None:\n expr_0 = arg0\n if expr_1 is None:\n expr_1 = arg1\n\n try:\n return expr_0 | expr_1\n except TypeError:\n return ailment.Expr.BinaryOp(expr.idx, 'Or', [expr_0, expr_1], expr.signed, **expr.tags)\n\n def _ail_handle_Xor(self, expr):\n\n arg0, arg1 = expr.operands\n\n expr_0 = self._expr(arg0)\n expr_1 = self._expr(arg1)\n\n if expr_0 is None:\n expr_0 = arg0\n if expr_1 is None:\n expr_1 = arg1\n\n try:\n return expr_0 ^ expr_1\n except TypeError:\n return ailment.Expr.BinaryOp(expr.idx, 'Xor', [expr_0, expr_1], expr.signed, **expr.tags)\n\n def _ail_handle_Shr(self, expr):\n\n arg0, arg1 = expr.operands\n expr_0 = self._expr(arg0)\n expr_1 = self._expr(arg1)\n\n if expr_0 is None:\n expr_0 = arg0\n if expr_1 is None:\n expr_1 = arg1\n\n try:\n return expr_0 >> expr_1\n except TypeError:\n return ailment.Expr.BinaryOp(expr.idx, 'Shr', [expr_0, expr_1], expr.signed, **expr.tags)\n\n def _ail_handle_Shl(self, expr):\n\n arg0, arg1 = expr.operands\n expr_0 = self._expr(arg0)\n expr_1 = self._expr(arg1)\n\n if expr_0 is None:\n expr_0 = arg0\n if expr_1 is None:\n expr_1 = arg1\n\n try:\n return expr_0 << expr_1\n except TypeError:\n return ailment.Expr.BinaryOp(expr.idx, 'Shl', [expr_0, expr_1], expr.signed, **expr.tags)\n\n def _ail_handle_Sal(self, expr):\n return self._ail_handle_Shl(expr)\n\n def _ail_handle_Sar(self, expr):\n\n arg0, arg1 = expr.operands\n expr_0 = self._expr(arg0)\n expr_1 = self._expr(arg1)\n\n if expr_0 is None:\n expr_0 = arg0\n if expr_1 is None:\n expr_1 = arg1\n\n try:\n return expr_0 >> expr_1\n except TypeError:\n return ailment.Expr.BinaryOp(expr.idx, 'Sar', [expr_0, expr_1], expr.signed, **expr.tags)\n\n #\n # Unary operation handlers\n #\n\n def _ail_handle_Convert(self, expr):\n data = self._expr(expr.operand)\n if data is not None:\n if type(data) is int:\n return data\n return None\n\n def _ail_handle_Not(self, expr):\n\n data = self._expr(expr.operand)\n if data is None:\n return None\n\n try:\n return ~data # pylint:disable=invalid-unary-operand-type\n except TypeError:\n return 
ailment.Expr.UnaryOp(expr.idx, 'Not', data, **expr.tags)\n\n\n# Compatibility\nSimEngineLightVEX = SimEngineLightVEXMixin\nSimEngineLightAIL = SimEngineLightAILMixin\n", "path": "angr/engines/light/engine.py" } ]
[ { "content": "# pylint:disable=no-self-use\nfrom typing import Tuple\n\nimport logging\n\nimport ailment\nimport pyvex\nimport archinfo\n\nfrom ...engines.vex.claripy.irop import operations as vex_operations\nfrom ...code_location import CodeLocation\nfrom ...utils.constants import DEFAULT_STATEMENT\nfrom ..engine import SimEngine\n\n\nclass SimEngineLight(SimEngine):\n def __init__(self):\n super(SimEngineLight, self).__init__()\n\n self.l = logging.getLogger(self.__module__ + \".\" + self.__class__.__name__)\n\n # local variables\n self.state = None\n self.arch = None\n self.block = None\n self._call_stack = None\n\n self.stmt_idx = None\n self.ins_addr = None\n self.tmps = None\n\n # for VEX blocks only\n self.tyenv = None\n\n def process(self, state, *args, **kwargs):\n # we are using a completely different state. Therefore, we directly call our _process() method before\n # SimEngine becomes flexible enough.\n self._process(state, None, block=kwargs.pop('block', None), whitelist=kwargs.pop('whitelist', None))\n\n def _process(self, new_state, successors, *args, **kwargs):\n raise NotImplementedError()\n\n def _check(self, state, *args, **kwargs):\n return True\n\n #\n # Helper methods\n #\n\n @property\n def _context(self) -> Tuple[int]:\n if not self._call_stack:\n return tuple()\n\n # Convert to Tuple to make `context` hashable if not None\n call_stack_addresses = tuple(self._call_stack)\n return call_stack_addresses\n\n def _codeloc(self, block_only=False):\n return CodeLocation(self.block.addr,\n None if block_only else self.stmt_idx,\n ins_addr=None if block_only else self.ins_addr,\n context=self._context\n )\n\n\nclass SimEngineLightVEXMixin:\n\n def _process(self, state, successors, *args, block=None, whitelist=None, **kwargs): # pylint:disable=arguments-differ,unused-argument\n\n assert block is not None\n\n # initialize local variables\n self.tmps = {}\n self.block = block\n self.state = state\n\n if state is not None:\n self.arch: archinfo.Arch = state.arch\n\n self.tyenv = block.vex.tyenv\n\n self._process_Stmt(whitelist=whitelist)\n\n self.stmt_idx = None\n self.ins_addr = None\n\n def _process_Stmt(self, whitelist=None):\n\n if whitelist is not None:\n # optimize whitelist lookups\n whitelist = set(whitelist)\n\n for stmt_idx, stmt in enumerate(self.block.vex.statements):\n if whitelist is not None and stmt_idx not in whitelist:\n continue\n self.stmt_idx = stmt_idx\n\n if type(stmt) is pyvex.IRStmt.IMark:\n # Note that we cannot skip IMarks as they are used later to trigger observation events\n # The bug caused by skipping IMarks is reported at https://github.com/angr/angr/pull/1150\n self.ins_addr = stmt.addr + stmt.delta\n\n self._handle_Stmt(stmt)\n\n self._process_block_end()\n\n def _process_block_end(self):\n # handle calls to another function\n # Note that without global information, we cannot handle cases where we *jump* to another function (jumpkind ==\n # \"Ijk_Boring\"). 
Users are supposed to overwrite this method, detect these cases with the help of global\n # information (such as CFG or symbol addresses), and handle them accordingly.\n if self.block.vex.jumpkind == 'Ijk_Call':\n self.stmt_idx = DEFAULT_STATEMENT\n handler = '_handle_function'\n if hasattr(self, handler):\n func_addr = self._expr(self.block.vex.next)\n if func_addr is not None:\n getattr(self, handler)(func_addr)\n else:\n self.l.debug('Cannot determine the callee address at %#x.', self.block.addr)\n else:\n self.l.warning('Function handler not implemented.')\n\n #\n # Statement handlers\n #\n\n def _handle_Stmt(self, stmt):\n handler = \"_handle_%s\" % type(stmt).__name__\n if hasattr(self, handler):\n getattr(self, handler)(stmt)\n elif type(stmt).__name__ not in ('IMark', 'AbiHint'):\n self.l.error('Unsupported statement type %s.', type(stmt).__name__)\n\n # synchronize with function _handle_WrTmpData()\n def _handle_WrTmp(self, stmt):\n data = self._expr(stmt.data)\n if data is None:\n return\n\n self.tmps[stmt.tmp] = data\n\n # invoked by LoadG\n def _handle_WrTmpData(self, tmp, data):\n if data is None:\n return\n self.tmps[tmp] = data\n\n def _handle_Put(self, stmt):\n raise NotImplementedError('Please implement the Put handler with your own logic.')\n\n def _handle_Store(self, stmt):\n raise NotImplementedError('Please implement the Store handler with your own logic.')\n\n def _handle_StoreG(self, stmt):\n raise NotImplementedError('Please implement the StoreG handler with your own logic.')\n\n def _handle_LLSC(self, stmt: pyvex.IRStmt.LLSC):\n raise NotImplementedError('Please implement the LLSC handler with your own logic.')\n\n #\n # Expression handlers\n #\n\n def _expr(self, expr):\n\n handler = \"_handle_%s\" % type(expr).__name__\n if hasattr(self, handler):\n return getattr(self, handler)(expr)\n else:\n self.l.error('Unsupported expression type %s.', type(expr).__name__)\n return None\n\n def _handle_RdTmp(self, expr):\n tmp = expr.tmp\n\n if tmp in self.tmps:\n return self.tmps[tmp]\n return None\n\n def _handle_Get(self, expr):\n raise NotImplementedError('Please implement the Get handler with your own logic.')\n\n def _handle_Load(self, expr):\n raise NotImplementedError('Please implement the Load handler with your own logic.')\n\n def _handle_LoadG(self, stmt):\n raise NotImplementedError('Please implement the LoadG handler with your own logic.')\n\n def _handle_Exit(self, stmt):\n self._expr(stmt.guard)\n self._expr(stmt.dst)\n\n def _handle_ITE(self, expr):\n # EDG says: Not sure how generic this is.\n cond = self._expr(expr.cond)\n if cond is True:\n return self._expr(expr.iftrue)\n elif cond is False:\n return self._expr(expr.iffalse)\n else:\n return None\n\n def _handle_Unop(self, expr):\n handler = None\n\n # All conversions are handled by the Conversion handler\n simop = vex_operations.get(expr.op)\n if simop is not None and simop.op_attrs.get('conversion', None):\n handler = '_handle_Conversion'\n # Notice order of \"Not\" comparisons\n elif expr.op == 'Iop_Not1':\n handler = '_handle_Not1'\n elif expr.op.startswith('Iop_Not'):\n handler = '_handle_Not'\n\n if handler is not None and hasattr(self, handler):\n return getattr(self, handler)(expr)\n else:\n self.l.error('Unsupported Unop %s.', expr.op)\n return None\n\n def _handle_Binop(self, expr):\n handler = None\n if expr.op.startswith('Iop_And'):\n handler = '_handle_And'\n elif expr.op.startswith('Iop_Or'):\n handler = '_handle_Or'\n elif expr.op.startswith('Iop_Add'):\n handler = '_handle_Add'\n elif 
expr.op.startswith('Iop_Sub'):\n handler = '_handle_Sub'\n elif expr.op.startswith('Iop_Mul'):\n handler = \"_handle_Mul\"\n elif expr.op.startswith('Iop_Div'):\n handler = \"_handle_Div\"\n elif expr.op.startswith('Iop_Xor'):\n handler = '_handle_Xor'\n elif expr.op.startswith('Iop_Shl'):\n handler = '_handle_Shl'\n elif expr.op.startswith('Iop_Shr'):\n handler = '_handle_Shr'\n elif expr.op.startswith('Iop_Sal'):\n # intended use of SHL\n handler = '_handle_Shl'\n elif expr.op.startswith('Iop_Sar'):\n handler = '_handle_Sar'\n elif expr.op.startswith('Iop_CmpEQ'):\n handler = '_handle_CmpEQ'\n elif expr.op.startswith('Iop_CmpNE'):\n handler = '_handle_CmpNE'\n elif expr.op.startswith('Iop_CmpLT'):\n handler = '_handle_CmpLT'\n elif expr.op.startswith('Iop_CmpLE'):\n handler = '_handle_CmpLE'\n elif expr.op.startswith('Iop_CmpORD'):\n handler = '_handle_CmpORD'\n elif expr.op.startswith('Const'):\n handler = '_handle_Const'\n\n if handler is not None and hasattr(self, handler):\n return getattr(self, handler)(expr)\n else:\n self.l.error('Unsupported Binop %s.', expr.op)\n\n return None\n\n def _handle_CCall(self, expr): # pylint:disable=useless-return\n self.l.warning('Unsupported expression type CCall with callee %s.', str(expr.cee))\n return None\n\n #\n # Unary operation handlers\n #\n\n def _handle_U32(self, expr):\n return expr.value\n\n def _handle_U64(self, expr):\n return expr.value\n\n def _handle_U16(self, expr):\n return expr.value\n\n def _handle_U8(self, expr):\n return expr.value\n\n def _handle_U1(self, expr):\n return expr.value\n\n def _handle_Const(self, expr): # pylint:disable=no-self-use\n return expr.con.value\n\n def _handle_Conversion(self, expr):\n expr = self._expr(expr.args[0])\n if expr is None:\n return None\n\n # FIXME: implement real conversion\n return expr\n\n #\n # Binary operation handlers\n #\n\n def _handle_And(self, expr):\n arg0, arg1 = expr.args\n expr_0 = self._expr(arg0)\n if expr_0 is None:\n return None\n expr_1 = self._expr(arg1)\n if expr_1 is None:\n return None\n\n try:\n return expr_0 & expr_1\n except TypeError as e:\n self.l.warning(e)\n return None\n\n def _handle_Or(self, expr):\n arg0, arg1 = expr.args\n expr_0 = self._expr(arg0)\n if expr_0 is None:\n return None\n expr_1 = self._expr(arg1)\n if expr_1 is None:\n return None\n\n try:\n return expr_0 | expr_1\n except TypeError as e:\n self.l.warning(e)\n return None\n\n def _handle_Not1(self, expr):\n return self._handle_Not(expr)\n\n def _handle_Not(self, expr):\n arg0 = expr.args[0]\n expr_0 = self._expr(arg0)\n if expr_0 is None:\n return None\n try:\n return ~expr_0 # pylint:disable=invalid-unary-operand-type\n except TypeError as e:\n self.l.exception(e)\n return None\n\n def _handle_Add(self, expr):\n arg0, arg1 = expr.args\n expr_0 = self._expr(arg0)\n if expr_0 is None:\n return None\n expr_1 = self._expr(arg1)\n if expr_1 is None:\n return None\n\n try:\n if isinstance(expr_0, int) and isinstance(expr_1, int):\n # self.tyenv is not used\n mask = (1 << expr.result_size(self.tyenv)) - 1\n return (expr_0 + expr_1) & mask\n else:\n return expr_0 + expr_1\n except TypeError as e:\n self.l.warning(e)\n return None\n\n def _handle_Sub(self, expr):\n arg0, arg1 = expr.args\n expr_0 = self._expr(arg0)\n if expr_0 is None:\n return None\n expr_1 = self._expr(arg1)\n if expr_1 is None:\n return None\n\n try:\n if isinstance(expr_0, int) and isinstance(expr_1, int):\n # self.tyenv is not used\n mask = (1 << expr.result_size(self.tyenv)) - 1\n return (expr_0 - expr_1) & mask\n else:\n 
return expr_0 - expr_1\n except TypeError as e:\n self.l.warning(e)\n return None\n\n def _handle_Mul(self, expr):\n arg0, arg1 = expr.args\n expr_0 = self._expr(arg0)\n if expr_0 is None:\n return None\n expr_1 = self._expr(arg1)\n if expr_1 is None:\n return None\n\n try:\n if isinstance(expr_0, int) and isinstance(expr_1, int):\n # self.tyenv is not used\n mask = (1 << expr.result_size(self.tyenv)) - 1\n return (expr_0 * expr_1) & mask\n else:\n return expr_0 * expr_1\n except TypeError as e:\n self.l.warning(e)\n return None\n\n def _handle_Div(self, expr):\n arg0, arg1 = expr.args\n expr_0 = self._expr(arg0)\n if expr_0 is None:\n return None\n expr_1 = self._expr(arg1)\n if expr_1 is None:\n return None\n\n try:\n # TODO: Probably should take care of the sign\n return expr_0 // expr_1\n except TypeError as e:\n self.l.warning(e)\n return None\n except ZeroDivisionError as e:\n self.l.warning(e)\n return None\n\n def _handle_Xor(self, expr):\n arg0, arg1 = expr.args\n expr_0 = self._expr(arg0)\n if expr_0 is None:\n return None\n expr_1 = self._expr(arg1)\n if expr_1 is None:\n return None\n\n try:\n return expr_0 ^ expr_1\n except TypeError as e:\n self.l.warning(e)\n return None\n\n def _handle_Shl(self, expr):\n arg0, arg1 = expr.args\n expr_0 = self._expr(arg0)\n if expr_0 is None:\n return None\n expr_1 = self._expr(arg1)\n if expr_1 is None:\n return None\n\n try:\n if isinstance(expr_0, int) and isinstance(expr_1, int):\n # self.tyenv is not used\n mask = (1 << expr.result_size(self.tyenv)) - 1\n return (expr_0 << expr_1) & mask\n else:\n return expr_0 << expr_1\n except TypeError as e:\n self.l.warning(e)\n return None\n\n def _handle_Shr(self, expr):\n arg0, arg1 = expr.args\n expr_0 = self._expr(arg0)\n if expr_0 is None:\n return None\n expr_1 = self._expr(arg1)\n if expr_1 is None:\n return None\n\n try:\n return expr_0 >> expr_1\n except TypeError as e:\n self.l.warning(e)\n return None\n\n def _handle_Sar(self, expr):\n # EDG asks: is this right?\n arg0, arg1 = expr.args\n expr_0 = self._expr(arg0)\n if expr_0 is None:\n return None\n expr_1 = self._expr(arg1)\n if expr_1 is None:\n return None\n try:\n return expr_0 >> expr_1\n except TypeError as e:\n self.l.warning(e)\n return None\n\n def _handle_CmpEQ(self, expr):\n arg0, arg1 = expr.args\n expr_0 = self._expr(arg0)\n if expr_0 is None:\n return None\n expr_1 = self._expr(arg1)\n if expr_1 is None:\n return None\n\n try:\n return expr_0 == expr_1\n except TypeError as ex:\n self.l.warning(ex)\n return None\n\n def _handle_CmpNE(self, expr):\n arg0, arg1 = expr.args\n expr_0 = self._expr(arg0)\n if expr_0 is None:\n return None\n expr_1 = self._expr(arg1)\n if expr_1 is None:\n return None\n\n try:\n return expr_0 != expr_1\n except TypeError as ex:\n self.l.warning(ex)\n return None\n\n def _handle_CmpLE(self, expr):\n arg0, arg1 = expr.args\n expr_0 = self._expr(arg0)\n if expr_0 is None:\n return None\n expr_1 = self._expr(arg1)\n if expr_1 is None:\n return None\n\n try:\n return expr_0 <= expr_1\n except TypeError as ex:\n self.l.warning(ex)\n return None\n\n def _handle_CmpLT(self, expr):\n arg0, arg1 = expr.args\n expr_0 = self._expr(arg0)\n if expr_0 is None:\n return None\n expr_1 = self._expr(arg1)\n if expr_1 is None:\n return None\n\n try:\n return expr_0 < expr_1\n except TypeError as ex:\n self.l.warning(ex)\n return None\n\n def _handle_MBE(self, expr): # pylint:disable=unused-argument\n # Yeah.... 
no.\n return None\n\nclass SimEngineLightAILMixin:\n\n def _process(self, state, successors, *args, block=None, whitelist=None, **kwargs): # pylint:disable=arguments-differ,unused-argument\n\n self.tmps = {}\n self.block = block\n self.state = state\n self.arch = state.arch\n\n self._process_Stmt(whitelist=whitelist)\n\n self.stmt_idx = None\n self.ins_addr = None\n\n def _process_Stmt(self, whitelist=None):\n\n if whitelist is not None:\n whitelist = set(whitelist)\n\n for stmt_idx, stmt in enumerate(self.block.statements):\n if whitelist is not None and stmt_idx not in whitelist:\n continue\n\n self.stmt_idx = stmt_idx\n self.ins_addr = stmt.ins_addr\n\n self._handle_Stmt(stmt)\n\n def _expr(self, expr):\n\n handler = \"_ail_handle_%s\" % type(expr).__name__\n if hasattr(self, handler):\n return getattr(self, handler)(expr)\n self.l.warning('Unsupported expression type %s.', type(expr).__name__)\n return None\n\n #\n # Helper methods\n #\n\n def _codeloc(self):\n return CodeLocation(self.block.addr, self.stmt_idx, ins_addr=self.ins_addr, context=self._context)\n\n #\n # Statement handlers\n #\n\n def _handle_Stmt(self, stmt):\n handler = \"_handle_%s\" % type(stmt).__name__\n if hasattr(self, handler):\n getattr(self, handler)(stmt)\n return\n\n # compatibility\n old_handler = \"_ail_handle_%s\" % type(stmt).__name__\n if hasattr(self, old_handler):\n getattr(self, old_handler)(stmt)\n return\n\n self.l.warning('Unsupported statement type %s.', type(stmt).__name__)\n\n def _ail_handle_Jump(self, stmt):\n raise NotImplementedError('Please implement the Jump handler with your own logic.')\n\n def _ail_handle_Call(self, stmt):\n raise NotImplementedError('Please implement the Call handler with your own logic.')\n\n #\n # Expression handlers\n #\n\n def _ail_handle_Const(self, expr): # pylint:disable=no-self-use\n return expr.value\n\n def _ail_handle_Tmp(self, expr):\n tmp_idx = expr.tmp_idx\n\n try:\n return self.tmps[tmp_idx]\n except KeyError:\n return None\n\n def _ail_handle_Load(self, expr):\n raise NotImplementedError('Please implement the Load handler with your own logic.')\n\n def _ail_handle_UnaryOp(self, expr):\n handler_name = '_ail_handle_%s' % expr.op\n try:\n handler = getattr(self, handler_name)\n except AttributeError:\n self.l.warning('Unsupported UnaryOp %s.', expr.op)\n return None\n\n return handler(expr)\n\n def _ail_handle_BinaryOp(self, expr):\n handler_name = '_ail_handle_%s' % expr.op\n try:\n handler = getattr(self, handler_name)\n except AttributeError:\n self.l.warning('Unsupported BinaryOp %s.', expr.op)\n return None\n\n return handler(expr)\n\n #\n # Binary operation handlers\n #\n\n def _ail_handle_CmpLT(self, expr):\n\n arg0, arg1 = expr.operands\n\n expr_0 = self._expr(arg0)\n expr_1 = self._expr(arg1)\n if expr_0 is None:\n expr_0 = arg0\n if expr_1 is None:\n expr_1 = arg1\n\n try:\n return expr_0 <= expr_1\n except TypeError:\n return ailment.Expr.BinaryOp(expr.idx, 'CmpLT', [expr_0, expr_1], expr.signed, **expr.tags)\n\n def _ail_handle_Add(self, expr):\n\n arg0, arg1 = expr.operands\n\n expr_0 = self._expr(arg0)\n expr_1 = self._expr(arg1)\n if expr_0 is None:\n expr_0 = arg0\n if expr_1 is None:\n expr_1 = arg1\n\n try:\n return expr_0 + expr_1\n except TypeError:\n return ailment.Expr.BinaryOp(expr.idx, 'Add', [expr_0, expr_1], expr.signed, **expr.tags)\n\n def _ail_handle_Sub(self, expr):\n\n arg0, arg1 = expr.operands\n\n expr_0 = self._expr(arg0)\n expr_1 = self._expr(arg1)\n\n if expr_0 is None:\n expr_0 = arg0\n if expr_1 is None:\n expr_1 = 
arg1\n\n try:\n return expr_0 - expr_1\n except TypeError:\n return ailment.Expr.BinaryOp(expr.idx, 'Sub', [expr_0, expr_1], expr.signed, **expr.tags)\n\n def _ail_handle_Div(self, expr):\n\n arg0, arg1 = expr.operands\n\n expr_0 = self._expr(arg0)\n expr_1 = self._expr(arg1)\n\n if expr_0 is None:\n expr_0 = arg0\n if expr_1 is None:\n expr_1 = arg1\n\n try:\n return expr_0 // expr_1\n except TypeError:\n return ailment.Expr.BinaryOp(expr.idx, 'Div', [expr_0, expr_1], expr.signed, **expr.tags)\n\n def _ail_handle_DivMod(self, expr):\n return self._ail_handle_Div(expr)\n\n def _ail_handle_Mul(self, expr):\n\n arg0, arg1 = expr.operands\n\n expr_0 = self._expr(arg0)\n expr_1 = self._expr(arg1)\n\n if expr_0 is None:\n expr_0 = arg0\n if expr_1 is None:\n expr_1 = arg1\n\n try:\n return expr_0 * expr_1\n except TypeError:\n return ailment.Expr.BinaryOp(expr.idx, 'Mul', [expr_0, expr_1], expr.signed, **expr.tags)\n\n def _ail_handle_Mull(self, expr):\n return self._ail_handle_Mul(expr)\n\n def _ail_handle_And(self, expr):\n\n arg0, arg1 = expr.operands\n\n expr_0 = self._expr(arg0)\n expr_1 = self._expr(arg1)\n\n if expr_0 is None:\n expr_0 = arg0\n if expr_1 is None:\n expr_1 = arg1\n\n try:\n return expr_0 & expr_1\n except TypeError:\n return ailment.Expr.BinaryOp(expr.idx, 'And', [expr_0, expr_1], expr.signed, **expr.tags)\n\n def _ail_handle_Or(self, expr):\n\n arg0, arg1 = expr.operands\n\n expr_0 = self._expr(arg0)\n expr_1 = self._expr(arg1)\n\n if expr_0 is None:\n expr_0 = arg0\n if expr_1 is None:\n expr_1 = arg1\n\n try:\n return expr_0 | expr_1\n except TypeError:\n return ailment.Expr.BinaryOp(expr.idx, 'Or', [expr_0, expr_1], expr.signed, **expr.tags)\n\n def _ail_handle_Xor(self, expr):\n\n arg0, arg1 = expr.operands\n\n expr_0 = self._expr(arg0)\n expr_1 = self._expr(arg1)\n\n if expr_0 is None:\n expr_0 = arg0\n if expr_1 is None:\n expr_1 = arg1\n\n try:\n return expr_0 ^ expr_1\n except TypeError:\n return ailment.Expr.BinaryOp(expr.idx, 'Xor', [expr_0, expr_1], expr.signed, **expr.tags)\n\n def _ail_handle_Shr(self, expr):\n\n arg0, arg1 = expr.operands\n expr_0 = self._expr(arg0)\n expr_1 = self._expr(arg1)\n\n if expr_0 is None:\n expr_0 = arg0\n if expr_1 is None:\n expr_1 = arg1\n\n try:\n return expr_0 >> expr_1\n except TypeError:\n return ailment.Expr.BinaryOp(expr.idx, 'Shr', [expr_0, expr_1], expr.signed, **expr.tags)\n\n def _ail_handle_Shl(self, expr):\n\n arg0, arg1 = expr.operands\n expr_0 = self._expr(arg0)\n expr_1 = self._expr(arg1)\n\n if expr_0 is None:\n expr_0 = arg0\n if expr_1 is None:\n expr_1 = arg1\n\n try:\n return expr_0 << expr_1\n except TypeError:\n return ailment.Expr.BinaryOp(expr.idx, 'Shl', [expr_0, expr_1], expr.signed, **expr.tags)\n\n def _ail_handle_Sal(self, expr):\n return self._ail_handle_Shl(expr)\n\n def _ail_handle_Sar(self, expr):\n\n arg0, arg1 = expr.operands\n expr_0 = self._expr(arg0)\n expr_1 = self._expr(arg1)\n\n if expr_0 is None:\n expr_0 = arg0\n if expr_1 is None:\n expr_1 = arg1\n\n try:\n return expr_0 >> expr_1\n except TypeError:\n return ailment.Expr.BinaryOp(expr.idx, 'Sar', [expr_0, expr_1], expr.signed, **expr.tags)\n\n #\n # Unary operation handlers\n #\n\n def _ail_handle_Convert(self, expr):\n data = self._expr(expr.operand)\n if data is not None:\n if type(data) is int:\n return data\n return None\n\n def _ail_handle_Not(self, expr):\n\n data = self._expr(expr.operand)\n if data is None:\n return None\n\n try:\n return ~data # pylint:disable=invalid-unary-operand-type\n except TypeError:\n return 
ailment.Expr.UnaryOp(expr.idx, 'Not', data, **expr.tags)\n\n\n# Compatibility\nSimEngineLightVEX = SimEngineLightVEXMixin\nSimEngineLightAIL = SimEngineLightAILMixin\n", "path": "angr/engines/light/engine.py" } ]
diff --git a/angr/engines/light/engine.py b/angr/engines/light/engine.py
index 025743dd1dd..568248c3706 100644
--- a/angr/engines/light/engine.py
+++ b/angr/engines/light/engine.py
@@ -423,6 +423,9 @@ def _handle_Div(self, expr):
         except TypeError as e:
             self.l.warning(e)
             return None
+        except ZeroDivisionError as e:
+            self.l.warning(e)
+            return None

     def _handle_Xor(self, expr):
         arg0, arg1 = expr.args
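The added `except ZeroDivisionError` branch makes the light VEX engine treat a constant division by zero the same way it already treats type errors: log the problem, give up on folding that expression, and return None instead of crashing the whole Propagator run. A minimal standalone sketch of that pattern (illustrative only; `fold_div` is not a real angr API):

```python
import logging

log = logging.getLogger("const_prop_sketch")


def fold_div(expr_0, expr_1):
    """Constant-fold an integer division, returning None when it cannot be done."""
    if expr_0 is None or expr_1 is None:
        return None
    try:
        # sign handling is ignored here, as in the original TODO
        return expr_0 // expr_1
    except TypeError as e:          # non-numeric / symbolic operand
        log.warning(e)
        return None
    except ZeroDivisionError as e:  # constant divisor that happens to be 0
        log.warning(e)
        return None


assert fold_div(10, 3) == 3
assert fold_div(10, 0) is None  # previously this raised and aborted the analysis
```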
comic__grand-challenge.org-3383
Viewer configuration does not show linking options

**Describe the bug**
The view and edit pages for viewer configurations no longer show options to set the linking configuration.

**To Reproduce**
Steps to reproduce the behavior:
1. Go to https://grand-challenge.org/viewer-configurations/demo-rse/
2. Scroll down to 'Linking Configuration'

The options displayed are duplicates of the 'Plugin and Tools' section.

**Expected behavior**
A clear and concise description of what you expected to happen.

**Screenshots**
If applicable, add screenshots to help explain your problem.

**Desktop (please complete the following information):**
 - OS: [e.g. iOS]
 - Browser [e.g. chrome, safari]
 - Version [e.g. 22]

**Smartphone (please complete the following information):**
 - Device: [e.g. iPhone6]
 - OS: [e.g. iOS8.1]
 - Browser [e.g. stock browser, safari]
 - Version [e.g. 22]

**Additional context**
Add any other context about the problem here.
[ { "content": "from crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Fieldset, Layout, Submit\nfrom django.forms import ModelForm\nfrom django_select2.forms import Select2MultipleWidget\n\nfrom grandchallenge.core.forms import SaveFormInitMixin\nfrom grandchallenge.core.widgets import ColorEditorWidget, JSONEditorWidget\nfrom grandchallenge.workstation_configs.models import (\n KEY_BINDINGS_SCHEMA,\n OVERLAY_SEGMENTS_SCHEMA,\n WorkstationConfig,\n)\n\nGENERAL_FIELDS = (\n \"title\",\n \"description\",\n \"image_context\",\n \"window_presets\",\n \"default_window_preset\",\n \"default_slab_thickness_mm\",\n \"default_slab_render_method\",\n \"default_orientation\",\n \"default_image_interpolation\",\n \"default_limit_view_area_to_image_volume\",\n \"default_overlay_alpha\",\n \"ghosting_slice_depth\",\n \"overlay_luts\",\n \"default_overlay_lut\",\n \"default_overlay_interpolation\",\n \"overlay_segments\",\n \"key_bindings\",\n \"default_zoom_scale\",\n \"default_brush_size\",\n \"default_annotation_color\",\n \"default_annotation_line_width\",\n \"auto_jump_center_of_gravity\",\n \"point_bounding_box_size_mm\",\n)\nPLUGIN_FIELDS = (\n \"show_image_info_plugin\",\n \"show_display_plugin\",\n \"show_image_switcher_plugin\",\n \"show_algorithm_output_plugin\",\n \"show_overlay_plugin\",\n \"show_annotation_statistics_plugin\",\n \"show_swivel_tool\",\n \"show_invert_tool\",\n \"show_flip_tool\",\n \"show_window_level_tool\",\n \"show_reset_tool\",\n \"show_overlay_selection_tool\",\n \"show_lut_selection_tool\",\n \"show_annotation_counter_tool\",\n \"enable_contrast_enhancement\",\n)\nLINKED_FIELDS = (\n \"link_images\",\n \"link_panning\",\n \"link_zooming\",\n \"link_slicing\",\n \"link_orienting\",\n \"link_windowing\",\n \"link_inverting\",\n \"link_flipping\",\n)\n\n\nclass WorkstationConfigForm(SaveFormInitMixin, ModelForm):\n def __init__(self, *args, read_only=False, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.helper = FormHelper(self)\n self.helper.layout = Layout(\n Fieldset(\"\", *GENERAL_FIELDS),\n Fieldset(\n \"Plugins and Tools\",\n *PLUGIN_FIELDS,\n css_class=\"border rounded px-2 my-4\",\n ),\n Fieldset(\n \"Linking Configuration\",\n *PLUGIN_FIELDS,\n css_class=\"border rounded px-2 my-4\",\n ),\n )\n\n if read_only:\n for field in self.fields:\n self.fields[field].disabled = True\n else:\n self.helper.layout.append(Submit(\"save\", \"Save\"))\n\n class Meta:\n model = WorkstationConfig\n fields = (\n *GENERAL_FIELDS,\n *PLUGIN_FIELDS,\n *LINKED_FIELDS,\n )\n\n widgets = {\n \"overlay_segments\": JSONEditorWidget(\n schema=OVERLAY_SEGMENTS_SCHEMA\n ),\n \"key_bindings\": JSONEditorWidget(schema=KEY_BINDINGS_SCHEMA),\n \"default_annotation_color\": ColorEditorWidget(format=\"hex\"),\n \"window_presets\": Select2MultipleWidget,\n \"overlay_luts\": Select2MultipleWidget,\n }\n help_texts = {\n \"overlay_segments\": (\n model._meta.get_field(\"overlay_segments\").help_text\n + \". If an categorical overlay is shown, it is possible to show toggles \"\n \"to change the visibility of the different overlay categories. To do \"\n \"so, configure the categories that should be displayed. Data from the\"\n \" algorithm's output.json can be added as an extra label to each \"\n \"toggle using jinja templating. \"\n 'For example: [{ \"voxel_value\": 0, \"name\": \"Level 0\", \"visible\": '\n 'false, \"metric_template\": \"{{metrics.volumes[0]}} mm³\"},]'\n ),\n \"key_bindings\": model._meta.get_field(\"key_bindings\").help_text\n + \". 
A copy and paste JSON can be obtained from the viewer.\",\n }\n", "path": "app/grandchallenge/workstation_configs/forms.py" } ]
[ { "content": "from crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Fieldset, Layout, Submit\nfrom django.forms import ModelForm\nfrom django_select2.forms import Select2MultipleWidget\n\nfrom grandchallenge.core.forms import SaveFormInitMixin\nfrom grandchallenge.core.widgets import ColorEditorWidget, JSONEditorWidget\nfrom grandchallenge.workstation_configs.models import (\n KEY_BINDINGS_SCHEMA,\n OVERLAY_SEGMENTS_SCHEMA,\n WorkstationConfig,\n)\n\nGENERAL_FIELDS = (\n \"title\",\n \"description\",\n \"image_context\",\n \"window_presets\",\n \"default_window_preset\",\n \"default_slab_thickness_mm\",\n \"default_slab_render_method\",\n \"default_orientation\",\n \"default_image_interpolation\",\n \"default_limit_view_area_to_image_volume\",\n \"default_overlay_alpha\",\n \"ghosting_slice_depth\",\n \"overlay_luts\",\n \"default_overlay_lut\",\n \"default_overlay_interpolation\",\n \"overlay_segments\",\n \"key_bindings\",\n \"default_zoom_scale\",\n \"default_brush_size\",\n \"default_annotation_color\",\n \"default_annotation_line_width\",\n \"auto_jump_center_of_gravity\",\n \"point_bounding_box_size_mm\",\n)\nPLUGIN_FIELDS = (\n \"show_image_info_plugin\",\n \"show_display_plugin\",\n \"show_image_switcher_plugin\",\n \"show_algorithm_output_plugin\",\n \"show_overlay_plugin\",\n \"show_annotation_statistics_plugin\",\n \"show_swivel_tool\",\n \"show_invert_tool\",\n \"show_flip_tool\",\n \"show_window_level_tool\",\n \"show_reset_tool\",\n \"show_overlay_selection_tool\",\n \"show_lut_selection_tool\",\n \"show_annotation_counter_tool\",\n \"enable_contrast_enhancement\",\n)\nLINKED_FIELDS = (\n \"link_images\",\n \"link_panning\",\n \"link_zooming\",\n \"link_slicing\",\n \"link_orienting\",\n \"link_windowing\",\n \"link_inverting\",\n \"link_flipping\",\n)\n\n\nclass WorkstationConfigForm(SaveFormInitMixin, ModelForm):\n def __init__(self, *args, read_only=False, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.helper = FormHelper(self)\n self.helper.layout = Layout(\n Fieldset(\"\", *GENERAL_FIELDS),\n Fieldset(\n \"Plugins and Tools\",\n *PLUGIN_FIELDS,\n css_class=\"border rounded px-2 my-4\",\n ),\n Fieldset(\n \"Linking Configuration\",\n *LINKED_FIELDS,\n css_class=\"border rounded px-2 my-4\",\n ),\n )\n\n if read_only:\n for field in self.fields:\n self.fields[field].disabled = True\n else:\n self.helper.layout.append(Submit(\"save\", \"Save\"))\n\n class Meta:\n model = WorkstationConfig\n fields = (\n *GENERAL_FIELDS,\n *PLUGIN_FIELDS,\n *LINKED_FIELDS,\n )\n\n widgets = {\n \"overlay_segments\": JSONEditorWidget(\n schema=OVERLAY_SEGMENTS_SCHEMA\n ),\n \"key_bindings\": JSONEditorWidget(schema=KEY_BINDINGS_SCHEMA),\n \"default_annotation_color\": ColorEditorWidget(format=\"hex\"),\n \"window_presets\": Select2MultipleWidget,\n \"overlay_luts\": Select2MultipleWidget,\n }\n help_texts = {\n \"overlay_segments\": (\n model._meta.get_field(\"overlay_segments\").help_text\n + \". If an categorical overlay is shown, it is possible to show toggles \"\n \"to change the visibility of the different overlay categories. To do \"\n \"so, configure the categories that should be displayed. Data from the\"\n \" algorithm's output.json can be added as an extra label to each \"\n \"toggle using jinja templating. \"\n 'For example: [{ \"voxel_value\": 0, \"name\": \"Level 0\", \"visible\": '\n 'false, \"metric_template\": \"{{metrics.volumes[0]}} mm³\"},]'\n ),\n \"key_bindings\": model._meta.get_field(\"key_bindings\").help_text\n + \". 
A copy and paste JSON can be obtained from the viewer.\",\n }\n", "path": "app/grandchallenge/workstation_configs/forms.py" } ]
diff --git a/app/grandchallenge/workstation_configs/forms.py b/app/grandchallenge/workstation_configs/forms.py index 9e9dded697..fb58b1f260 100644 --- a/app/grandchallenge/workstation_configs/forms.py +++ b/app/grandchallenge/workstation_configs/forms.py @@ -79,7 +79,7 @@ def __init__(self, *args, read_only=False, **kwargs): ), Fieldset( "Linking Configuration", - *PLUGIN_FIELDS, + *LINKED_FIELDS, css_class="border rounded px-2 my-4", ), )
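The patch above swaps `*PLUGIN_FIELDS` for `*LINKED_FIELDS` in the 'Linking Configuration' `Fieldset`, which is why the linking checkboxes reappear. A minimal regression-test sketch, assuming a pytest-django setup in the grand-challenge codebase (this is not a test from the repository; it only uses names that appear in the patched file):

```python
# Minimal sketch: the third Fieldset in the crispy-forms layout should be
# "Linking Configuration" and list exactly the LINKED_FIELDS.
from grandchallenge.workstation_configs.forms import (
    LINKED_FIELDS,
    WorkstationConfigForm,
)


def test_linking_configuration_fieldset_uses_linked_fields(db):
    form = WorkstationConfigForm()
    linking_fieldset = form.helper.layout.fields[2]

    assert linking_fieldset.legend == "Linking Configuration"
    assert list(linking_fieldset.fields) == list(LINKED_FIELDS)
```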
streamlit__streamlit-1469
Spelling mistake while running streamlit hello, DataFrame Demo # Summary I noticed a spelling mistake in the DataFrame demo while running `streamlit hello`: it displays 'UN Data Exlorer' instead of 'UN Data Explorer'. # Steps to reproduce 1. Go to terminal 2. Run `streamlit hello` 3. Open browser at localhost:8501 and choose dataframe demo ## Expected behavior: It should display the correct spelling: `(Data courtesy of the UN Data Explorer.)` ## Actual behavior: It's displaying `(Data courtesy of the UN Data Exlorer.)` ![image](https://user-images.githubusercontent.com/54715558/82215467-169e2200-9935-11ea-9c8d-f43d5f9ae454.png) ## Is this a regression? No # Debug info - Streamlit version: 0.57.3 - Python version: 3.8.2 - Using Conda? PipEnv? PyEnv? Pex? Conda - OS version: Windows 10 - Browser version: Chrome v81.0
[ { "content": "# Copyright 2018-2020 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport inspect\nimport textwrap\nfrom collections import OrderedDict\n\nimport streamlit as st\nfrom streamlit.logger import get_logger\nfrom streamlit.hello import demos\n\nLOGGER = get_logger(__name__)\n\n# Dictionary of\n# demo_name -> (demo_function, demo_description)\nDEMOS = OrderedDict(\n [\n (\"—\", (demos.intro, None)),\n (\n \"Animation Demo\",\n (\n demos.fractal_demo,\n \"\"\"\nThis app shows how you can use Streamlit to build cool animations.\nIt displays an animated fractal based on the the Julia Set. Use the slider\nto tune different parameters.\n\"\"\",\n ),\n ),\n (\n \"Plotting Demo\",\n (\n demos.plotting_demo,\n \"\"\"\nThis demo illustrates a combination of plotting and animation with\nStreamlit. We're generating a bunch of random numbers in a loop for around\n5 seconds. Enjoy!\n\"\"\",\n ),\n ),\n (\n \"Mapping Demo\",\n (\n demos.mapping_demo,\n \"\"\"\nThis demo shows how to use\n[`st.deck_gl_chart`](https://docs.streamlit.io/api.html#streamlit.deck_gl_chart)\nto display geospatial data.\n\"\"\",\n ),\n ),\n (\n \"DataFrame Demo\",\n (\n demos.data_frame_demo,\n \"\"\"\nThis demo shows how to use `st.write` to visualize Pandas DataFrames.\n\n(Data courtesy of the [UN Data Exlorer](http://data.un.org/Explorer.aspx).)\n\"\"\",\n ),\n ),\n ]\n)\n\n\ndef run():\n demo_name = st.sidebar.selectbox(\"Choose a demo\", list(DEMOS.keys()), 0)\n demo = DEMOS[demo_name][0]\n\n if demo_name == \"—\":\n show_code = False\n st.write(\"# Welcome to Streamlit! 👋\")\n else:\n show_code = st.sidebar.checkbox(\"Show code\", True)\n st.markdown(\"# %s\" % demo_name)\n description = DEMOS[demo_name][1]\n if description:\n st.write(description)\n # Clear everything from the intro page.\n # We only have 4 elements in the page so this is intentional overkill.\n for i in range(10):\n st.empty()\n\n demo()\n\n if show_code:\n st.markdown(\"## Code\")\n sourcelines, _ = inspect.getsourcelines(demo)\n st.code(textwrap.dedent(\"\".join(sourcelines[1:])))\n\n\nif __name__ == \"__main__\":\n run()\n", "path": "lib/streamlit/hello/hello.py" } ]
[ { "content": "# Copyright 2018-2020 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport inspect\nimport textwrap\nfrom collections import OrderedDict\n\nimport streamlit as st\nfrom streamlit.logger import get_logger\nfrom streamlit.hello import demos\n\nLOGGER = get_logger(__name__)\n\n# Dictionary of\n# demo_name -> (demo_function, demo_description)\nDEMOS = OrderedDict(\n [\n (\"—\", (demos.intro, None)),\n (\n \"Animation Demo\",\n (\n demos.fractal_demo,\n \"\"\"\nThis app shows how you can use Streamlit to build cool animations.\nIt displays an animated fractal based on the the Julia Set. Use the slider\nto tune different parameters.\n\"\"\",\n ),\n ),\n (\n \"Plotting Demo\",\n (\n demos.plotting_demo,\n \"\"\"\nThis demo illustrates a combination of plotting and animation with\nStreamlit. We're generating a bunch of random numbers in a loop for around\n5 seconds. Enjoy!\n\"\"\",\n ),\n ),\n (\n \"Mapping Demo\",\n (\n demos.mapping_demo,\n \"\"\"\nThis demo shows how to use\n[`st.deck_gl_chart`](https://docs.streamlit.io/api.html#streamlit.deck_gl_chart)\nto display geospatial data.\n\"\"\",\n ),\n ),\n (\n \"DataFrame Demo\",\n (\n demos.data_frame_demo,\n \"\"\"\nThis demo shows how to use `st.write` to visualize Pandas DataFrames.\n\n(Data courtesy of the [UN Data Explorer](http://data.un.org/Explorer.aspx).)\n\"\"\",\n ),\n ),\n ]\n)\n\n\ndef run():\n demo_name = st.sidebar.selectbox(\"Choose a demo\", list(DEMOS.keys()), 0)\n demo = DEMOS[demo_name][0]\n\n if demo_name == \"—\":\n show_code = False\n st.write(\"# Welcome to Streamlit! 👋\")\n else:\n show_code = st.sidebar.checkbox(\"Show code\", True)\n st.markdown(\"# %s\" % demo_name)\n description = DEMOS[demo_name][1]\n if description:\n st.write(description)\n # Clear everything from the intro page.\n # We only have 4 elements in the page so this is intentional overkill.\n for i in range(10):\n st.empty()\n\n demo()\n\n if show_code:\n st.markdown(\"## Code\")\n sourcelines, _ = inspect.getsourcelines(demo)\n st.code(textwrap.dedent(\"\".join(sourcelines[1:])))\n\n\nif __name__ == \"__main__\":\n run()\n", "path": "lib/streamlit/hello/hello.py" } ]
diff --git a/lib/streamlit/hello/hello.py b/lib/streamlit/hello/hello.py index d39b5042c4ff..56c861ae7de2 100644 --- a/lib/streamlit/hello/hello.py +++ b/lib/streamlit/hello/hello.py @@ -67,7 +67,7 @@ """ This demo shows how to use `st.write` to visualize Pandas DataFrames. -(Data courtesy of the [UN Data Exlorer](http://data.un.org/Explorer.aspx).) +(Data courtesy of the [UN Data Explorer](http://data.un.org/Explorer.aspx).) """, ), ),
mdn__kuma-6978
SystemError: <method 'get' of 'dict' objects> returned a result with an error set https://sentry.prod.mozaws.net/operations/mdn-prod/issues/6659909/ ``` timeout: timeout SystemError: <method 'get' of 'dict' objects> returned a result with an error set File "meinheld/mlogging.py", line 187, in _access 'h': environ.get('REMOTE_ADDR', '-'), SystemError: <method 'get' of 'dict' objects> returned a result with an error set ``` Low priority: 12x in 2yrs, but might be worth looking into with spare time.
[ { "content": "import newrelic.agent\nfrom django.http import HttpResponseBadRequest, JsonResponse\nfrom django.shortcuts import render\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views.decorators.http import require_GET\n\nfrom kuma.core.decorators import (\n block_user_agents,\n ensure_wiki_domain,\n shared_cache_control,\n)\n\nfrom ..constants import ALLOWED_TAGS, REDIRECT_CONTENT\nfrom ..decorators import allow_CORS_GET\nfrom ..models import Document, EditorToolbar\n\n\n@ensure_wiki_domain\n@shared_cache_control\n@require_GET\ndef ckeditor_config(request):\n \"\"\"\n Return ckeditor config from database\n \"\"\"\n default_config = EditorToolbar.objects.filter(name=\"default\")\n if default_config.exists():\n code = default_config[0].code\n else:\n code = \"\"\n\n context = {\n \"editor_config\": code,\n \"redirect_pattern\": REDIRECT_CONTENT,\n \"allowed_tags\": \" \".join(ALLOWED_TAGS),\n }\n return render(\n request,\n \"wiki/ckeditor_config.js\",\n context,\n content_type=\"application/x-javascript\",\n )\n\n\n@shared_cache_control\[email protected]_trace()\n@block_user_agents\n@require_GET\n@allow_CORS_GET\ndef autosuggest_documents(request):\n \"\"\"\n Returns the closest title matches for front-end autosuggests\n \"\"\"\n partial_title = request.GET.get(\"term\", \"\")\n locale = request.GET.get(\"locale\", False)\n current_locale = request.GET.get(\"current_locale\", False)\n exclude_current_locale = request.GET.get(\"exclude_current_locale\", False)\n\n if not partial_title:\n # Only handle actual autosuggest requests, not requests for a\n # memory-busting list of all documents.\n return HttpResponseBadRequest(\n _(\n \"Autosuggest requires a partial \"\n \"title. For a full document \"\n \"index, see the main page.\"\n )\n )\n\n # Retrieve all documents that aren't redirects\n docs = (\n Document.objects.extra(select={\"length\": \"Length(slug)\"})\n .filter(title__icontains=partial_title, is_redirect=0)\n .exclude(slug__icontains=\"Talk:\") # Remove old talk pages\n .order_by(\"title\", \"length\")\n )\n\n # All locales are assumed, unless a specific locale is requested or banned\n if locale:\n docs = docs.filter(locale=locale)\n if current_locale:\n docs = docs.filter(locale=request.LANGUAGE_CODE)\n if exclude_current_locale:\n docs = docs.exclude(locale=request.LANGUAGE_CODE)\n\n # Generates a list of acceptable docs\n docs_list = []\n for doc in docs:\n data = doc.get_json_data()\n data[\"label\"] += \" [\" + doc.locale + \"]\"\n docs_list.append(data)\n\n return JsonResponse(docs_list, safe=False)\n", "path": "kuma/wiki/views/misc.py" } ]
[ { "content": "import newrelic.agent\nfrom django.http import HttpResponseBadRequest, JsonResponse\nfrom django.shortcuts import render\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views.decorators.http import require_GET\n\nfrom kuma.core.decorators import (\n block_user_agents,\n ensure_wiki_domain,\n shared_cache_control,\n)\n\nfrom ..constants import ALLOWED_TAGS, REDIRECT_CONTENT\nfrom ..decorators import allow_CORS_GET\nfrom ..models import Document, EditorToolbar\n\n\n@ensure_wiki_domain\n@shared_cache_control\n@require_GET\ndef ckeditor_config(request):\n \"\"\"\n Return ckeditor config from database\n \"\"\"\n default_config = EditorToolbar.objects.filter(name=\"default\")\n if default_config.exists():\n code = default_config[0].code\n else:\n code = \"\"\n\n context = {\n \"editor_config\": code,\n \"redirect_pattern\": REDIRECT_CONTENT,\n \"allowed_tags\": \" \".join(ALLOWED_TAGS),\n }\n return render(\n request,\n \"wiki/ckeditor_config.js\",\n context,\n content_type=\"application/x-javascript\",\n )\n\n\n@shared_cache_control\[email protected]_trace()\n@block_user_agents\n@require_GET\n@allow_CORS_GET\ndef autosuggest_documents(request):\n \"\"\"\n Returns the closest title matches for front-end autosuggests\n \"\"\"\n partial_title = request.GET.get(\"term\", \"\")\n locale = request.GET.get(\"locale\", False)\n current_locale = request.GET.get(\"current_locale\", False)\n exclude_current_locale = request.GET.get(\"exclude_current_locale\", False)\n\n if not partial_title:\n # Only handle actual autosuggest requests, not requests for a\n # memory-busting list of all documents.\n return HttpResponseBadRequest(\n _(\n \"Autosuggest requires a partial \"\n \"title. For a full document \"\n \"index, see the main page.\"\n )\n )\n\n # Retrieve all documents that aren't redirects\n docs = (\n Document.objects.extra(select={\"length\": \"Length(slug)\"})\n .filter(title__icontains=partial_title, is_redirect=0)\n .exclude(slug__icontains=\"Talk:\") # Remove old talk pages\n .order_by(\"title\", \"length\")\n )\n\n # All locales are assumed, unless a specific locale is requested or banned\n if locale:\n docs = docs.filter(locale=locale)\n if current_locale:\n docs = docs.filter(locale=request.LANGUAGE_CODE)\n if exclude_current_locale:\n docs = docs.exclude(locale=request.LANGUAGE_CODE)\n\n # Generates a list of acceptable docs\n docs_list = []\n for doc in docs[:100]:\n data = doc.get_json_data()\n data[\"label\"] += \" [\" + doc.locale + \"]\"\n docs_list.append(data)\n\n return JsonResponse(docs_list, safe=False)\n", "path": "kuma/wiki/views/misc.py" } ]
diff --git a/kuma/wiki/views/misc.py b/kuma/wiki/views/misc.py index 958b29250d0..4347233e4cf 100644 --- a/kuma/wiki/views/misc.py +++ b/kuma/wiki/views/misc.py @@ -84,7 +84,7 @@ def autosuggest_documents(request): # Generates a list of acceptable docs docs_list = [] - for doc in docs: + for doc in docs[:100]: data = doc.get_json_data() data["label"] += " [" + doc.locale + "]" docs_list.append(data)
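The patch above caps the autosuggest queryset at 100 documents. Because Django querysets are lazy, the slice becomes a SQL `LIMIT`, so the view no longer materialises and serialises every matching `Document` (the likely source of the timeout behind the Sentry report). A small sketch, assuming a Django shell inside the kuma project:

```python
# Sketch: queryset slicing is lazy and compiles to a SQL LIMIT, so at most
# 100 rows are fetched and serialised per autosuggest request.
from kuma.wiki.models import Document

docs = (
    Document.objects.filter(title__icontains="css", is_redirect=0)
    .exclude(slug__icontains="Talk:")
    .order_by("title")
)
print(docs[:100].query)  # the generated SQL now ends with "LIMIT 100"
```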
DjangoGirls__djangogirls-926
DataError: value too long for type character varying(30) Sentry Issue: [DJANGO-GIRLS-WEBSITE-60](https://django-girls.sentry.io/issues/4635355583/?referrer=github_integration) ``` StringDataRightTruncation: value too long for type character varying(30) File "django/db/backends/utils.py", line 84, in _execute return self.cursor.execute(sql, params) DataError: value too long for type character varying(30) (20 additional frame(s) were not displayed) ... File "organize/views.py", line 50, in done application = EventApplication.object.create(**data_dict) File "organize/models.py", line 75, in create return super().create(**data_dict) File "organize/models.py", line 146, in save super().save(*args, **kwargs) ```
[ { "content": "from datetime import date, datetime, timedelta\n\nimport requests\nfrom django.utils import timezone\nfrom django_date_extensions.fields import ApproximateDate\n\nfrom .models import Event\n\nNOMINATIM_URL = \"https://nominatim.openstreetmap.org/search\"\n\n\ndef get_coordinates_for_city(city, country):\n q = f\"{city}, {country}\"\n req = requests.get(NOMINATIM_URL, params={\"format\": \"json\", \"q\": q})\n\n try:\n data = req.json()[0]\n return f'{data[\"lat\"]}, {data[\"lon\"]}'\n except (IndexError, KeyError):\n return None\n\n\ndef get_event(page_url, is_user_authenticated, is_preview):\n now = timezone.now()\n now_approx = ApproximateDate(year=now.year, month=now.month, day=now.day)\n try:\n event = Event.objects.get(page_url=page_url)\n except Event.DoesNotExist:\n return None\n except Event.MultipleObjectsReturned:\n event = Event.objects.filter(page_url=page_url).order_by(\"-date\").first()\n\n if not (is_user_authenticated or is_preview) and not event.is_page_live:\n try:\n past = event.date <= now_approx\n except AttributeError:\n past = True\n return page_url, past\n\n return event\n\n\ndef get_approximate_date(date_str):\n try:\n date_obj = datetime.strptime(date_str, \"%d/%m/%Y\")\n return ApproximateDate(year=date_obj.year, month=date_obj.month, day=date_obj.day)\n except ValueError:\n try:\n date_obj = datetime.strptime(date_str, \"%m/%Y\")\n return ApproximateDate(year=date_obj.year, month=date_obj.month)\n except ValueError:\n return None\n\n\ndef next_sunday(day):\n \"\"\"\n Return a date object corresponding to the next Sunday after the given date.\n If the given date is a Sunday, return the Sunday next week.\n \"\"\"\n if day.weekday() == 6: # sunday\n return day + timedelta(days=7)\n else:\n return day + timedelta(days=(6 - day.weekday()))\n\n\ndef next_deadline():\n \"\"\"\n Return the next deadline when we need to send invoices to GitHub.\n Deadlines are every second Sunday, starting from September 4th 2016.\n \"\"\"\n\n today = date.today()\n\n days_since_starting_sunday = (today - date(2016, 9, 4)).days\n\n if days_since_starting_sunday % 14 < 7:\n return next_sunday(next_sunday(today))\n else:\n return next_sunday(today)\n", "path": "core/utils.py" } ]
[ { "content": "from datetime import date, datetime, timedelta\n\nimport requests\nfrom django.utils import timezone\nfrom django_date_extensions.fields import ApproximateDate\n\nfrom .models import Event\n\nNOMINATIM_URL = \"https://nominatim.openstreetmap.org/search\"\n\n\ndef get_coordinates_for_city(city, country):\n q = f\"{city}, {country}\"\n req = requests.get(NOMINATIM_URL, params={\"format\": \"json\", \"q\": q})\n\n try:\n data = req.json()[0]\n formatted_lat = \"{:.7f}\".format(float(data[\"lat\"]))\n formatted_lon = \"{:.7f}\".format(float(data[\"lon\"]))\n return f\"{formatted_lat}, {formatted_lon}\"\n except (IndexError, KeyError):\n return None\n\n\ndef get_event(page_url, is_user_authenticated, is_preview):\n now = timezone.now()\n now_approx = ApproximateDate(year=now.year, month=now.month, day=now.day)\n try:\n event = Event.objects.get(page_url=page_url)\n except Event.DoesNotExist:\n return None\n except Event.MultipleObjectsReturned:\n event = Event.objects.filter(page_url=page_url).order_by(\"-date\").first()\n\n if not (is_user_authenticated or is_preview) and not event.is_page_live:\n past = event.date <= now_approx\n return page_url, past\n\n return event\n\n\ndef get_approximate_date(date_str):\n try:\n date_obj = datetime.strptime(date_str, \"%d/%m/%Y\")\n return ApproximateDate(year=date_obj.year, month=date_obj.month, day=date_obj.day)\n except ValueError:\n try:\n date_obj = datetime.strptime(date_str, \"%m/%Y\")\n return ApproximateDate(year=date_obj.year, month=date_obj.month)\n except ValueError:\n return None\n\n\ndef next_sunday(day):\n \"\"\"\n Return a date object corresponding to the next Sunday after the given date.\n If the given date is a Sunday, return the Sunday next week.\n \"\"\"\n if day.weekday() == 6: # sunday\n return day + timedelta(days=7)\n else:\n return day + timedelta(days=(6 - day.weekday()))\n\n\ndef next_deadline():\n \"\"\"\n Return the next deadline when we need to send invoices to GitHub.\n Deadlines are every second Sunday, starting from September 4th 2016.\n \"\"\"\n\n today = date.today()\n\n days_since_starting_sunday = (today - date(2016, 9, 4)).days\n\n if days_since_starting_sunday % 14 < 7:\n return next_sunday(next_sunday(today))\n else:\n return next_sunday(today)\n", "path": "core/utils.py" } ]
diff --git a/core/utils.py b/core/utils.py index b6800a327..ef589f72f 100644 --- a/core/utils.py +++ b/core/utils.py @@ -15,7 +15,9 @@ def get_coordinates_for_city(city, country): try: data = req.json()[0] - return f'{data["lat"]}, {data["lon"]}' + formatted_lat = "{:.7f}".format(float(data["lat"])) + formatted_lon = "{:.7f}".format(float(data["lon"])) + return f"{formatted_lat}, {formatted_lon}" except (IndexError, KeyError): return None diff --git a/tests/core/test_utils.py b/tests/core/test_utils.py index 1036e60fa..c52852195 100644 --- a/tests/core/test_utils.py +++ b/tests/core/test_utils.py @@ -24,7 +24,9 @@ def test_get_coordinates_for_city(mock_get): "q": "London, UK", }, ) - assert result == "1.23, 4.56" + expected_lat = "{:.7f}".format(float("1.23")) + expected_lon = "{:.7f}".format(float("4.56")) + assert result == f"{expected_lat}, {expected_lon}" @mock.patch("requests.get")
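The patch above formats both coordinates to seven decimal places before building the `'lat, lon'` string. Nominatim can return much longer decimal strings, and the raw interpolation could overflow the 30-character column behind the Sentry error. A quick illustration with made-up values of that kind:

```python
# Sketch with hypothetical Nominatim-style values: the raw interpolation can
# exceed 30 characters, while the 7-decimal format stays well under the limit.
lat, lon = "51.50735089999999578", "-0.12775829999999999"  # made-up example values

raw = f"{lat}, {lon}"
fixed = f"{float(lat):.7f}, {float(lon):.7f}"

print(len(raw), raw)      # 42 characters -> DataError on a varchar(30) column
print(len(fixed), fixed)  # 22 characters -> fits
```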
qtile__qtile-1578
qtile fails to load when setting the default locale raises `locale.Error`
[ { "content": "# Copyright (c) 2008, Aldo Cortesi. All rights reserved.\n# Copyright (c) 2011, Florian Mounier\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n# Set the locale before any widgets or anything are imported, so any widget\n# whose defaults depend on a reasonable locale sees something reasonable.\nimport locale\nimport logging\nfrom os import path, getenv, makedirs\n\nfrom libqtile.log_utils import init_log, logger\nfrom libqtile import confreader\nfrom libqtile.backend.x11 import xcore\n\nlocale.setlocale(locale.LC_ALL, locale.getdefaultlocale()) # type: ignore\n\ntry:\n import pkg_resources\n VERSION = pkg_resources.require(\"qtile\")[0].version\nexcept (pkg_resources.DistributionNotFound, ImportError):\n VERSION = 'dev'\n\n\ndef rename_process():\n \"\"\"\n Try to rename the qtile process if py-setproctitle is installed:\n\n http://code.google.com/p/py-setproctitle/\n\n Will fail silently if it's not installed. Setting the title lets you do\n stuff like \"killall qtile\".\n \"\"\"\n try:\n import setproctitle\n setproctitle.setproctitle(\"qtile\")\n except ImportError:\n pass\n\n\ndef make_qtile():\n from argparse import ArgumentParser\n parser = ArgumentParser(\n description='A full-featured, pure-Python tiling window manager.',\n prog='qtile',\n )\n parser.add_argument(\n '--version',\n action='version',\n version=VERSION,\n )\n parser.add_argument(\n \"-c\", \"--config\",\n action=\"store\",\n default=path.expanduser(path.join(\n getenv('XDG_CONFIG_HOME', '~/.config'), 'qtile', 'config.py')),\n dest=\"configfile\",\n help='Use the specified configuration file',\n )\n parser.add_argument(\n \"-s\", \"--socket\",\n action=\"store\",\n default=None,\n dest=\"socket\",\n help='Path of the Qtile IPC socket.'\n )\n parser.add_argument(\n \"-n\", \"--no-spawn\",\n action=\"store_true\",\n default=False,\n dest=\"no_spawn\",\n help='Avoid spawning apps. 
(Used for restart)'\n )\n parser.add_argument(\n '-l', '--log-level',\n default='WARNING',\n dest='log_level',\n choices=('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'),\n help='Set qtile log level'\n )\n parser.add_argument(\n '--with-state',\n default=None,\n dest='state',\n help='Pickled QtileState object (typically used only internally)',\n )\n options = parser.parse_args()\n log_level = getattr(logging, options.log_level)\n init_log(log_level=log_level)\n\n kore = xcore.XCore()\n try:\n if not path.isfile(options.configfile):\n try:\n makedirs(path.dirname(options.configfile), exist_ok=True)\n from shutil import copyfile\n default_config_path = path.join(path.dirname(__file__),\n \"..\",\n \"resources\",\n \"default_config.py\")\n copyfile(default_config_path, options.configfile)\n logger.info('Copied default_config.py to %s', options.configfile)\n except Exception as e:\n logger.exception('Failed to copy default_config.py to %s: (%s)',\n options.configfile, e)\n\n config = confreader.Config.from_file(kore, options.configfile)\n except Exception as e:\n logger.exception('Error while reading config file (%s)', e)\n config = confreader.Config()\n from libqtile.widget import TextBox\n widgets = config.screens[0].bottom.widgets\n widgets.insert(0, TextBox('Config Err!'))\n\n # XXX: the import is here because we need to call init_log\n # before start importing stuff\n from libqtile.core import session_manager\n return session_manager.SessionManager(\n kore,\n config,\n fname=options.socket,\n no_spawn=options.no_spawn,\n state=options.state,\n )\n\n\ndef main():\n rename_process()\n q = make_qtile()\n try:\n q.loop()\n except Exception:\n logger.exception('Qtile crashed')\n logger.info('Exiting...')\n", "path": "libqtile/scripts/qtile.py" } ]
[ { "content": "# Copyright (c) 2008, Aldo Cortesi. All rights reserved.\n# Copyright (c) 2011, Florian Mounier\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n# Set the locale before any widgets or anything are imported, so any widget\n# whose defaults depend on a reasonable locale sees something reasonable.\nimport locale\nimport logging\nfrom os import path, getenv, makedirs\n\nfrom libqtile.log_utils import init_log, logger\nfrom libqtile import confreader\nfrom libqtile.backend.x11 import xcore\n\ntry:\n locale.setlocale(locale.LC_ALL, locale.getdefaultlocale()) # type: ignore\nexcept locale.Error:\n pass\n\n\ntry:\n import pkg_resources\n VERSION = pkg_resources.require(\"qtile\")[0].version\nexcept (pkg_resources.DistributionNotFound, ImportError):\n VERSION = 'dev'\n\n\ndef rename_process():\n \"\"\"\n Try to rename the qtile process if py-setproctitle is installed:\n\n http://code.google.com/p/py-setproctitle/\n\n Will fail silently if it's not installed. Setting the title lets you do\n stuff like \"killall qtile\".\n \"\"\"\n try:\n import setproctitle\n setproctitle.setproctitle(\"qtile\")\n except ImportError:\n pass\n\n\ndef make_qtile():\n from argparse import ArgumentParser\n parser = ArgumentParser(\n description='A full-featured, pure-Python tiling window manager.',\n prog='qtile',\n )\n parser.add_argument(\n '--version',\n action='version',\n version=VERSION,\n )\n parser.add_argument(\n \"-c\", \"--config\",\n action=\"store\",\n default=path.expanduser(path.join(\n getenv('XDG_CONFIG_HOME', '~/.config'), 'qtile', 'config.py')),\n dest=\"configfile\",\n help='Use the specified configuration file',\n )\n parser.add_argument(\n \"-s\", \"--socket\",\n action=\"store\",\n default=None,\n dest=\"socket\",\n help='Path of the Qtile IPC socket.'\n )\n parser.add_argument(\n \"-n\", \"--no-spawn\",\n action=\"store_true\",\n default=False,\n dest=\"no_spawn\",\n help='Avoid spawning apps. 
(Used for restart)'\n )\n parser.add_argument(\n '-l', '--log-level',\n default='WARNING',\n dest='log_level',\n choices=('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'),\n help='Set qtile log level'\n )\n parser.add_argument(\n '--with-state',\n default=None,\n dest='state',\n help='Pickled QtileState object (typically used only internally)',\n )\n options = parser.parse_args()\n log_level = getattr(logging, options.log_level)\n init_log(log_level=log_level)\n\n kore = xcore.XCore()\n try:\n if not path.isfile(options.configfile):\n try:\n makedirs(path.dirname(options.configfile), exist_ok=True)\n from shutil import copyfile\n default_config_path = path.join(path.dirname(__file__),\n \"..\",\n \"resources\",\n \"default_config.py\")\n copyfile(default_config_path, options.configfile)\n logger.info('Copied default_config.py to %s', options.configfile)\n except Exception as e:\n logger.exception('Failed to copy default_config.py to %s: (%s)',\n options.configfile, e)\n\n config = confreader.Config.from_file(kore, options.configfile)\n except Exception as e:\n logger.exception('Error while reading config file (%s)', e)\n config = confreader.Config()\n from libqtile.widget import TextBox\n widgets = config.screens[0].bottom.widgets\n widgets.insert(0, TextBox('Config Err!'))\n\n # XXX: the import is here because we need to call init_log\n # before start importing stuff\n from libqtile.core import session_manager\n return session_manager.SessionManager(\n kore,\n config,\n fname=options.socket,\n no_spawn=options.no_spawn,\n state=options.state,\n )\n\n\ndef main():\n rename_process()\n q = make_qtile()\n try:\n q.loop()\n except Exception:\n logger.exception('Qtile crashed')\n logger.info('Exiting...')\n", "path": "libqtile/scripts/qtile.py" } ]
diff --git a/libqtile/scripts/qtile.py b/libqtile/scripts/qtile.py index 2b919a450f..6b64057f43 100644 --- a/libqtile/scripts/qtile.py +++ b/libqtile/scripts/qtile.py @@ -29,7 +29,11 @@ from libqtile import confreader from libqtile.backend.x11 import xcore -locale.setlocale(locale.LC_ALL, locale.getdefaultlocale()) # type: ignore +try: + locale.setlocale(locale.LC_ALL, locale.getdefaultlocale()) # type: ignore +except locale.Error: + pass + try: import pkg_resources
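The patch above wraps the `locale.setlocale()` call in a `try/except locale.Error`. `locale.getdefaultlocale()` can report a locale that is not actually generated on the system, in which case `setlocale()` raises `locale.Error` and, before the patch, aborted qtile at import time. A minimal sketch of that failure mode, using a deliberately invalid locale name:

```python
# Sketch of the failure the patch guards against.
import locale

try:
    # "xx_XX" stands in for any locale that is not generated on the system.
    locale.setlocale(locale.LC_ALL, ("xx_XX", "UTF-8"))
except locale.Error as exc:
    print(f"locale not available, keeping the default: {exc}")
```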
archlinux__archinstall-2190
Password visible on Profiles list After creating a new user, the password is visible in plain text under the list of available profiles; is this the intended behavior? ![image](https://github.com/archlinux/archinstall/assets/99000334/ab51d3e7-4b62-452e-9da4-86deb872be87) ![image](https://github.com/archlinux/archinstall/assets/99000334/67d447e0-a01a-4ed6-a008-817716e21dc2)
[ { "content": "import logging\nimport os\nimport sys\nimport unicodedata\nfrom enum import Enum\n\nfrom pathlib import Path\nfrom typing import Dict, Union, List, Any, Callable, Optional\nfrom dataclasses import asdict, is_dataclass\n\nfrom .storage import storage\n\n\nclass FormattedOutput:\n\n\t@classmethod\n\tdef _get_values(\n\t\tcls,\n\t\to: Any,\n\t\tclass_formatter: Optional[Union[str, Callable]] = None,\n\t\tfilter_list: List[str] = []\n\t) -> Dict[str, Any]:\n\t\t\"\"\"\n\t\tthe original values returned a dataclass as dict thru the call to some specific methods\n\t\tthis version allows thru the parameter class_formatter to call a dynamically selected formatting method.\n\t\tCan transmit a filter list to the class_formatter,\n\t\t\"\"\"\n\t\tif class_formatter:\n\t\t\t# if invoked per reference it has to be a standard function or a classmethod.\n\t\t\t# A method of an instance does not make sense\n\t\t\tif callable(class_formatter):\n\t\t\t\treturn class_formatter(o, filter_list)\n\t\t\t# if is invoked by name we restrict it to a method of the class. No need to mess more\n\t\t\telif hasattr(o, class_formatter) and callable(getattr(o, class_formatter)):\n\t\t\t\tfunc = getattr(o, class_formatter)\n\t\t\t\treturn func(filter_list)\n\n\t\t\traise ValueError('Unsupported formatting call')\n\t\telif hasattr(o, 'table_data'):\n\t\t\treturn o.table_data()\n\t\telif is_dataclass(o):\n\t\t\treturn asdict(o)\n\t\telse:\n\t\t\treturn o.__dict__\n\n\t@classmethod\n\tdef as_table(\n\t\tcls,\n\t\tobj: List[Any],\n\t\tclass_formatter: Optional[Union[str, Callable]] = None,\n\t\tfilter_list: List[str] = [],\n\t\tcapitalize: bool = False\n\t) -> str:\n\t\t\"\"\" variant of as_table (subtly different code) which has two additional parameters\n\t\tfilter which is a list of fields which will be shon\n\t\tclass_formatter a special method to format the outgoing data\n\n\t\tA general comment, the format selected for the output (a string where every data record is separated by newline)\n\t\tis for compatibility with a print statement\n\t\tAs_table_filter can be a drop in replacement for as_table\n\t\t\"\"\"\n\t\traw_data = [cls._get_values(o, class_formatter, filter_list) for o in obj]\n\n\t\t# determine the maximum column size\n\t\tcolumn_width: Dict[str, int] = {}\n\t\tfor o in raw_data:\n\t\t\tfor k, v in o.items():\n\t\t\t\tif not filter_list or k in filter_list:\n\t\t\t\t\tcolumn_width.setdefault(k, 0)\n\t\t\t\t\tcolumn_width[k] = max([column_width[k], len(str(v)), len(k)])\n\n\t\tif not filter_list:\n\t\t\tfilter_list = list(column_width.keys())\n\n\t\t# create the header lines\n\t\toutput = ''\n\t\tkey_list = []\n\t\tfor key in filter_list:\n\t\t\twidth = column_width[key]\n\t\t\tkey = key.replace('!', '').replace('_', ' ')\n\n\t\t\tif capitalize:\n\t\t\t\tkey = key.capitalize()\n\n\t\t\tkey_list.append(unicode_ljust(key, width))\n\n\t\toutput += ' | '.join(key_list) + '\\n'\n\t\toutput += '-' * len(output) + '\\n'\n\n\t\t# create the data lines\n\t\tfor record in raw_data:\n\t\t\tobj_data = []\n\t\t\tfor key in filter_list:\n\t\t\t\twidth = column_width.get(key, len(key))\n\t\t\t\tvalue = record.get(key, '')\n\n\t\t\t\tif '!' 
in key:\n\t\t\t\t\tvalue = '*' * width\n\n\t\t\t\tif isinstance(value, (int, float)) or (isinstance(value, str) and value.isnumeric()):\n\t\t\t\t\tobj_data.append(unicode_rjust(str(value), width))\n\t\t\t\telse:\n\t\t\t\t\tobj_data.append(unicode_ljust(str(value), width))\n\n\t\t\toutput += ' | '.join(obj_data) + '\\n'\n\n\t\treturn output\n\n\t@classmethod\n\tdef as_columns(cls, entries: List[str], cols: int) -> str:\n\t\t\"\"\"\n\t\tWill format a list into a given number of columns\n\t\t\"\"\"\n\t\tchunks = []\n\t\toutput = ''\n\n\t\tfor i in range(0, len(entries), cols):\n\t\t\tchunks.append(entries[i:i + cols])\n\n\t\tfor row in chunks:\n\t\t\tout_fmt = '{: <30} ' * len(row)\n\t\t\toutput += out_fmt.format(*row) + '\\n'\n\n\t\treturn output\n\n\nclass Journald:\n\t@staticmethod\n\tdef log(message: str, level: int = logging.DEBUG) -> None:\n\t\ttry:\n\t\t\timport systemd.journal # type: ignore\n\t\texcept ModuleNotFoundError:\n\t\t\treturn None\n\n\t\tlog_adapter = logging.getLogger('archinstall')\n\t\tlog_fmt = logging.Formatter(\"[%(levelname)s]: %(message)s\")\n\t\tlog_ch = systemd.journal.JournalHandler()\n\t\tlog_ch.setFormatter(log_fmt)\n\t\tlog_adapter.addHandler(log_ch)\n\t\tlog_adapter.setLevel(logging.DEBUG)\n\n\t\tlog_adapter.log(level, message)\n\n\ndef _check_log_permissions():\n\tfilename = storage.get('LOG_FILE', None)\n\tlog_dir = storage.get('LOG_PATH', Path('./'))\n\n\tif not filename:\n\t\traise ValueError('No log file name defined')\n\n\tlog_file = log_dir / filename\n\n\ttry:\n\t\tlog_dir.mkdir(exist_ok=True, parents=True)\n\t\tlog_file.touch(exist_ok=True)\n\n\t\twith log_file.open('a') as fp:\n\t\t\tfp.write('')\n\texcept PermissionError:\n\t\t# Fallback to creating the log file in the current folder\n\t\tfallback_dir = Path('./').absolute()\n\t\tfallback_log_file = fallback_dir / filename\n\n\t\tfallback_log_file.touch(exist_ok=True)\n\n\t\tstorage['LOG_PATH'] = fallback_dir\n\t\twarn(f'Not enough permission to place log file at {log_file}, creating it in {fallback_log_file} instead')\n\n\ndef _supports_color() -> bool:\n\t\"\"\"\n\tFound first reference here:\n\t\thttps://stackoverflow.com/questions/7445658/how-to-detect-if-the-console-does-support-ansi-escape-codes-in-python\n\tAnd re-used this:\n\t\thttps://github.com/django/django/blob/master/django/core/management/color.py#L12\n\n\tReturn True if the running system's terminal supports color,\n\tand False otherwise.\n\t\"\"\"\n\tsupported_platform = sys.platform != 'win32' or 'ANSICON' in os.environ\n\n\t# isatty is not always implemented, #6223.\n\tis_a_tty = hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()\n\treturn supported_platform and is_a_tty\n\n\nclass Font(Enum):\n\tbold = '1'\n\titalic = '3'\n\tunderscore = '4'\n\tblink = '5'\n\treverse = '7'\n\tconceal = '8'\n\n\ndef _stylize_output(\n\ttext: str,\n\tfg: str,\n\tbg: Optional[str],\n\treset: bool,\n\tfont: List[Font] = [],\n) -> str:\n\t\"\"\"\n\tHeavily influenced by:\n\t\thttps://github.com/django/django/blob/ae8338daf34fd746771e0678081999b656177bae/django/utils/termcolors.py#L13\n\tColor options here:\n\t\thttps://askubuntu.com/questions/528928/how-to-do-underline-bold-italic-strikethrough-color-background-and-size-i\n\n\tAdds styling to a text given a set of color arguments.\n\t\"\"\"\n\tcolors = {\n\t\t'black' : '0',\n\t\t'red' : '1',\n\t\t'green' : '2',\n\t\t'yellow' : '3',\n\t\t'blue' : '4',\n\t\t'magenta' : '5',\n\t\t'cyan' : '6',\n\t\t'white' : '7',\n\t\t'teal' : '8;5;109', # Extended 256-bit colors (not always supported)\n\t\t'orange' 
: '8;5;208', # https://www.lihaoyi.com/post/BuildyourownCommandLinewithANSIescapecodes.html#256-colors\n\t\t'darkorange' : '8;5;202',\n\t\t'gray' : '8;5;246',\n\t\t'grey' : '8;5;246',\n\t\t'darkgray' : '8;5;240',\n\t\t'lightgray' : '8;5;256'\n\t}\n\n\tforeground = {key: f'3{colors[key]}' for key in colors}\n\tbackground = {key: f'4{colors[key]}' for key in colors}\n\tcode_list = []\n\n\tif text == '' and reset:\n\t\treturn '\\x1b[%sm' % '0'\n\n\tcode_list.append(foreground[str(fg)])\n\n\tif bg:\n\t\tcode_list.append(background[str(bg)])\n\n\tfor o in font:\n\t\tcode_list.append(o.value)\n\n\tansi = ';'.join(code_list)\n\n\treturn f'\\033[{ansi}m{text}\\033[0m'\n\n\ndef info(\n\t*msgs: str,\n\tlevel: int = logging.INFO,\n\tfg: str = 'white',\n\tbg: Optional[str] = None,\n\treset: bool = False,\n\tfont: List[Font] = []\n):\n\tlog(*msgs, level=level, fg=fg, bg=bg, reset=reset, font=font)\n\n\ndef debug(\n\t*msgs: str,\n\tlevel: int = logging.DEBUG,\n\tfg: str = 'white',\n\tbg: Optional[str] = None,\n\treset: bool = False,\n\tfont: List[Font] = []\n):\n\tlog(*msgs, level=level, fg=fg, bg=bg, reset=reset, font=font)\n\n\ndef error(\n\t*msgs: str,\n\tlevel: int = logging.ERROR,\n\tfg: str = 'red',\n\tbg: Optional[str] = None,\n\treset: bool = False,\n\tfont: List[Font] = []\n):\n\tlog(*msgs, level=level, fg=fg, bg=bg, reset=reset, font=font)\n\n\ndef warn(\n\t*msgs: str,\n\tlevel: int = logging.WARN,\n\tfg: str = 'yellow',\n\tbg: Optional[str] = None,\n\treset: bool = False,\n\tfont: List[Font] = []\n):\n\tlog(*msgs, level=level, fg=fg, bg=bg, reset=reset, font=font)\n\n\ndef log(\n\t*msgs: str,\n\tlevel: int = logging.INFO,\n\tfg: str = 'white',\n\tbg: Optional[str] = None,\n\treset: bool = False,\n\tfont: List[Font] = []\n):\n\t# leave this check here as we need to setup the logging\n\t# right from the beginning when the modules are loaded\n\t_check_log_permissions()\n\n\ttext = orig_string = ' '.join([str(x) for x in msgs])\n\n\t# Attempt to colorize the output if supported\n\t# Insert default colors and override with **kwargs\n\tif _supports_color():\n\t\ttext = _stylize_output(text, fg, bg, reset, font)\n\n\tlog_file: Path = storage['LOG_PATH'] / storage['LOG_FILE']\n\n\twith log_file.open('a') as fp:\n\t\tfp.write(f\"{orig_string}\\n\")\n\n\tJournald.log(text, level=level)\n\n\tfrom .menu import Menu\n\tif not Menu.is_menu_active():\n\t\t# Finally, print the log unless we skipped it based on level.\n\t\t# We use sys.stdout.write()+flush() instead of print() to try and\n\t\t# fix issue #94\n\t\tif level != logging.DEBUG or storage.get('arguments', {}).get('verbose', False):\n\t\t\tsys.stdout.write(f\"{text}\\n\")\n\t\t\tsys.stdout.flush()\n\ndef _count_wchars(string: str) -> int:\n\t\"Count the total number of wide characters contained in a string\"\n\treturn sum(unicodedata.east_asian_width(c) in 'FW' for c in string)\n\ndef unicode_ljust(string: str, width: int, fillbyte: str = ' ') -> str:\n\t\"\"\"Return a left-justified unicode string of length width.\n\t>>> unicode_ljust('Hello', 15, '*')\n\t'Hello**********'\n\t>>> unicode_ljust('你好', 15, '*')\n\t'你好***********'\n\t>>> unicode_ljust('안녕하세요', 15, '*')\n\t'안녕하세요*****'\n\t>>> unicode_ljust('こんにちは', 15, '*')\n\t'こんにちは*****'\n\t\"\"\"\n\treturn string.ljust(width - _count_wchars(string), fillbyte)\n\ndef unicode_rjust(string: str, width: int, fillbyte: str = ' ') -> str:\n\t\"\"\"Return a right-justified unicode string of length width.\n\t>>> unicode_rjust('Hello', 15, '*')\n\t'**********Hello'\n\t>>> unicode_rjust('你好', 15, 
'*')\n\t'***********你好'\n\t>>> unicode_rjust('안녕하세요', 15, '*')\n\t'*****안녕하세요'\n\t>>> unicode_rjust('こんにちは', 15, '*')\n\t'*****こんにちは'\n\t\"\"\"\n\treturn string.rjust(width - _count_wchars(string), fillbyte)\n", "path": "archinstall/lib/output.py" } ]
[ { "content": "import logging\nimport os\nimport sys\nimport unicodedata\nfrom enum import Enum\n\nfrom pathlib import Path\nfrom typing import Dict, Union, List, Any, Callable, Optional\nfrom dataclasses import asdict, is_dataclass\n\nfrom .storage import storage\n\n\nclass FormattedOutput:\n\n\t@classmethod\n\tdef _get_values(\n\t\tcls,\n\t\to: Any,\n\t\tclass_formatter: Optional[Union[str, Callable]] = None,\n\t\tfilter_list: List[str] = []\n\t) -> Dict[str, Any]:\n\t\t\"\"\"\n\t\tthe original values returned a dataclass as dict thru the call to some specific methods\n\t\tthis version allows thru the parameter class_formatter to call a dynamically selected formatting method.\n\t\tCan transmit a filter list to the class_formatter,\n\t\t\"\"\"\n\t\tif class_formatter:\n\t\t\t# if invoked per reference it has to be a standard function or a classmethod.\n\t\t\t# A method of an instance does not make sense\n\t\t\tif callable(class_formatter):\n\t\t\t\treturn class_formatter(o, filter_list)\n\t\t\t# if is invoked by name we restrict it to a method of the class. No need to mess more\n\t\t\telif hasattr(o, class_formatter) and callable(getattr(o, class_formatter)):\n\t\t\t\tfunc = getattr(o, class_formatter)\n\t\t\t\treturn func(filter_list)\n\n\t\t\traise ValueError('Unsupported formatting call')\n\t\telif hasattr(o, 'table_data'):\n\t\t\treturn o.table_data()\n\t\telif hasattr(o, 'json'):\n\t\t\treturn o.json()\n\t\telif is_dataclass(o):\n\t\t\treturn asdict(o)\n\t\telse:\n\t\t\treturn o.__dict__\n\n\t@classmethod\n\tdef as_table(\n\t\tcls,\n\t\tobj: List[Any],\n\t\tclass_formatter: Optional[Union[str, Callable]] = None,\n\t\tfilter_list: List[str] = [],\n\t\tcapitalize: bool = False\n\t) -> str:\n\t\t\"\"\" variant of as_table (subtly different code) which has two additional parameters\n\t\tfilter which is a list of fields which will be shon\n\t\tclass_formatter a special method to format the outgoing data\n\n\t\tA general comment, the format selected for the output (a string where every data record is separated by newline)\n\t\tis for compatibility with a print statement\n\t\tAs_table_filter can be a drop in replacement for as_table\n\t\t\"\"\"\n\t\traw_data = [cls._get_values(o, class_formatter, filter_list) for o in obj]\n\n\t\t# determine the maximum column size\n\t\tcolumn_width: Dict[str, int] = {}\n\t\tfor o in raw_data:\n\t\t\tfor k, v in o.items():\n\t\t\t\tif not filter_list or k in filter_list:\n\t\t\t\t\tcolumn_width.setdefault(k, 0)\n\t\t\t\t\tcolumn_width[k] = max([column_width[k], len(str(v)), len(k)])\n\n\t\tif not filter_list:\n\t\t\tfilter_list = list(column_width.keys())\n\n\t\t# create the header lines\n\t\toutput = ''\n\t\tkey_list = []\n\t\tfor key in filter_list:\n\t\t\twidth = column_width[key]\n\t\t\tkey = key.replace('!', '').replace('_', ' ')\n\n\t\t\tif capitalize:\n\t\t\t\tkey = key.capitalize()\n\n\t\t\tkey_list.append(unicode_ljust(key, width))\n\n\t\toutput += ' | '.join(key_list) + '\\n'\n\t\toutput += '-' * len(output) + '\\n'\n\n\t\t# create the data lines\n\t\tfor record in raw_data:\n\t\t\tobj_data = []\n\t\t\tfor key in filter_list:\n\t\t\t\twidth = column_width.get(key, len(key))\n\t\t\t\tvalue = record.get(key, '')\n\n\t\t\t\tif '!' 
in key:\n\t\t\t\t\tvalue = '*' * width\n\n\t\t\t\tif isinstance(value, (int, float)) or (isinstance(value, str) and value.isnumeric()):\n\t\t\t\t\tobj_data.append(unicode_rjust(str(value), width))\n\t\t\t\telse:\n\t\t\t\t\tobj_data.append(unicode_ljust(str(value), width))\n\n\t\t\toutput += ' | '.join(obj_data) + '\\n'\n\n\t\treturn output\n\n\t@classmethod\n\tdef as_columns(cls, entries: List[str], cols: int) -> str:\n\t\t\"\"\"\n\t\tWill format a list into a given number of columns\n\t\t\"\"\"\n\t\tchunks = []\n\t\toutput = ''\n\n\t\tfor i in range(0, len(entries), cols):\n\t\t\tchunks.append(entries[i:i + cols])\n\n\t\tfor row in chunks:\n\t\t\tout_fmt = '{: <30} ' * len(row)\n\t\t\toutput += out_fmt.format(*row) + '\\n'\n\n\t\treturn output\n\n\nclass Journald:\n\t@staticmethod\n\tdef log(message: str, level: int = logging.DEBUG) -> None:\n\t\ttry:\n\t\t\timport systemd.journal # type: ignore\n\t\texcept ModuleNotFoundError:\n\t\t\treturn None\n\n\t\tlog_adapter = logging.getLogger('archinstall')\n\t\tlog_fmt = logging.Formatter(\"[%(levelname)s]: %(message)s\")\n\t\tlog_ch = systemd.journal.JournalHandler()\n\t\tlog_ch.setFormatter(log_fmt)\n\t\tlog_adapter.addHandler(log_ch)\n\t\tlog_adapter.setLevel(logging.DEBUG)\n\n\t\tlog_adapter.log(level, message)\n\n\ndef _check_log_permissions():\n\tfilename = storage.get('LOG_FILE', None)\n\tlog_dir = storage.get('LOG_PATH', Path('./'))\n\n\tif not filename:\n\t\traise ValueError('No log file name defined')\n\n\tlog_file = log_dir / filename\n\n\ttry:\n\t\tlog_dir.mkdir(exist_ok=True, parents=True)\n\t\tlog_file.touch(exist_ok=True)\n\n\t\twith log_file.open('a') as fp:\n\t\t\tfp.write('')\n\texcept PermissionError:\n\t\t# Fallback to creating the log file in the current folder\n\t\tfallback_dir = Path('./').absolute()\n\t\tfallback_log_file = fallback_dir / filename\n\n\t\tfallback_log_file.touch(exist_ok=True)\n\n\t\tstorage['LOG_PATH'] = fallback_dir\n\t\twarn(f'Not enough permission to place log file at {log_file}, creating it in {fallback_log_file} instead')\n\n\ndef _supports_color() -> bool:\n\t\"\"\"\n\tFound first reference here:\n\t\thttps://stackoverflow.com/questions/7445658/how-to-detect-if-the-console-does-support-ansi-escape-codes-in-python\n\tAnd re-used this:\n\t\thttps://github.com/django/django/blob/master/django/core/management/color.py#L12\n\n\tReturn True if the running system's terminal supports color,\n\tand False otherwise.\n\t\"\"\"\n\tsupported_platform = sys.platform != 'win32' or 'ANSICON' in os.environ\n\n\t# isatty is not always implemented, #6223.\n\tis_a_tty = hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()\n\treturn supported_platform and is_a_tty\n\n\nclass Font(Enum):\n\tbold = '1'\n\titalic = '3'\n\tunderscore = '4'\n\tblink = '5'\n\treverse = '7'\n\tconceal = '8'\n\n\ndef _stylize_output(\n\ttext: str,\n\tfg: str,\n\tbg: Optional[str],\n\treset: bool,\n\tfont: List[Font] = [],\n) -> str:\n\t\"\"\"\n\tHeavily influenced by:\n\t\thttps://github.com/django/django/blob/ae8338daf34fd746771e0678081999b656177bae/django/utils/termcolors.py#L13\n\tColor options here:\n\t\thttps://askubuntu.com/questions/528928/how-to-do-underline-bold-italic-strikethrough-color-background-and-size-i\n\n\tAdds styling to a text given a set of color arguments.\n\t\"\"\"\n\tcolors = {\n\t\t'black' : '0',\n\t\t'red' : '1',\n\t\t'green' : '2',\n\t\t'yellow' : '3',\n\t\t'blue' : '4',\n\t\t'magenta' : '5',\n\t\t'cyan' : '6',\n\t\t'white' : '7',\n\t\t'teal' : '8;5;109', # Extended 256-bit colors (not always supported)\n\t\t'orange' 
: '8;5;208', # https://www.lihaoyi.com/post/BuildyourownCommandLinewithANSIescapecodes.html#256-colors\n\t\t'darkorange' : '8;5;202',\n\t\t'gray' : '8;5;246',\n\t\t'grey' : '8;5;246',\n\t\t'darkgray' : '8;5;240',\n\t\t'lightgray' : '8;5;256'\n\t}\n\n\tforeground = {key: f'3{colors[key]}' for key in colors}\n\tbackground = {key: f'4{colors[key]}' for key in colors}\n\tcode_list = []\n\n\tif text == '' and reset:\n\t\treturn '\\x1b[%sm' % '0'\n\n\tcode_list.append(foreground[str(fg)])\n\n\tif bg:\n\t\tcode_list.append(background[str(bg)])\n\n\tfor o in font:\n\t\tcode_list.append(o.value)\n\n\tansi = ';'.join(code_list)\n\n\treturn f'\\033[{ansi}m{text}\\033[0m'\n\n\ndef info(\n\t*msgs: str,\n\tlevel: int = logging.INFO,\n\tfg: str = 'white',\n\tbg: Optional[str] = None,\n\treset: bool = False,\n\tfont: List[Font] = []\n):\n\tlog(*msgs, level=level, fg=fg, bg=bg, reset=reset, font=font)\n\n\ndef debug(\n\t*msgs: str,\n\tlevel: int = logging.DEBUG,\n\tfg: str = 'white',\n\tbg: Optional[str] = None,\n\treset: bool = False,\n\tfont: List[Font] = []\n):\n\tlog(*msgs, level=level, fg=fg, bg=bg, reset=reset, font=font)\n\n\ndef error(\n\t*msgs: str,\n\tlevel: int = logging.ERROR,\n\tfg: str = 'red',\n\tbg: Optional[str] = None,\n\treset: bool = False,\n\tfont: List[Font] = []\n):\n\tlog(*msgs, level=level, fg=fg, bg=bg, reset=reset, font=font)\n\n\ndef warn(\n\t*msgs: str,\n\tlevel: int = logging.WARN,\n\tfg: str = 'yellow',\n\tbg: Optional[str] = None,\n\treset: bool = False,\n\tfont: List[Font] = []\n):\n\tlog(*msgs, level=level, fg=fg, bg=bg, reset=reset, font=font)\n\n\ndef log(\n\t*msgs: str,\n\tlevel: int = logging.INFO,\n\tfg: str = 'white',\n\tbg: Optional[str] = None,\n\treset: bool = False,\n\tfont: List[Font] = []\n):\n\t# leave this check here as we need to setup the logging\n\t# right from the beginning when the modules are loaded\n\t_check_log_permissions()\n\n\ttext = orig_string = ' '.join([str(x) for x in msgs])\n\n\t# Attempt to colorize the output if supported\n\t# Insert default colors and override with **kwargs\n\tif _supports_color():\n\t\ttext = _stylize_output(text, fg, bg, reset, font)\n\n\tlog_file: Path = storage['LOG_PATH'] / storage['LOG_FILE']\n\n\twith log_file.open('a') as fp:\n\t\tfp.write(f\"{orig_string}\\n\")\n\n\tJournald.log(text, level=level)\n\n\tfrom .menu import Menu\n\tif not Menu.is_menu_active():\n\t\t# Finally, print the log unless we skipped it based on level.\n\t\t# We use sys.stdout.write()+flush() instead of print() to try and\n\t\t# fix issue #94\n\t\tif level != logging.DEBUG or storage.get('arguments', {}).get('verbose', False):\n\t\t\tsys.stdout.write(f\"{text}\\n\")\n\t\t\tsys.stdout.flush()\n\ndef _count_wchars(string: str) -> int:\n\t\"Count the total number of wide characters contained in a string\"\n\treturn sum(unicodedata.east_asian_width(c) in 'FW' for c in string)\n\ndef unicode_ljust(string: str, width: int, fillbyte: str = ' ') -> str:\n\t\"\"\"Return a left-justified unicode string of length width.\n\t>>> unicode_ljust('Hello', 15, '*')\n\t'Hello**********'\n\t>>> unicode_ljust('你好', 15, '*')\n\t'你好***********'\n\t>>> unicode_ljust('안녕하세요', 15, '*')\n\t'안녕하세요*****'\n\t>>> unicode_ljust('こんにちは', 15, '*')\n\t'こんにちは*****'\n\t\"\"\"\n\treturn string.ljust(width - _count_wchars(string), fillbyte)\n\ndef unicode_rjust(string: str, width: int, fillbyte: str = ' ') -> str:\n\t\"\"\"Return a right-justified unicode string of length width.\n\t>>> unicode_rjust('Hello', 15, '*')\n\t'**********Hello'\n\t>>> unicode_rjust('你好', 15, 
'*')\n\t'***********你好'\n\t>>> unicode_rjust('안녕하세요', 15, '*')\n\t'*****안녕하세요'\n\t>>> unicode_rjust('こんにちは', 15, '*')\n\t'*****こんにちは'\n\t\"\"\"\n\treturn string.rjust(width - _count_wchars(string), fillbyte)\n", "path": "archinstall/lib/output.py" } ]
diff --git a/archinstall/lib/output.py b/archinstall/lib/output.py index 945a6c4fdb..62a1ba2738 100644 --- a/archinstall/lib/output.py +++ b/archinstall/lib/output.py @@ -38,6 +38,8 @@ def _get_values( raise ValueError('Unsupported formatting call') elif hasattr(o, 'table_data'): return o.table_data() + elif hasattr(o, 'json'): + return o.json() elif is_dataclass(o): return asdict(o) else:
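The patch above routes any object exposing a `json()` method through that method in `FormattedOutput._get_values()`. Columns whose keys contain `'!'` are already masked with asterisks by `as_table()` (the `value = '*' * width` branch), so a user model whose `json()` publishes the password under a `'!'`-prefixed key no longer leaks it in plain text. A small sketch of that convention with a made-up stand-in class (the real archinstall user model is assumed to do the equivalent, and `archinstall` is assumed to be importable in the environment):

```python
# Sketch of the '!'-key masking convention; DemoUser is a hypothetical
# stand-in, not the real archinstall user model.
from dataclasses import dataclass

from archinstall.lib.output import FormattedOutput


@dataclass
class DemoUser:
    username: str
    password: str

    def json(self):
        # keys containing '!' are rendered as asterisks by as_table()
        return {'username': self.username, '!password': self.password}


print(FormattedOutput.as_table([DemoUser('alice', 'hunter2')]))
# the 'password' column prints as '*********' instead of 'hunter2'
```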
pyca__cryptography-2855
Redundant exclude in setup.py's find_packages I think the call can be reduced from ``` python find_packages( where="src", exclude=["_cffi_src", "_cffi_src.*", "tests", "tests.*"] ) ``` to ``` python find_packages(where="src", exclude=["_cffi_src", "_cffi_src.*"]) ``` because of the `where="src"`. I verified this by printing the output from `setup.py`.
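A sketch of the verification the report describes, run from the root of the cryptography repository (assuming setuptools is installed):

```python
# Sketch of the described check: the "tests" patterns can never match
# anything under src/, so both calls return the same package list.
from setuptools import find_packages

with_extra_excludes = find_packages(
    where="src", exclude=["_cffi_src", "_cffi_src.*", "tests", "tests.*"]
)
without_extra_excludes = find_packages(where="src", exclude=["_cffi_src", "_cffi_src.*"])

print(sorted(with_extra_excludes) == sorted(without_extra_excludes))  # True
```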
[ { "content": "#!/usr/bin/env python\n\n# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport os\nimport platform\nimport subprocess\nimport sys\nfrom distutils.command.build import build\n\nimport pkg_resources\n\nfrom setuptools import find_packages, setup\nfrom setuptools.command.install import install\nfrom setuptools.command.test import test\n\n\nbase_dir = os.path.dirname(__file__)\nsrc_dir = os.path.join(base_dir, \"src\")\n\n# When executing the setup.py, we need to be able to import ourselves, this\n# means that we need to add the src/ directory to the sys.path.\nsys.path.insert(0, src_dir)\n\nabout = {}\nwith open(os.path.join(src_dir, \"cryptography\", \"__about__.py\")) as f:\n exec(f.read(), about)\n\n\nVECTORS_DEPENDENCY = \"cryptography_vectors=={0}\".format(about['__version__'])\n\nrequirements = [\n \"idna>=2.0\",\n \"pyasn1>=0.1.8\",\n \"six>=1.4.1\",\n \"setuptools>=11.3\",\n]\nsetup_requirements = []\n\nif sys.version_info < (3, 4):\n requirements.append(\"enum34\")\n\nif sys.version_info < (3, 3):\n requirements.append(\"ipaddress\")\n\nif platform.python_implementation() == \"PyPy\":\n if sys.pypy_version_info < (2, 6):\n raise RuntimeError(\n \"cryptography 1.0 is not compatible with PyPy < 2.6. Please \"\n \"upgrade PyPy to use this library.\"\n )\nelse:\n requirements.append(\"cffi>=1.4.1\")\n setup_requirements.append(\"cffi>=1.4.1\")\n\ntest_requirements = [\n \"pytest\",\n \"pretend\",\n \"iso8601\",\n \"pyasn1_modules\",\n]\nif sys.version_info[:2] > (2, 6):\n test_requirements.append(\"hypothesis>=1.11.4\")\n\n\n# If there's no vectors locally that probably means we are in a tarball and\n# need to go and get the matching vectors package from PyPi\nif not os.path.exists(os.path.join(base_dir, \"vectors/setup.py\")):\n test_requirements.append(VECTORS_DEPENDENCY)\n\n\ndef cc_is_available():\n return sys.platform == \"darwin\" and list(map(\n int, platform.mac_ver()[0].split(\".\"))) >= [10, 8, 0]\n\n\nbackends = [\n \"openssl = cryptography.hazmat.backends.openssl:backend\"\n]\n\nif cc_is_available():\n backends.append(\n \"commoncrypto = cryptography.hazmat.backends.commoncrypto:backend\",\n )\n\n\nclass PyTest(test):\n def finalize_options(self):\n test.finalize_options(self)\n self.test_args = []\n self.test_suite = True\n\n # This means there's a vectors/ folder with the package in here.\n # cd into it, install the vectors package and then refresh sys.path\n if VECTORS_DEPENDENCY not in test_requirements:\n subprocess.check_call(\n [sys.executable, \"setup.py\", \"install\"], cwd=\"vectors\"\n )\n pkg_resources.get_distribution(\"cryptography_vectors\").activate()\n\n def run_tests(self):\n # Import here because in module scope the eggs are not loaded.\n import pytest\n test_args = [os.path.join(base_dir, \"tests\")]\n errno = pytest.main(test_args)\n sys.exit(errno)\n\n\ndef keywords_with_side_effects(argv):\n \"\"\"\n Get a dictionary with setup keywords that (can) have side effects.\n\n :param argv: A list of strings with command line arguments.\n :returns: A dictionary with keyword arguments for the ``setup()`` function.\n\n This setup.py script uses the setuptools 'setup_requires' feature because\n this is required by the cffi package to compile extension modules. 
The\n purpose of ``keywords_with_side_effects()`` is to avoid triggering the cffi\n build process as a result of setup.py invocations that don't need the cffi\n module to be built (setup.py serves the dual purpose of exposing package\n metadata).\n\n All of the options listed by ``python setup.py --help`` that print\n information should be recognized here. The commands ``clean``,\n ``egg_info``, ``register``, ``sdist`` and ``upload`` are also recognized.\n Any combination of these options and commands is also supported.\n\n This function was originally based on the `setup.py script`_ of SciPy (see\n also the discussion in `pip issue #25`_).\n\n .. _pip issue #25: https://github.com/pypa/pip/issues/25\n .. _setup.py script: https://github.com/scipy/scipy/blob/master/setup.py\n \"\"\"\n no_setup_requires_arguments = (\n '-h', '--help',\n '-n', '--dry-run',\n '-q', '--quiet',\n '-v', '--verbose',\n '-V', '--version',\n '--author',\n '--author-email',\n '--classifiers',\n '--contact',\n '--contact-email',\n '--description',\n '--egg-base',\n '--fullname',\n '--help-commands',\n '--keywords',\n '--licence',\n '--license',\n '--long-description',\n '--maintainer',\n '--maintainer-email',\n '--name',\n '--no-user-cfg',\n '--obsoletes',\n '--platforms',\n '--provides',\n '--requires',\n '--url',\n 'clean',\n 'egg_info',\n 'register',\n 'sdist',\n 'upload',\n )\n\n def is_short_option(argument):\n \"\"\"Check whether a command line argument is a short option.\"\"\"\n return len(argument) >= 2 and argument[0] == '-' and argument[1] != '-'\n\n def expand_short_options(argument):\n \"\"\"Expand combined short options into canonical short options.\"\"\"\n return ('-' + char for char in argument[1:])\n\n def argument_without_setup_requirements(argv, i):\n \"\"\"Check whether a command line argument needs setup requirements.\"\"\"\n if argv[i] in no_setup_requires_arguments:\n # Simple case: An argument which is either an option or a command\n # which doesn't need setup requirements.\n return True\n elif (is_short_option(argv[i]) and\n all(option in no_setup_requires_arguments\n for option in expand_short_options(argv[i]))):\n # Not so simple case: Combined short options none of which need\n # setup requirements.\n return True\n elif argv[i - 1:i] == ['--egg-base']:\n # Tricky case: --egg-info takes an argument which should not make\n # us use setup_requires (defeating the purpose of this code).\n return True\n else:\n return False\n\n if all(argument_without_setup_requirements(argv, i)\n for i in range(1, len(argv))):\n return {\n \"cmdclass\": {\n \"build\": DummyBuild,\n \"install\": DummyInstall,\n \"test\": DummyPyTest,\n }\n }\n else:\n cffi_modules = [\n \"src/_cffi_src/build_openssl.py:ffi\",\n \"src/_cffi_src/build_constant_time.py:ffi\",\n \"src/_cffi_src/build_padding.py:ffi\",\n ]\n if cc_is_available():\n cffi_modules.append(\"src/_cffi_src/build_commoncrypto.py:ffi\")\n\n return {\n \"setup_requires\": setup_requirements,\n \"cmdclass\": {\n \"test\": PyTest,\n },\n \"cffi_modules\": cffi_modules\n }\n\n\nsetup_requires_error = (\"Requested setup command that needs 'setup_requires' \"\n \"while command line arguments implied a side effect \"\n \"free command or option.\")\n\n\nclass DummyBuild(build):\n \"\"\"\n This class makes it very obvious when ``keywords_with_side_effects()`` has\n incorrectly interpreted the command line arguments to ``setup.py build`` as\n one of the 'side effect free' commands or options.\n \"\"\"\n\n def run(self):\n raise 
RuntimeError(setup_requires_error)\n\n\nclass DummyInstall(install):\n \"\"\"\n This class makes it very obvious when ``keywords_with_side_effects()`` has\n incorrectly interpreted the command line arguments to ``setup.py install``\n as one of the 'side effect free' commands or options.\n \"\"\"\n\n def run(self):\n raise RuntimeError(setup_requires_error)\n\n\nclass DummyPyTest(test):\n \"\"\"\n This class makes it very obvious when ``keywords_with_side_effects()`` has\n incorrectly interpreted the command line arguments to ``setup.py test`` as\n one of the 'side effect free' commands or options.\n \"\"\"\n\n def run_tests(self):\n raise RuntimeError(setup_requires_error)\n\n\nwith open(os.path.join(base_dir, \"README.rst\")) as f:\n long_description = f.read()\n\n\nsetup(\n name=about[\"__title__\"],\n version=about[\"__version__\"],\n\n description=about[\"__summary__\"],\n long_description=long_description,\n license=about[\"__license__\"],\n url=about[\"__uri__\"],\n\n author=about[\"__author__\"],\n author_email=about[\"__email__\"],\n\n classifiers=[\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"License :: OSI Approved :: BSD License\",\n \"Natural Language :: English\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX\",\n \"Operating System :: POSIX :: BSD\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.6\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Topic :: Security :: Cryptography\",\n ],\n\n package_dir={\"\": \"src\"},\n packages=find_packages(\n where=\"src\", exclude=[\"_cffi_src\", \"_cffi_src.*\", \"tests\", \"tests.*\"]\n ),\n include_package_data=True,\n\n install_requires=requirements,\n tests_require=test_requirements,\n extras_require={\n \"test\": test_requirements,\n \"docs-test\": [\n \"doc8\",\n \"pyenchant\",\n \"readme_renderer\",\n \"sphinx\",\n \"sphinx_rtd_theme\",\n \"sphinxcontrib-spelling\",\n ],\n \"pep8-test\": [\n \"flake8\",\n \"flake8-import-order\",\n \"pep8-naming\",\n ],\n },\n\n # for cffi\n zip_safe=False,\n ext_package=\"cryptography.hazmat.bindings\",\n entry_points={\n \"cryptography.backends\": backends,\n },\n **keywords_with_side_effects(sys.argv)\n)\n", "path": "setup.py" } ]
[ { "content": "#!/usr/bin/env python\n\n# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport os\nimport platform\nimport subprocess\nimport sys\nfrom distutils.command.build import build\n\nimport pkg_resources\n\nfrom setuptools import find_packages, setup\nfrom setuptools.command.install import install\nfrom setuptools.command.test import test\n\n\nbase_dir = os.path.dirname(__file__)\nsrc_dir = os.path.join(base_dir, \"src\")\n\n# When executing the setup.py, we need to be able to import ourselves, this\n# means that we need to add the src/ directory to the sys.path.\nsys.path.insert(0, src_dir)\n\nabout = {}\nwith open(os.path.join(src_dir, \"cryptography\", \"__about__.py\")) as f:\n exec(f.read(), about)\n\n\nVECTORS_DEPENDENCY = \"cryptography_vectors=={0}\".format(about['__version__'])\n\nrequirements = [\n \"idna>=2.0\",\n \"pyasn1>=0.1.8\",\n \"six>=1.4.1\",\n \"setuptools>=11.3\",\n]\nsetup_requirements = []\n\nif sys.version_info < (3, 4):\n requirements.append(\"enum34\")\n\nif sys.version_info < (3, 3):\n requirements.append(\"ipaddress\")\n\nif platform.python_implementation() == \"PyPy\":\n if sys.pypy_version_info < (2, 6):\n raise RuntimeError(\n \"cryptography 1.0 is not compatible with PyPy < 2.6. Please \"\n \"upgrade PyPy to use this library.\"\n )\nelse:\n requirements.append(\"cffi>=1.4.1\")\n setup_requirements.append(\"cffi>=1.4.1\")\n\ntest_requirements = [\n \"pytest\",\n \"pretend\",\n \"iso8601\",\n \"pyasn1_modules\",\n]\nif sys.version_info[:2] > (2, 6):\n test_requirements.append(\"hypothesis>=1.11.4\")\n\n\n# If there's no vectors locally that probably means we are in a tarball and\n# need to go and get the matching vectors package from PyPi\nif not os.path.exists(os.path.join(base_dir, \"vectors/setup.py\")):\n test_requirements.append(VECTORS_DEPENDENCY)\n\n\ndef cc_is_available():\n return sys.platform == \"darwin\" and list(map(\n int, platform.mac_ver()[0].split(\".\"))) >= [10, 8, 0]\n\n\nbackends = [\n \"openssl = cryptography.hazmat.backends.openssl:backend\"\n]\n\nif cc_is_available():\n backends.append(\n \"commoncrypto = cryptography.hazmat.backends.commoncrypto:backend\",\n )\n\n\nclass PyTest(test):\n def finalize_options(self):\n test.finalize_options(self)\n self.test_args = []\n self.test_suite = True\n\n # This means there's a vectors/ folder with the package in here.\n # cd into it, install the vectors package and then refresh sys.path\n if VECTORS_DEPENDENCY not in test_requirements:\n subprocess.check_call(\n [sys.executable, \"setup.py\", \"install\"], cwd=\"vectors\"\n )\n pkg_resources.get_distribution(\"cryptography_vectors\").activate()\n\n def run_tests(self):\n # Import here because in module scope the eggs are not loaded.\n import pytest\n test_args = [os.path.join(base_dir, \"tests\")]\n errno = pytest.main(test_args)\n sys.exit(errno)\n\n\ndef keywords_with_side_effects(argv):\n \"\"\"\n Get a dictionary with setup keywords that (can) have side effects.\n\n :param argv: A list of strings with command line arguments.\n :returns: A dictionary with keyword arguments for the ``setup()`` function.\n\n This setup.py script uses the setuptools 'setup_requires' feature because\n this is required by the cffi package to compile extension modules. 
The\n purpose of ``keywords_with_side_effects()`` is to avoid triggering the cffi\n build process as a result of setup.py invocations that don't need the cffi\n module to be built (setup.py serves the dual purpose of exposing package\n metadata).\n\n All of the options listed by ``python setup.py --help`` that print\n information should be recognized here. The commands ``clean``,\n ``egg_info``, ``register``, ``sdist`` and ``upload`` are also recognized.\n Any combination of these options and commands is also supported.\n\n This function was originally based on the `setup.py script`_ of SciPy (see\n also the discussion in `pip issue #25`_).\n\n .. _pip issue #25: https://github.com/pypa/pip/issues/25\n .. _setup.py script: https://github.com/scipy/scipy/blob/master/setup.py\n \"\"\"\n no_setup_requires_arguments = (\n '-h', '--help',\n '-n', '--dry-run',\n '-q', '--quiet',\n '-v', '--verbose',\n '-V', '--version',\n '--author',\n '--author-email',\n '--classifiers',\n '--contact',\n '--contact-email',\n '--description',\n '--egg-base',\n '--fullname',\n '--help-commands',\n '--keywords',\n '--licence',\n '--license',\n '--long-description',\n '--maintainer',\n '--maintainer-email',\n '--name',\n '--no-user-cfg',\n '--obsoletes',\n '--platforms',\n '--provides',\n '--requires',\n '--url',\n 'clean',\n 'egg_info',\n 'register',\n 'sdist',\n 'upload',\n )\n\n def is_short_option(argument):\n \"\"\"Check whether a command line argument is a short option.\"\"\"\n return len(argument) >= 2 and argument[0] == '-' and argument[1] != '-'\n\n def expand_short_options(argument):\n \"\"\"Expand combined short options into canonical short options.\"\"\"\n return ('-' + char for char in argument[1:])\n\n def argument_without_setup_requirements(argv, i):\n \"\"\"Check whether a command line argument needs setup requirements.\"\"\"\n if argv[i] in no_setup_requires_arguments:\n # Simple case: An argument which is either an option or a command\n # which doesn't need setup requirements.\n return True\n elif (is_short_option(argv[i]) and\n all(option in no_setup_requires_arguments\n for option in expand_short_options(argv[i]))):\n # Not so simple case: Combined short options none of which need\n # setup requirements.\n return True\n elif argv[i - 1:i] == ['--egg-base']:\n # Tricky case: --egg-info takes an argument which should not make\n # us use setup_requires (defeating the purpose of this code).\n return True\n else:\n return False\n\n if all(argument_without_setup_requirements(argv, i)\n for i in range(1, len(argv))):\n return {\n \"cmdclass\": {\n \"build\": DummyBuild,\n \"install\": DummyInstall,\n \"test\": DummyPyTest,\n }\n }\n else:\n cffi_modules = [\n \"src/_cffi_src/build_openssl.py:ffi\",\n \"src/_cffi_src/build_constant_time.py:ffi\",\n \"src/_cffi_src/build_padding.py:ffi\",\n ]\n if cc_is_available():\n cffi_modules.append(\"src/_cffi_src/build_commoncrypto.py:ffi\")\n\n return {\n \"setup_requires\": setup_requirements,\n \"cmdclass\": {\n \"test\": PyTest,\n },\n \"cffi_modules\": cffi_modules\n }\n\n\nsetup_requires_error = (\"Requested setup command that needs 'setup_requires' \"\n \"while command line arguments implied a side effect \"\n \"free command or option.\")\n\n\nclass DummyBuild(build):\n \"\"\"\n This class makes it very obvious when ``keywords_with_side_effects()`` has\n incorrectly interpreted the command line arguments to ``setup.py build`` as\n one of the 'side effect free' commands or options.\n \"\"\"\n\n def run(self):\n raise 
RuntimeError(setup_requires_error)\n\n\nclass DummyInstall(install):\n \"\"\"\n This class makes it very obvious when ``keywords_with_side_effects()`` has\n incorrectly interpreted the command line arguments to ``setup.py install``\n as one of the 'side effect free' commands or options.\n \"\"\"\n\n def run(self):\n raise RuntimeError(setup_requires_error)\n\n\nclass DummyPyTest(test):\n \"\"\"\n This class makes it very obvious when ``keywords_with_side_effects()`` has\n incorrectly interpreted the command line arguments to ``setup.py test`` as\n one of the 'side effect free' commands or options.\n \"\"\"\n\n def run_tests(self):\n raise RuntimeError(setup_requires_error)\n\n\nwith open(os.path.join(base_dir, \"README.rst\")) as f:\n long_description = f.read()\n\n\nsetup(\n name=about[\"__title__\"],\n version=about[\"__version__\"],\n\n description=about[\"__summary__\"],\n long_description=long_description,\n license=about[\"__license__\"],\n url=about[\"__uri__\"],\n\n author=about[\"__author__\"],\n author_email=about[\"__email__\"],\n\n classifiers=[\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"License :: OSI Approved :: BSD License\",\n \"Natural Language :: English\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX\",\n \"Operating System :: POSIX :: BSD\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.6\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Topic :: Security :: Cryptography\",\n ],\n\n package_dir={\"\": \"src\"},\n packages=find_packages(where=\"src\", exclude=[\"_cffi_src\", \"_cffi_src.*\"]),\n include_package_data=True,\n\n install_requires=requirements,\n tests_require=test_requirements,\n extras_require={\n \"test\": test_requirements,\n \"docs-test\": [\n \"doc8\",\n \"pyenchant\",\n \"readme_renderer\",\n \"sphinx\",\n \"sphinx_rtd_theme\",\n \"sphinxcontrib-spelling\",\n ],\n \"pep8-test\": [\n \"flake8\",\n \"flake8-import-order\",\n \"pep8-naming\",\n ],\n },\n\n # for cffi\n zip_safe=False,\n ext_package=\"cryptography.hazmat.bindings\",\n entry_points={\n \"cryptography.backends\": backends,\n },\n **keywords_with_side_effects(sys.argv)\n)\n", "path": "setup.py" } ]
diff --git a/setup.py b/setup.py
index 88dfd7de72a4..d104ac3a2104 100644
--- a/setup.py
+++ b/setup.py
@@ -302,9 +302,7 @@ def run_tests(self):
     ],
 
     package_dir={"": "src"},
-    packages=find_packages(
-        where="src", exclude=["_cffi_src", "_cffi_src.*", "tests", "tests.*"]
-    ),
+    packages=find_packages(where="src", exclude=["_cffi_src", "_cffi_src.*"]),
     include_package_data=True,
 
     install_requires=requirements,
docker__docker-py-3099
Unable to use docker to run containers started from Jupyter Notebook

Hello, I'm currently following this tutorial: https://github.com/aws/amazon-sagemaker-examples/blob/main/sagemaker-pipelines/tabular/local-mode/sagemaker-pipelines-local-mode.ipynb

I'm getting the following error when trying to run it (it uses docker in the background to run containers). It executes everything when the following command is run:

```python
execution = pipeline.start()
```

I get the following error:

```python
Creating q0r36pja78-algo-1-ywafn ...
Creating q0r36pja78-algo-1-ywafn ... done
Attaching to q0r36pja78-algo-1-ywafn
Traceback (most recent call last):
  File "C:\Users\franc\anaconda3\envs\sm-pipelines-modelbuild\lib\runpy.py", line 194, in _run_module_as_main
    return _run_code(code, main_globals, None,
  File "C:\Users\franc\anaconda3\envs\sm-pipelines-modelbuild\lib\runpy.py", line 87, in _run_code
    exec(code, run_globals)
  File "C:\Users\franc\anaconda3\envs\sm-pipelines-modelbuild\Scripts\docker-compose.exe\__main__.py", line 7, in <module>
  File "C:\Users\franc\anaconda3\envs\sm-pipelines-modelbuild\lib\site-packages\compose\cli\main.py", line 81, in main
    command_func()
  File "C:\Users\franc\anaconda3\envs\sm-pipelines-modelbuild\lib\site-packages\compose\cli\main.py", line 203, in perform_command
    handler(command, command_options)
  File "C:\Users\franc\anaconda3\envs\sm-pipelines-modelbuild\lib\site-packages\compose\metrics\decorator.py", line 18, in wrapper
    result = fn(*args, **kwargs)
  File "C:\Users\franc\anaconda3\envs\sm-pipelines-modelbuild\lib\site-packages\compose\cli\main.py", line 1216, in up
    cascade_starter = log_printer.run()
  File "C:\Users\franc\anaconda3\envs\sm-pipelines-modelbuild\lib\site-packages\compose\cli\log_printer.py", line 88, in run
    for line in consume_queue(queue, self.cascade_stop):
  File "C:\Users\franc\anaconda3\envs\sm-pipelines-modelbuild\lib\site-packages\compose\cli\log_printer.py", line 250, in consume_queue
    raise item.exc
  File "C:\Users\franc\anaconda3\envs\sm-pipelines-modelbuild\lib\site-packages\compose\cli\log_printer.py", line 162, in tail_container_logs
    for item in build_log_generator(container, log_args):
  File "C:\Users\franc\anaconda3\envs\sm-pipelines-modelbuild\lib\site-packages\compose\utils.py", line 50, in split_buffer
    for data in stream_as_text(stream):
  File "C:\Users\franc\anaconda3\envs\sm-pipelines-modelbuild\lib\site-packages\compose\utils.py", line 26, in stream_as_text
    for data in stream:
  File "C:\Users\franc\anaconda3\envs\sm-pipelines-modelbuild\lib\site-packages\docker\types\daemon.py", line 32, in __next__
    return next(self._stream)
  File "C:\Users\franc\anaconda3\envs\sm-pipelines-modelbuild\lib\site-packages\docker\api\client.py", line 418, in <genexpr>
    gen = (data for (_, data) in gen)
  File "C:\Users\franc\anaconda3\envs\sm-pipelines-modelbuild\lib\site-packages\docker\utils\socket.py", line 95, in <genexpr>
    return ((STDOUT, frame) for frame in frames_iter_tty(socket))
  File "C:\Users\franc\anaconda3\envs\sm-pipelines-modelbuild\lib\site-packages\docker\utils\socket.py", line 128, in frames_iter_tty
    if len(result) == 0:
TypeError: object of type 'int' has no len()
Pipeline step 'AbaloneProcess' FAILED. Failure message is: RuntimeError: Failed to run: ['docker-compose', '-f', 'C:\\Users\\franc\\AppData\\Local\\Temp\\tmpga4umz96\\docker-compose.yaml', 'up', '--build', '--abort-on-container-exit']
Pipeline execution c0a11456-aec5-48ec-adde-4ee45085efa8 FAILED because step 'AbaloneProcess' failed.
```

Version of the modules:
Python 3.8.16
docker 6.0.1
docker-compose 1.29.2
docker desktop 4.16.3

Thanks
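The last two frames show `frames_iter_tty` calling `len()` on whatever `read()` returned. A minimal sketch in plain Python (not docker-py internals) of why the npipe "pipe ended" branch returning the integer 0 triggers this, and why an empty-string sentinel does not:

```python
# Plain-Python sketch of the failure mode; no real socket involved.
def looks_like_eof(result):
    # Mirrors the shape of the check in frames_iter_tty: stop on a 0-length read.
    return len(result) == 0

print(looks_like_eof(b"chunk"))   # False -- a normal chunk of output
print(looks_like_eof(""))         # True  -- an empty sentinel reads as EOF
try:
    looks_like_eof(0)             # the old npipe branch returned the int 0 ...
except TypeError as exc:
    print(exc)                    # ... object of type 'int' has no len()
```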
[ { "content": "import errno\nimport os\nimport select\nimport socket as pysocket\nimport struct\n\ntry:\n from ..transport import NpipeSocket\nexcept ImportError:\n NpipeSocket = type(None)\n\n\nSTDOUT = 1\nSTDERR = 2\n\n\nclass SocketError(Exception):\n pass\n\n\n# NpipeSockets have their own error types\n# pywintypes.error: (109, 'ReadFile', 'The pipe has been ended.')\nNPIPE_ENDED = 109\n\n\ndef read(socket, n=4096):\n \"\"\"\n Reads at most n bytes from socket\n \"\"\"\n\n recoverable_errors = (errno.EINTR, errno.EDEADLK, errno.EWOULDBLOCK)\n\n if not isinstance(socket, NpipeSocket):\n select.select([socket], [], [])\n\n try:\n if hasattr(socket, 'recv'):\n return socket.recv(n)\n if isinstance(socket, getattr(pysocket, 'SocketIO')):\n return socket.read(n)\n return os.read(socket.fileno(), n)\n except OSError as e:\n if e.errno not in recoverable_errors:\n raise\n except Exception as e:\n is_pipe_ended = (isinstance(socket, NpipeSocket) and\n len(e.args) > 0 and\n e.args[0] == NPIPE_ENDED)\n if is_pipe_ended:\n # npipes don't support duplex sockets, so we interpret\n # a PIPE_ENDED error as a close operation (0-length read).\n return 0\n raise\n\n\ndef read_exactly(socket, n):\n \"\"\"\n Reads exactly n bytes from socket\n Raises SocketError if there isn't enough data\n \"\"\"\n data = bytes()\n while len(data) < n:\n next_data = read(socket, n - len(data))\n if not next_data:\n raise SocketError(\"Unexpected EOF\")\n data += next_data\n return data\n\n\ndef next_frame_header(socket):\n \"\"\"\n Returns the stream and size of the next frame of data waiting to be read\n from socket, according to the protocol defined here:\n\n https://docs.docker.com/engine/api/v1.24/#attach-to-a-container\n \"\"\"\n try:\n data = read_exactly(socket, 8)\n except SocketError:\n return (-1, -1)\n\n stream, actual = struct.unpack('>BxxxL', data)\n return (stream, actual)\n\n\ndef frames_iter(socket, tty):\n \"\"\"\n Return a generator of frames read from socket. A frame is a tuple where\n the first item is the stream number and the second item is a chunk of data.\n\n If the tty setting is enabled, the streams are multiplexed into the stdout\n stream.\n \"\"\"\n if tty:\n return ((STDOUT, frame) for frame in frames_iter_tty(socket))\n else:\n return frames_iter_no_tty(socket)\n\n\ndef frames_iter_no_tty(socket):\n \"\"\"\n Returns a generator of data read from the socket when the tty setting is\n not enabled.\n \"\"\"\n while True:\n (stream, n) = next_frame_header(socket)\n if n < 0:\n break\n while n > 0:\n result = read(socket, n)\n if result is None:\n continue\n data_length = len(result)\n if data_length == 0:\n # We have reached EOF\n return\n n -= data_length\n yield (stream, result)\n\n\ndef frames_iter_tty(socket):\n \"\"\"\n Return a generator of data read from the socket when the tty setting is\n enabled.\n \"\"\"\n while True:\n result = read(socket)\n if len(result) == 0:\n # We have reached EOF\n return\n yield result\n\n\ndef consume_socket_output(frames, demux=False):\n \"\"\"\n Iterate through frames read from the socket and return the result.\n\n Args:\n\n demux (bool):\n If False, stdout and stderr are multiplexed, and the result is the\n concatenation of all the frames. 
If True, the streams are\n demultiplexed, and the result is a 2-tuple where each item is the\n concatenation of frames belonging to the same stream.\n \"\"\"\n if demux is False:\n # If the streams are multiplexed, the generator returns strings, that\n # we just need to concatenate.\n return bytes().join(frames)\n\n # If the streams are demultiplexed, the generator yields tuples\n # (stdout, stderr)\n out = [None, None]\n for frame in frames:\n # It is guaranteed that for each frame, one and only one stream\n # is not None.\n assert frame != (None, None)\n if frame[0] is not None:\n if out[0] is None:\n out[0] = frame[0]\n else:\n out[0] += frame[0]\n else:\n if out[1] is None:\n out[1] = frame[1]\n else:\n out[1] += frame[1]\n return tuple(out)\n\n\ndef demux_adaptor(stream_id, data):\n \"\"\"\n Utility to demultiplex stdout and stderr when reading frames from the\n socket.\n \"\"\"\n if stream_id == STDOUT:\n return (data, None)\n elif stream_id == STDERR:\n return (None, data)\n else:\n raise ValueError(f'{stream_id} is not a valid stream')\n", "path": "docker/utils/socket.py" } ]
[ { "content": "import errno\nimport os\nimport select\nimport socket as pysocket\nimport struct\n\ntry:\n from ..transport import NpipeSocket\nexcept ImportError:\n NpipeSocket = type(None)\n\n\nSTDOUT = 1\nSTDERR = 2\n\n\nclass SocketError(Exception):\n pass\n\n\n# NpipeSockets have their own error types\n# pywintypes.error: (109, 'ReadFile', 'The pipe has been ended.')\nNPIPE_ENDED = 109\n\n\ndef read(socket, n=4096):\n \"\"\"\n Reads at most n bytes from socket\n \"\"\"\n\n recoverable_errors = (errno.EINTR, errno.EDEADLK, errno.EWOULDBLOCK)\n\n if not isinstance(socket, NpipeSocket):\n select.select([socket], [], [])\n\n try:\n if hasattr(socket, 'recv'):\n return socket.recv(n)\n if isinstance(socket, getattr(pysocket, 'SocketIO')):\n return socket.read(n)\n return os.read(socket.fileno(), n)\n except OSError as e:\n if e.errno not in recoverable_errors:\n raise\n except Exception as e:\n is_pipe_ended = (isinstance(socket, NpipeSocket) and\n len(e.args) > 0 and\n e.args[0] == NPIPE_ENDED)\n if is_pipe_ended:\n # npipes don't support duplex sockets, so we interpret\n # a PIPE_ENDED error as a close operation (0-length read).\n return ''\n raise\n\n\ndef read_exactly(socket, n):\n \"\"\"\n Reads exactly n bytes from socket\n Raises SocketError if there isn't enough data\n \"\"\"\n data = bytes()\n while len(data) < n:\n next_data = read(socket, n - len(data))\n if not next_data:\n raise SocketError(\"Unexpected EOF\")\n data += next_data\n return data\n\n\ndef next_frame_header(socket):\n \"\"\"\n Returns the stream and size of the next frame of data waiting to be read\n from socket, according to the protocol defined here:\n\n https://docs.docker.com/engine/api/v1.24/#attach-to-a-container\n \"\"\"\n try:\n data = read_exactly(socket, 8)\n except SocketError:\n return (-1, -1)\n\n stream, actual = struct.unpack('>BxxxL', data)\n return (stream, actual)\n\n\ndef frames_iter(socket, tty):\n \"\"\"\n Return a generator of frames read from socket. A frame is a tuple where\n the first item is the stream number and the second item is a chunk of data.\n\n If the tty setting is enabled, the streams are multiplexed into the stdout\n stream.\n \"\"\"\n if tty:\n return ((STDOUT, frame) for frame in frames_iter_tty(socket))\n else:\n return frames_iter_no_tty(socket)\n\n\ndef frames_iter_no_tty(socket):\n \"\"\"\n Returns a generator of data read from the socket when the tty setting is\n not enabled.\n \"\"\"\n while True:\n (stream, n) = next_frame_header(socket)\n if n < 0:\n break\n while n > 0:\n result = read(socket, n)\n if result is None:\n continue\n data_length = len(result)\n if data_length == 0:\n # We have reached EOF\n return\n n -= data_length\n yield (stream, result)\n\n\ndef frames_iter_tty(socket):\n \"\"\"\n Return a generator of data read from the socket when the tty setting is\n enabled.\n \"\"\"\n while True:\n result = read(socket)\n if len(result) == 0:\n # We have reached EOF\n return\n yield result\n\n\ndef consume_socket_output(frames, demux=False):\n \"\"\"\n Iterate through frames read from the socket and return the result.\n\n Args:\n\n demux (bool):\n If False, stdout and stderr are multiplexed, and the result is the\n concatenation of all the frames. 
If True, the streams are\n demultiplexed, and the result is a 2-tuple where each item is the\n concatenation of frames belonging to the same stream.\n \"\"\"\n if demux is False:\n # If the streams are multiplexed, the generator returns strings, that\n # we just need to concatenate.\n return bytes().join(frames)\n\n # If the streams are demultiplexed, the generator yields tuples\n # (stdout, stderr)\n out = [None, None]\n for frame in frames:\n # It is guaranteed that for each frame, one and only one stream\n # is not None.\n assert frame != (None, None)\n if frame[0] is not None:\n if out[0] is None:\n out[0] = frame[0]\n else:\n out[0] += frame[0]\n else:\n if out[1] is None:\n out[1] = frame[1]\n else:\n out[1] += frame[1]\n return tuple(out)\n\n\ndef demux_adaptor(stream_id, data):\n \"\"\"\n Utility to demultiplex stdout and stderr when reading frames from the\n socket.\n \"\"\"\n if stream_id == STDOUT:\n return (data, None)\n elif stream_id == STDERR:\n return (None, data)\n else:\n raise ValueError(f'{stream_id} is not a valid stream')\n", "path": "docker/utils/socket.py" } ]
diff --git a/docker/utils/socket.py b/docker/utils/socket.py
index 5aca30b17..47cb44f62 100644
--- a/docker/utils/socket.py
+++ b/docker/utils/socket.py
@@ -49,7 +49,7 @@ def read(socket, n=4096):
         if is_pipe_ended:
             # npipes don't support duplex sockets, so we interpret
             # a PIPE_ENDED error as a close operation (0-length read).
-            return 0
+            return ''
         raise
 
 
archlinux__archinstall-2278
no default value for uki key in configurations.json (archinstall 2.7.0)

When using archinstall on a system with no support for UEFI, the user is not prompted to choose a value for the uki key. However, when running archinstall with a config file without a uki key defined, it errors out since there is no such key defined with a default value. At least that's what I understood from the problem, looking at PR https://github.com/archlinux/archinstall/pull/1519. Adding `uki: false` in the config file fixed this for me.

By the way, how is UEFI support checked? I thought it was impossible to know from the OS side.
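A plain-dict sketch (not archinstall internals) of why a config saved on a BIOS machine, which simply never contains the uki key, crashes on direct indexing, and why a `.get()` default, the change shown in the diff at the end of this record, avoids it:

```python
# 'arguments' stands in for the parsed configuration file; a config written on
# a BIOS-only machine has no 'uki' key at all.
arguments = {"bootloader": "Grub", "swap": True}

try:
    arguments["uki"]                   # direct indexing -> KeyError, the reported crash
except KeyError as missing:
    print("missing key:", missing)

print(arguments.get("uki", False))     # falls back to False, as the fix below does
```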
[ { "content": "from pathlib import Path\nfrom typing import Any, TYPE_CHECKING, Optional\n\nimport archinstall\nfrom archinstall import info, debug\nfrom archinstall import SysInfo\nfrom archinstall.lib import locale\nfrom archinstall.lib import disk\nfrom archinstall.lib.global_menu import GlobalMenu\nfrom archinstall.lib.configuration import ConfigurationOutput\nfrom archinstall.lib.installer import Installer\nfrom archinstall.lib.menu import Menu\nfrom archinstall.lib.mirrors import use_mirrors, add_custom_mirrors\nfrom archinstall.lib.models import AudioConfiguration\nfrom archinstall.lib.models.bootloader import Bootloader\nfrom archinstall.lib.models.network_configuration import NetworkConfiguration\nfrom archinstall.lib.profile.profiles_handler import profile_handler\n\nif TYPE_CHECKING:\n\t_: Any\n\n\nif archinstall.arguments.get('help'):\n\tprint(\"See `man archinstall` for help.\")\n\texit(0)\n\n\ndef ask_user_questions():\n\t\"\"\"\n\t\tFirst, we'll ask the user for a bunch of user input.\n\t\tNot until we're satisfied with what we want to install\n\t\twill we continue with the actual installation steps.\n\t\"\"\"\n\n\t# ref: https://github.com/archlinux/archinstall/pull/831\n\t# we'll set NTP to true by default since this is also\n\t# the default value specified in the menu options; in\n\t# case it will be changed by the user we'll also update\n\t# the system immediately\n\tglobal_menu = GlobalMenu(data_store=archinstall.arguments)\n\n\tglobal_menu.enable('archinstall-language')\n\n\t# Set which region to download packages from during the installation\n\tglobal_menu.enable('mirror_config')\n\n\tglobal_menu.enable('locale_config')\n\n\tglobal_menu.enable('disk_config', mandatory=True)\n\n\t# Specify disk encryption options\n\tglobal_menu.enable('disk_encryption')\n\n\t# Ask which boot-loader to use (will only ask if we're in UEFI mode, otherwise will default to GRUB)\n\tglobal_menu.enable('bootloader')\n\n\tglobal_menu.enable('uki')\n\n\tglobal_menu.enable('swap')\n\n\t# Get the hostname for the machine\n\tglobal_menu.enable('hostname')\n\n\t# Ask for a root password (optional, but triggers requirement for super-user if skipped)\n\tglobal_menu.enable('!root-password', mandatory=True)\n\n\tglobal_menu.enable('!users', mandatory=True)\n\n\t# Ask for archinstall-specific profiles_bck (such as desktop environments etc)\n\tglobal_menu.enable('profile_config')\n\n\t# Ask about audio server selection if one is not already set\n\tglobal_menu.enable('audio_config')\n\n\t# Ask for preferred kernel:\n\tglobal_menu.enable('kernels', mandatory=True)\n\n\tglobal_menu.enable('packages')\n\n\tif archinstall.arguments.get('advanced', False):\n\t\t# Enable parallel downloads\n\t\tglobal_menu.enable('parallel downloads')\n\n\t# Ask or Call the helper function that asks the user to optionally configure a network.\n\tglobal_menu.enable('network_config')\n\n\tglobal_menu.enable('timezone')\n\n\tglobal_menu.enable('ntp')\n\n\tglobal_menu.enable('additional-repositories')\n\n\tglobal_menu.enable('__separator__')\n\n\tglobal_menu.enable('save_config')\n\tglobal_menu.enable('install')\n\tglobal_menu.enable('abort')\n\n\tglobal_menu.run()\n\n\ndef perform_installation(mountpoint: Path):\n\t\"\"\"\n\tPerforms the installation steps on a block device.\n\tOnly requirement is that the block devices are\n\tformatted and setup prior to entering this function.\n\t\"\"\"\n\tinfo('Starting installation')\n\tdisk_config: disk.DiskLayoutConfiguration = archinstall.arguments['disk_config']\n\n\t# Retrieve list of 
additional repositories and set boolean values appropriately\n\tenable_testing = 'testing' in archinstall.arguments.get('additional-repositories', [])\n\tenable_multilib = 'multilib' in archinstall.arguments.get('additional-repositories', [])\n\trun_mkinitcpio = not archinstall.arguments.get('uki')\n\tlocale_config: locale.LocaleConfiguration = archinstall.arguments['locale_config']\n\tdisk_encryption: disk.DiskEncryption = archinstall.arguments.get('disk_encryption', None)\n\n\twith Installer(\n\t\tmountpoint,\n\t\tdisk_config,\n\t\tdisk_encryption=disk_encryption,\n\t\tkernels=archinstall.arguments.get('kernels', ['linux'])\n\t) as installation:\n\t\t# Mount all the drives to the desired mountpoint\n\t\tif disk_config.config_type != disk.DiskLayoutType.Pre_mount:\n\t\t\tinstallation.mount_ordered_layout()\n\n\t\tinstallation.sanity_check()\n\n\t\tif disk_config.config_type != disk.DiskLayoutType.Pre_mount:\n\t\t\tif disk_encryption and disk_encryption.encryption_type != disk.EncryptionType.NoEncryption:\n\t\t\t\t# generate encryption key files for the mounted luks devices\n\t\t\t\tinstallation.generate_key_files()\n\n\t\t# Set mirrors used by pacstrap (outside of installation)\n\t\tif mirror_config := archinstall.arguments.get('mirror_config', None):\n\t\t\tif mirror_config.mirror_regions:\n\t\t\t\tuse_mirrors(mirror_config.mirror_regions)\n\t\t\tif mirror_config.custom_mirrors:\n\t\t\t\tadd_custom_mirrors(mirror_config.custom_mirrors)\n\n\t\tinstallation.minimal_installation(\n\t\t\ttesting=enable_testing,\n\t\t\tmultilib=enable_multilib,\n\t\t\tmkinitcpio=run_mkinitcpio,\n\t\t\thostname=archinstall.arguments.get('hostname', 'archlinux'),\n\t\t\tlocale_config=locale_config\n\t\t)\n\n\t\tif mirror_config := archinstall.arguments.get('mirror_config', None):\n\t\t\tinstallation.set_mirrors(mirror_config) # Set the mirrors in the installation medium\n\n\t\tif archinstall.arguments.get('swap'):\n\t\t\tinstallation.setup_swap('zram')\n\n\t\tif archinstall.arguments.get(\"bootloader\") == Bootloader.Grub and SysInfo.has_uefi():\n\t\t\tinstallation.add_additional_packages(\"grub\")\n\n\t\tinstallation.add_bootloader(\n\t\t\tarchinstall.arguments[\"bootloader\"],\n\t\t\tarchinstall.arguments[\"uki\"]\n\t\t)\n\n\t\t# If user selected to copy the current ISO network configuration\n\t\t# Perform a copy of the config\n\t\tnetwork_config: Optional[NetworkConfiguration] = archinstall.arguments.get('network_config', None)\n\n\t\tif network_config:\n\t\t\tnetwork_config.install_network_config(\n\t\t\t\tinstallation,\n\t\t\t\tarchinstall.arguments.get('profile_config', None)\n\t\t\t)\n\n\t\tif users := archinstall.arguments.get('!users', None):\n\t\t\tinstallation.create_users(users)\n\n\t\taudio_config: Optional[AudioConfiguration] = archinstall.arguments.get('audio_config', None)\n\t\tif audio_config:\n\t\t\taudio_config.install_audio_config(installation)\n\t\telse:\n\t\t\tinfo(\"No audio server will be installed\")\n\n\t\tif archinstall.arguments.get('packages', None) and archinstall.arguments.get('packages', None)[0] != '':\n\t\t\tinstallation.add_additional_packages(archinstall.arguments.get('packages', None))\n\n\t\tif profile_config := archinstall.arguments.get('profile_config', None):\n\t\t\tprofile_handler.install_profile_config(installation, profile_config)\n\n\t\tif timezone := archinstall.arguments.get('timezone', None):\n\t\t\tinstallation.set_timezone(timezone)\n\n\t\tif archinstall.arguments.get('ntp', False):\n\t\t\tinstallation.activate_time_synchronization()\n\n\t\tif 
archinstall.accessibility_tools_in_use():\n\t\t\tinstallation.enable_espeakup()\n\n\t\tif (root_pw := archinstall.arguments.get('!root-password', None)) and len(root_pw):\n\t\t\tinstallation.user_set_pw('root', root_pw)\n\n\t\t# This step must be after profile installs to allow profiles_bck to install language pre-requisites.\n\t\t# After which, this step will set the language both for console and x11 if x11 was installed for instance.\n\t\tinstallation.set_keyboard_language(locale_config.kb_layout)\n\n\t\tif profile_config := archinstall.arguments.get('profile_config', None):\n\t\t\tprofile_config.profile.post_install(installation)\n\n\t\t# If the user provided a list of services to be enabled, pass the list to the enable_service function.\n\t\t# Note that while it's called enable_service, it can actually take a list of services and iterate it.\n\t\tif archinstall.arguments.get('services', None):\n\t\t\tinstallation.enable_service(archinstall.arguments.get('services', []))\n\n\t\t# If the user provided custom commands to be run post-installation, execute them now.\n\t\tif archinstall.arguments.get('custom-commands', None):\n\t\t\tarchinstall.run_custom_user_commands(archinstall.arguments['custom-commands'], installation)\n\n\t\tinstallation.genfstab()\n\n\t\tinfo(\"For post-installation tips, see https://wiki.archlinux.org/index.php/Installation_guide#Post-installation\")\n\n\t\tif not archinstall.arguments.get('silent'):\n\t\t\tprompt = str(_('Would you like to chroot into the newly created installation and perform post-installation configuration?'))\n\t\t\tchoice = Menu(prompt, Menu.yes_no(), default_option=Menu.yes()).run()\n\t\t\tif choice.value == Menu.yes():\n\t\t\t\ttry:\n\t\t\t\t\tinstallation.drop_to_shell()\n\t\t\t\texcept:\n\t\t\t\t\tpass\n\n\tdebug(f\"Disk states after installing: {disk.disk_layouts()}\")\n\n\nif not archinstall.arguments.get('silent'):\n\task_user_questions()\n\nconfig_output = ConfigurationOutput(archinstall.arguments)\n\nif not archinstall.arguments.get('silent'):\n\tconfig_output.show()\n\nconfig_output.save()\n\nif archinstall.arguments.get('dry_run'):\n\texit(0)\n\nif not archinstall.arguments.get('silent'):\n\tinput(str(_('Press Enter to continue.')))\n\nfs_handler = disk.FilesystemHandler(\n\tarchinstall.arguments['disk_config'],\n\tarchinstall.arguments.get('disk_encryption', None)\n)\n\nfs_handler.perform_filesystem_operations()\n\nperform_installation(archinstall.storage.get('MOUNT_POINT', Path('/mnt')))\n", "path": "archinstall/scripts/guided.py" } ]
[ { "content": "from pathlib import Path\nfrom typing import Any, TYPE_CHECKING, Optional\n\nimport archinstall\nfrom archinstall import info, debug\nfrom archinstall import SysInfo\nfrom archinstall.lib import locale\nfrom archinstall.lib import disk\nfrom archinstall.lib.global_menu import GlobalMenu\nfrom archinstall.lib.configuration import ConfigurationOutput\nfrom archinstall.lib.installer import Installer\nfrom archinstall.lib.menu import Menu\nfrom archinstall.lib.mirrors import use_mirrors, add_custom_mirrors\nfrom archinstall.lib.models import AudioConfiguration\nfrom archinstall.lib.models.bootloader import Bootloader\nfrom archinstall.lib.models.network_configuration import NetworkConfiguration\nfrom archinstall.lib.profile.profiles_handler import profile_handler\n\nif TYPE_CHECKING:\n\t_: Any\n\n\nif archinstall.arguments.get('help'):\n\tprint(\"See `man archinstall` for help.\")\n\texit(0)\n\n\ndef ask_user_questions():\n\t\"\"\"\n\t\tFirst, we'll ask the user for a bunch of user input.\n\t\tNot until we're satisfied with what we want to install\n\t\twill we continue with the actual installation steps.\n\t\"\"\"\n\n\t# ref: https://github.com/archlinux/archinstall/pull/831\n\t# we'll set NTP to true by default since this is also\n\t# the default value specified in the menu options; in\n\t# case it will be changed by the user we'll also update\n\t# the system immediately\n\tglobal_menu = GlobalMenu(data_store=archinstall.arguments)\n\n\tglobal_menu.enable('archinstall-language')\n\n\t# Set which region to download packages from during the installation\n\tglobal_menu.enable('mirror_config')\n\n\tglobal_menu.enable('locale_config')\n\n\tglobal_menu.enable('disk_config', mandatory=True)\n\n\t# Specify disk encryption options\n\tglobal_menu.enable('disk_encryption')\n\n\t# Ask which boot-loader to use (will only ask if we're in UEFI mode, otherwise will default to GRUB)\n\tglobal_menu.enable('bootloader')\n\n\tglobal_menu.enable('uki')\n\n\tglobal_menu.enable('swap')\n\n\t# Get the hostname for the machine\n\tglobal_menu.enable('hostname')\n\n\t# Ask for a root password (optional, but triggers requirement for super-user if skipped)\n\tglobal_menu.enable('!root-password', mandatory=True)\n\n\tglobal_menu.enable('!users', mandatory=True)\n\n\t# Ask for archinstall-specific profiles_bck (such as desktop environments etc)\n\tglobal_menu.enable('profile_config')\n\n\t# Ask about audio server selection if one is not already set\n\tglobal_menu.enable('audio_config')\n\n\t# Ask for preferred kernel:\n\tglobal_menu.enable('kernels', mandatory=True)\n\n\tglobal_menu.enable('packages')\n\n\tif archinstall.arguments.get('advanced', False):\n\t\t# Enable parallel downloads\n\t\tglobal_menu.enable('parallel downloads')\n\n\t# Ask or Call the helper function that asks the user to optionally configure a network.\n\tglobal_menu.enable('network_config')\n\n\tglobal_menu.enable('timezone')\n\n\tglobal_menu.enable('ntp')\n\n\tglobal_menu.enable('additional-repositories')\n\n\tglobal_menu.enable('__separator__')\n\n\tglobal_menu.enable('save_config')\n\tglobal_menu.enable('install')\n\tglobal_menu.enable('abort')\n\n\tglobal_menu.run()\n\n\ndef perform_installation(mountpoint: Path):\n\t\"\"\"\n\tPerforms the installation steps on a block device.\n\tOnly requirement is that the block devices are\n\tformatted and setup prior to entering this function.\n\t\"\"\"\n\tinfo('Starting installation')\n\tdisk_config: disk.DiskLayoutConfiguration = archinstall.arguments['disk_config']\n\n\t# Retrieve list of 
additional repositories and set boolean values appropriately\n\tenable_testing = 'testing' in archinstall.arguments.get('additional-repositories', [])\n\tenable_multilib = 'multilib' in archinstall.arguments.get('additional-repositories', [])\n\trun_mkinitcpio = not archinstall.arguments.get('uki')\n\tlocale_config: locale.LocaleConfiguration = archinstall.arguments['locale_config']\n\tdisk_encryption: disk.DiskEncryption = archinstall.arguments.get('disk_encryption', None)\n\n\twith Installer(\n\t\tmountpoint,\n\t\tdisk_config,\n\t\tdisk_encryption=disk_encryption,\n\t\tkernels=archinstall.arguments.get('kernels', ['linux'])\n\t) as installation:\n\t\t# Mount all the drives to the desired mountpoint\n\t\tif disk_config.config_type != disk.DiskLayoutType.Pre_mount:\n\t\t\tinstallation.mount_ordered_layout()\n\n\t\tinstallation.sanity_check()\n\n\t\tif disk_config.config_type != disk.DiskLayoutType.Pre_mount:\n\t\t\tif disk_encryption and disk_encryption.encryption_type != disk.EncryptionType.NoEncryption:\n\t\t\t\t# generate encryption key files for the mounted luks devices\n\t\t\t\tinstallation.generate_key_files()\n\n\t\t# Set mirrors used by pacstrap (outside of installation)\n\t\tif mirror_config := archinstall.arguments.get('mirror_config', None):\n\t\t\tif mirror_config.mirror_regions:\n\t\t\t\tuse_mirrors(mirror_config.mirror_regions)\n\t\t\tif mirror_config.custom_mirrors:\n\t\t\t\tadd_custom_mirrors(mirror_config.custom_mirrors)\n\n\t\tinstallation.minimal_installation(\n\t\t\ttesting=enable_testing,\n\t\t\tmultilib=enable_multilib,\n\t\t\tmkinitcpio=run_mkinitcpio,\n\t\t\thostname=archinstall.arguments.get('hostname', 'archlinux'),\n\t\t\tlocale_config=locale_config\n\t\t)\n\n\t\tif mirror_config := archinstall.arguments.get('mirror_config', None):\n\t\t\tinstallation.set_mirrors(mirror_config) # Set the mirrors in the installation medium\n\n\t\tif archinstall.arguments.get('swap'):\n\t\t\tinstallation.setup_swap('zram')\n\n\t\tif archinstall.arguments.get(\"bootloader\") == Bootloader.Grub and SysInfo.has_uefi():\n\t\t\tinstallation.add_additional_packages(\"grub\")\n\n\t\tinstallation.add_bootloader(\n\t\t\tarchinstall.arguments[\"bootloader\"],\n\t\t\tarchinstall.arguments.get('uki', False)\n\t\t)\n\n\t\t# If user selected to copy the current ISO network configuration\n\t\t# Perform a copy of the config\n\t\tnetwork_config: Optional[NetworkConfiguration] = archinstall.arguments.get('network_config', None)\n\n\t\tif network_config:\n\t\t\tnetwork_config.install_network_config(\n\t\t\t\tinstallation,\n\t\t\t\tarchinstall.arguments.get('profile_config', None)\n\t\t\t)\n\n\t\tif users := archinstall.arguments.get('!users', None):\n\t\t\tinstallation.create_users(users)\n\n\t\taudio_config: Optional[AudioConfiguration] = archinstall.arguments.get('audio_config', None)\n\t\tif audio_config:\n\t\t\taudio_config.install_audio_config(installation)\n\t\telse:\n\t\t\tinfo(\"No audio server will be installed\")\n\n\t\tif archinstall.arguments.get('packages', None) and archinstall.arguments.get('packages', None)[0] != '':\n\t\t\tinstallation.add_additional_packages(archinstall.arguments.get('packages', None))\n\n\t\tif profile_config := archinstall.arguments.get('profile_config', None):\n\t\t\tprofile_handler.install_profile_config(installation, profile_config)\n\n\t\tif timezone := archinstall.arguments.get('timezone', None):\n\t\t\tinstallation.set_timezone(timezone)\n\n\t\tif archinstall.arguments.get('ntp', False):\n\t\t\tinstallation.activate_time_synchronization()\n\n\t\tif 
archinstall.accessibility_tools_in_use():\n\t\t\tinstallation.enable_espeakup()\n\n\t\tif (root_pw := archinstall.arguments.get('!root-password', None)) and len(root_pw):\n\t\t\tinstallation.user_set_pw('root', root_pw)\n\n\t\t# This step must be after profile installs to allow profiles_bck to install language pre-requisites.\n\t\t# After which, this step will set the language both for console and x11 if x11 was installed for instance.\n\t\tinstallation.set_keyboard_language(locale_config.kb_layout)\n\n\t\tif profile_config := archinstall.arguments.get('profile_config', None):\n\t\t\tprofile_config.profile.post_install(installation)\n\n\t\t# If the user provided a list of services to be enabled, pass the list to the enable_service function.\n\t\t# Note that while it's called enable_service, it can actually take a list of services and iterate it.\n\t\tif archinstall.arguments.get('services', None):\n\t\t\tinstallation.enable_service(archinstall.arguments.get('services', []))\n\n\t\t# If the user provided custom commands to be run post-installation, execute them now.\n\t\tif archinstall.arguments.get('custom-commands', None):\n\t\t\tarchinstall.run_custom_user_commands(archinstall.arguments['custom-commands'], installation)\n\n\t\tinstallation.genfstab()\n\n\t\tinfo(\"For post-installation tips, see https://wiki.archlinux.org/index.php/Installation_guide#Post-installation\")\n\n\t\tif not archinstall.arguments.get('silent'):\n\t\t\tprompt = str(_('Would you like to chroot into the newly created installation and perform post-installation configuration?'))\n\t\t\tchoice = Menu(prompt, Menu.yes_no(), default_option=Menu.yes()).run()\n\t\t\tif choice.value == Menu.yes():\n\t\t\t\ttry:\n\t\t\t\t\tinstallation.drop_to_shell()\n\t\t\t\texcept:\n\t\t\t\t\tpass\n\n\tdebug(f\"Disk states after installing: {disk.disk_layouts()}\")\n\n\nif not archinstall.arguments.get('silent'):\n\task_user_questions()\n\nconfig_output = ConfigurationOutput(archinstall.arguments)\n\nif not archinstall.arguments.get('silent'):\n\tconfig_output.show()\n\nconfig_output.save()\n\nif archinstall.arguments.get('dry_run'):\n\texit(0)\n\nif not archinstall.arguments.get('silent'):\n\tinput(str(_('Press Enter to continue.')))\n\nfs_handler = disk.FilesystemHandler(\n\tarchinstall.arguments['disk_config'],\n\tarchinstall.arguments.get('disk_encryption', None)\n)\n\nfs_handler.perform_filesystem_operations()\n\nperform_installation(archinstall.storage.get('MOUNT_POINT', Path('/mnt')))\n", "path": "archinstall/scripts/guided.py" } ]
diff --git a/archinstall/scripts/guided.py b/archinstall/scripts/guided.py
index 6acbdbf3b2..44b0ae17f6 100644
--- a/archinstall/scripts/guided.py
+++ b/archinstall/scripts/guided.py
@@ -158,7 +158,7 @@ def perform_installation(mountpoint: Path):
 
 		installation.add_bootloader(
 			archinstall.arguments["bootloader"],
-			archinstall.arguments["uki"]
+			archinstall.arguments.get('uki', False)
 		)
 
 		# If user selected to copy the current ISO network configuration
python-gitlab__python-gitlab-1213
RefreshMixin.refresh() doesn't remove removed attributes

## Description of the problem, including code/CLI snippet

When attributes disappear from an object on the server, `RefreshMixin.refresh()` doesn't remove them. For instance, a job that has artifacts will have an `artifacts_file` attribute. If you call `.delete_artifacts()` on it, then call `.refresh()`, the `artifacts_file` attribute will still be there.

```python
# get a job with artifacts
job = project.jobs.get(job_id)
# will succeed
assert hasattr(job, "artifacts_file")
# now delete the artifacts from the server
job.delete_artifacts()
# This will fail because the artifacts_file is still there; refresh() didn't remove it
job.refresh()
assert not hasattr(job, "artifacts_file")
# If you get the job again from the project it'll be fine
job = project.jobs.get(job_id)
assert not hasattr(job, "artifacts_file")
```

## Expected Behavior

I would expect that the attributes dict on any object should be exactly the same between a freshly retrieved object and an old object after calling `.refresh()`:

```python
o.refresh()
# After a refresh this should always be true
o.attributes == o.manager.get(o.id).attributes
```

## Actual Behavior

They're not equal.

## Specifications

- python-gitlab version: `v2.4.0`
- API version you are using (v3/v4): `v4`
- Gitlab server version (or gitlab.com): `13.2.3`
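A plain-dict sketch (not python-gitlab internals) of why merging the fresh server payload with `dict.update()` keeps stale keys, while replacing the cached dict wholesale, which is what the updated `_update_attrs` below does, drops them:

```python
# Cached attrs from the first GET, and a fresh payload after the artifacts
# were deleted server-side (illustrative values only).
cached = {"id": 42, "status": "success", "artifacts_file": {"filename": "artifacts.zip"}}
fresh = {"id": 42, "status": "success"}

merged = dict(cached)
merged.update(fresh)                    # update() never deletes keys
print("artifacts_file" in merged)       # True  -> the stale attribute survives refresh()

replaced = dict(fresh)                  # replacing the dict mirrors the corrected behaviour
print("artifacts_file" in replaced)     # False -> matches a freshly fetched object
```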
[ { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2013-2017 Gauvain Pocentek <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n\nimport importlib\n\n\nclass RESTObject(object):\n \"\"\"Represents an object built from server data.\n\n It holds the attributes know from the server, and the updated attributes in\n another. This allows smart updates, if the object allows it.\n\n You can redefine ``_id_attr`` in child classes to specify which attribute\n must be used as uniq ID. ``None`` means that the object can be updated\n without ID in the url.\n \"\"\"\n\n _id_attr = \"id\"\n\n def __init__(self, manager, attrs):\n self.__dict__.update(\n {\n \"manager\": manager,\n \"_attrs\": attrs,\n \"_updated_attrs\": {},\n \"_module\": importlib.import_module(self.__module__),\n }\n )\n self.__dict__[\"_parent_attrs\"] = self.manager.parent_attrs\n self._create_managers()\n\n def __getstate__(self):\n state = self.__dict__.copy()\n module = state.pop(\"_module\")\n state[\"_module_name\"] = module.__name__\n return state\n\n def __setstate__(self, state):\n module_name = state.pop(\"_module_name\")\n self.__dict__.update(state)\n self.__dict__[\"_module\"] = importlib.import_module(module_name)\n\n def __getattr__(self, name):\n try:\n return self.__dict__[\"_updated_attrs\"][name]\n except KeyError:\n try:\n value = self.__dict__[\"_attrs\"][name]\n\n # If the value is a list, we copy it in the _updated_attrs dict\n # because we are not able to detect changes made on the object\n # (append, insert, pop, ...). 
Without forcing the attr\n # creation __setattr__ is never called, the list never ends up\n # in the _updated_attrs dict, and the update() and save()\n # method never push the new data to the server.\n # See https://github.com/python-gitlab/python-gitlab/issues/306\n #\n # note: _parent_attrs will only store simple values (int) so we\n # don't make this check in the next except block.\n if isinstance(value, list):\n self.__dict__[\"_updated_attrs\"][name] = value[:]\n return self.__dict__[\"_updated_attrs\"][name]\n\n return value\n\n except KeyError:\n try:\n return self.__dict__[\"_parent_attrs\"][name]\n except KeyError:\n raise AttributeError(name)\n\n def __setattr__(self, name, value):\n self.__dict__[\"_updated_attrs\"][name] = value\n\n def __str__(self):\n data = self._attrs.copy()\n data.update(self._updated_attrs)\n return \"%s => %s\" % (type(self), data)\n\n def __repr__(self):\n if self._id_attr:\n return \"<%s %s:%s>\" % (\n self.__class__.__name__,\n self._id_attr,\n self.get_id(),\n )\n else:\n return \"<%s>\" % self.__class__.__name__\n\n def __eq__(self, other):\n if self.get_id() and other.get_id():\n return self.get_id() == other.get_id()\n return super(RESTObject, self) == other\n\n def __ne__(self, other):\n if self.get_id() and other.get_id():\n return self.get_id() != other.get_id()\n return super(RESTObject, self) != other\n\n def __dir__(self):\n return super(RESTObject, self).__dir__() + list(self.attributes)\n\n def __hash__(self):\n if not self.get_id():\n return super(RESTObject, self).__hash__()\n return hash(self.get_id())\n\n def _create_managers(self):\n managers = getattr(self, \"_managers\", None)\n if managers is None:\n return\n\n for attr, cls_name in self._managers:\n cls = getattr(self._module, cls_name)\n manager = cls(self.manager.gitlab, parent=self)\n self.__dict__[attr] = manager\n\n def _update_attrs(self, new_attrs):\n self.__dict__[\"_updated_attrs\"] = {}\n self.__dict__[\"_attrs\"].update(new_attrs)\n\n def get_id(self):\n \"\"\"Returns the id of the resource.\"\"\"\n if self._id_attr is None or not hasattr(self, self._id_attr):\n return None\n return getattr(self, self._id_attr)\n\n @property\n def attributes(self):\n d = self.__dict__[\"_updated_attrs\"].copy()\n d.update(self.__dict__[\"_attrs\"])\n d.update(self.__dict__[\"_parent_attrs\"])\n return d\n\n\nclass RESTObjectList(object):\n \"\"\"Generator object representing a list of RESTObject's.\n\n This generator uses the Gitlab pagination system to fetch new data when\n required.\n\n Note: you should not instanciate such objects, they are returned by calls\n to RESTManager.list()\n\n Args:\n manager: Manager to attach to the created objects\n obj_cls: Type of objects to create from the json data\n _list: A GitlabList object\n \"\"\"\n\n def __init__(self, manager, obj_cls, _list):\n \"\"\"Creates an objects list from a GitlabList.\n\n You should not create objects of this type, but use managers list()\n methods instead.\n\n Args:\n manager: the RESTManager to attach to the objects\n obj_cls: the class of the created objects\n _list: the GitlabList holding the data\n \"\"\"\n self.manager = manager\n self._obj_cls = obj_cls\n self._list = _list\n\n def __iter__(self):\n return self\n\n def __len__(self):\n return len(self._list)\n\n def __next__(self):\n return self.next()\n\n def next(self):\n data = self._list.next()\n return self._obj_cls(self.manager, data)\n\n @property\n def current_page(self):\n \"\"\"The current page number.\"\"\"\n return self._list.current_page\n\n 
@property\n def prev_page(self):\n \"\"\"The previous page number.\n\n If None, the current page is the first.\n \"\"\"\n return self._list.prev_page\n\n @property\n def next_page(self):\n \"\"\"The next page number.\n\n If None, the current page is the last.\n \"\"\"\n return self._list.next_page\n\n @property\n def per_page(self):\n \"\"\"The number of items per page.\"\"\"\n return self._list.per_page\n\n @property\n def total_pages(self):\n \"\"\"The total number of pages.\"\"\"\n return self._list.total_pages\n\n @property\n def total(self):\n \"\"\"The total number of items.\"\"\"\n return self._list.total\n\n\nclass RESTManager(object):\n \"\"\"Base class for CRUD operations on objects.\n\n Derived class must define ``_path`` and ``_obj_cls``.\n\n ``_path``: Base URL path on which requests will be sent (e.g. '/projects')\n ``_obj_cls``: The class of objects that will be created\n \"\"\"\n\n _path = None\n _obj_cls = None\n\n def __init__(self, gl, parent=None):\n \"\"\"REST manager constructor.\n\n Args:\n gl (Gitlab): :class:`~gitlab.Gitlab` connection to use to make\n requests.\n parent: REST object to which the manager is attached.\n \"\"\"\n self.gitlab = gl\n self._parent = parent # for nested managers\n self._computed_path = self._compute_path()\n\n @property\n def parent_attrs(self):\n return self._parent_attrs\n\n def _compute_path(self, path=None):\n self._parent_attrs = {}\n if path is None:\n path = self._path\n if self._parent is None or not hasattr(self, \"_from_parent_attrs\"):\n return path\n\n data = {\n self_attr: getattr(self._parent, parent_attr, None)\n for self_attr, parent_attr in self._from_parent_attrs.items()\n }\n self._parent_attrs = data\n return path % data\n\n @property\n def path(self):\n return self._computed_path\n", "path": "gitlab/base.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2013-2017 Gauvain Pocentek <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n\nimport importlib\n\n\nclass RESTObject(object):\n \"\"\"Represents an object built from server data.\n\n It holds the attributes know from the server, and the updated attributes in\n another. This allows smart updates, if the object allows it.\n\n You can redefine ``_id_attr`` in child classes to specify which attribute\n must be used as uniq ID. ``None`` means that the object can be updated\n without ID in the url.\n \"\"\"\n\n _id_attr = \"id\"\n\n def __init__(self, manager, attrs):\n self.__dict__.update(\n {\n \"manager\": manager,\n \"_attrs\": attrs,\n \"_updated_attrs\": {},\n \"_module\": importlib.import_module(self.__module__),\n }\n )\n self.__dict__[\"_parent_attrs\"] = self.manager.parent_attrs\n self._create_managers()\n\n def __getstate__(self):\n state = self.__dict__.copy()\n module = state.pop(\"_module\")\n state[\"_module_name\"] = module.__name__\n return state\n\n def __setstate__(self, state):\n module_name = state.pop(\"_module_name\")\n self.__dict__.update(state)\n self.__dict__[\"_module\"] = importlib.import_module(module_name)\n\n def __getattr__(self, name):\n try:\n return self.__dict__[\"_updated_attrs\"][name]\n except KeyError:\n try:\n value = self.__dict__[\"_attrs\"][name]\n\n # If the value is a list, we copy it in the _updated_attrs dict\n # because we are not able to detect changes made on the object\n # (append, insert, pop, ...). 
Without forcing the attr\n # creation __setattr__ is never called, the list never ends up\n # in the _updated_attrs dict, and the update() and save()\n # method never push the new data to the server.\n # See https://github.com/python-gitlab/python-gitlab/issues/306\n #\n # note: _parent_attrs will only store simple values (int) so we\n # don't make this check in the next except block.\n if isinstance(value, list):\n self.__dict__[\"_updated_attrs\"][name] = value[:]\n return self.__dict__[\"_updated_attrs\"][name]\n\n return value\n\n except KeyError:\n try:\n return self.__dict__[\"_parent_attrs\"][name]\n except KeyError:\n raise AttributeError(name)\n\n def __setattr__(self, name, value):\n self.__dict__[\"_updated_attrs\"][name] = value\n\n def __str__(self):\n data = self._attrs.copy()\n data.update(self._updated_attrs)\n return \"%s => %s\" % (type(self), data)\n\n def __repr__(self):\n if self._id_attr:\n return \"<%s %s:%s>\" % (\n self.__class__.__name__,\n self._id_attr,\n self.get_id(),\n )\n else:\n return \"<%s>\" % self.__class__.__name__\n\n def __eq__(self, other):\n if self.get_id() and other.get_id():\n return self.get_id() == other.get_id()\n return super(RESTObject, self) == other\n\n def __ne__(self, other):\n if self.get_id() and other.get_id():\n return self.get_id() != other.get_id()\n return super(RESTObject, self) != other\n\n def __dir__(self):\n return super(RESTObject, self).__dir__() + list(self.attributes)\n\n def __hash__(self):\n if not self.get_id():\n return super(RESTObject, self).__hash__()\n return hash(self.get_id())\n\n def _create_managers(self):\n managers = getattr(self, \"_managers\", None)\n if managers is None:\n return\n\n for attr, cls_name in self._managers:\n cls = getattr(self._module, cls_name)\n manager = cls(self.manager.gitlab, parent=self)\n self.__dict__[attr] = manager\n\n def _update_attrs(self, new_attrs):\n self.__dict__[\"_updated_attrs\"] = {}\n self.__dict__[\"_attrs\"] = new_attrs\n\n def get_id(self):\n \"\"\"Returns the id of the resource.\"\"\"\n if self._id_attr is None or not hasattr(self, self._id_attr):\n return None\n return getattr(self, self._id_attr)\n\n @property\n def attributes(self):\n d = self.__dict__[\"_updated_attrs\"].copy()\n d.update(self.__dict__[\"_attrs\"])\n d.update(self.__dict__[\"_parent_attrs\"])\n return d\n\n\nclass RESTObjectList(object):\n \"\"\"Generator object representing a list of RESTObject's.\n\n This generator uses the Gitlab pagination system to fetch new data when\n required.\n\n Note: you should not instanciate such objects, they are returned by calls\n to RESTManager.list()\n\n Args:\n manager: Manager to attach to the created objects\n obj_cls: Type of objects to create from the json data\n _list: A GitlabList object\n \"\"\"\n\n def __init__(self, manager, obj_cls, _list):\n \"\"\"Creates an objects list from a GitlabList.\n\n You should not create objects of this type, but use managers list()\n methods instead.\n\n Args:\n manager: the RESTManager to attach to the objects\n obj_cls: the class of the created objects\n _list: the GitlabList holding the data\n \"\"\"\n self.manager = manager\n self._obj_cls = obj_cls\n self._list = _list\n\n def __iter__(self):\n return self\n\n def __len__(self):\n return len(self._list)\n\n def __next__(self):\n return self.next()\n\n def next(self):\n data = self._list.next()\n return self._obj_cls(self.manager, data)\n\n @property\n def current_page(self):\n \"\"\"The current page number.\"\"\"\n return self._list.current_page\n\n @property\n 
def prev_page(self):\n \"\"\"The previous page number.\n\n If None, the current page is the first.\n \"\"\"\n return self._list.prev_page\n\n @property\n def next_page(self):\n \"\"\"The next page number.\n\n If None, the current page is the last.\n \"\"\"\n return self._list.next_page\n\n @property\n def per_page(self):\n \"\"\"The number of items per page.\"\"\"\n return self._list.per_page\n\n @property\n def total_pages(self):\n \"\"\"The total number of pages.\"\"\"\n return self._list.total_pages\n\n @property\n def total(self):\n \"\"\"The total number of items.\"\"\"\n return self._list.total\n\n\nclass RESTManager(object):\n \"\"\"Base class for CRUD operations on objects.\n\n Derived class must define ``_path`` and ``_obj_cls``.\n\n ``_path``: Base URL path on which requests will be sent (e.g. '/projects')\n ``_obj_cls``: The class of objects that will be created\n \"\"\"\n\n _path = None\n _obj_cls = None\n\n def __init__(self, gl, parent=None):\n \"\"\"REST manager constructor.\n\n Args:\n gl (Gitlab): :class:`~gitlab.Gitlab` connection to use to make\n requests.\n parent: REST object to which the manager is attached.\n \"\"\"\n self.gitlab = gl\n self._parent = parent # for nested managers\n self._computed_path = self._compute_path()\n\n @property\n def parent_attrs(self):\n return self._parent_attrs\n\n def _compute_path(self, path=None):\n self._parent_attrs = {}\n if path is None:\n path = self._path\n if self._parent is None or not hasattr(self, \"_from_parent_attrs\"):\n return path\n\n data = {\n self_attr: getattr(self._parent, parent_attr, None)\n for self_attr, parent_attr in self._from_parent_attrs.items()\n }\n self._parent_attrs = data\n return path % data\n\n @property\n def path(self):\n return self._computed_path\n", "path": "gitlab/base.py" } ]
diff --git a/gitlab/base.py b/gitlab/base.py index 40bc06ce4..ad3533913 100644 --- a/gitlab/base.py +++ b/gitlab/base.py @@ -131,7 +131,7 @@ def _create_managers(self): def _update_attrs(self, new_attrs): self.__dict__["_updated_attrs"] = {} - self.__dict__["_attrs"].update(new_attrs) + self.__dict__["_attrs"] = new_attrs def get_id(self): """Returns the id of the resource.""" diff --git a/gitlab/tests/test_base.py b/gitlab/tests/test_base.py index 58c0d4748..a0adcb03d 100644 --- a/gitlab/tests/test_base.py +++ b/gitlab/tests/test_base.py @@ -128,6 +128,13 @@ def test_update_attrs(self, fake_manager): assert {"foo": "foo", "bar": "bar"} == obj._attrs assert {} == obj._updated_attrs + def test_update_attrs_deleted(self, fake_manager): + obj = FakeObject(fake_manager, {"foo": "foo", "bar": "bar"}) + obj.bar = "baz" + obj._update_attrs({"foo": "foo"}) + assert {"foo": "foo"} == obj._attrs + assert {} == obj._updated_attrs + def test_create_managers(self, fake_gitlab, fake_manager): class ObjectWithManager(FakeObject): _managers = (("fakes", "FakeManager"),)
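For context on the one-line change above: `dict.update()` merges the fresh server payload into the cached attributes, so keys the server no longer returns linger locally, while rebinding `_attrs` makes the cache mirror the latest response exactly (this is what the added `test_update_attrs_deleted` checks). A plain-dict sketch of the difference, with no python-gitlab objects involved:

```python
cached = {"foo": "foo", "bar": "bar"}   # attributes from an earlier response
fresh = {"foo": "foo"}                  # the server has since dropped "bar"

merged = dict(cached)
merged.update(fresh)                    # old behaviour: stale "bar" survives
assert merged == {"foo": "foo", "bar": "bar"}

replaced = dict(fresh)                  # new behaviour: cache equals response
assert replaced == {"foo": "foo"}
```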
carltongibson__django-filter-844
Use DRF BooleanFilter as default for DRF FilterSet
When using django-filter together with Django REST Framework, shouldn't the DRF `BooleanFilter` be used as the default for a DRF `FilterSet`? Currently (**Python 2.7.14**, **Django 1.11.6**, **DRF 3.7.0**, **Django-Filter 1.1.0**) I have to manually declare my boolean fields with the DRF `BooleanFilter` so that lowercase `true`/`false` values are accepted.
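A minimal sketch of the workaround described above; the app, model, and field names are assumed for illustration (the `is_employed` NullBooleanField mirrors the one used in the fix's tests). Until the DRF `FilterSet` maps such fields to its own `BooleanFilter` by default, the filter has to be declared by hand so that `?is_employed=true` / `?is_employed=false` are accepted:

```python
# Illustrative only; `myapp.models.User` with a NullBooleanField `is_employed`
# is an assumed example model.
from django_filters import rest_framework as drf_filters

from myapp.models import User


class UserFilter(drf_filters.FilterSet):
    # Explicit declaration forces the DRF BooleanFilter (and its BooleanWidget),
    # which understands lowercase true/false query values.
    is_employed = drf_filters.BooleanFilter()

    class Meta:
        model = User
        fields = ['is_employed']
```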
[ { "content": "from copy import deepcopy\n\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom django_filters import filterset\n\nfrom .. import compat\nfrom .filters import BooleanFilter, IsoDateTimeFilter\n\nFILTER_FOR_DBFIELD_DEFAULTS = deepcopy(filterset.FILTER_FOR_DBFIELD_DEFAULTS)\nFILTER_FOR_DBFIELD_DEFAULTS.update({\n models.DateTimeField: {'filter_class': IsoDateTimeFilter},\n models.BooleanField: {'filter_class': BooleanFilter},\n})\n\n\nclass FilterSet(filterset.FilterSet):\n FILTER_DEFAULTS = FILTER_FOR_DBFIELD_DEFAULTS\n\n @property\n def form(self):\n form = super().form\n\n if compat.is_crispy():\n from crispy_forms.helper import FormHelper\n from crispy_forms.layout import Layout, Submit\n\n layout_components = list(form.fields.keys()) + [\n Submit('', _('Submit'), css_class='btn-default'),\n ]\n helper = FormHelper()\n helper.form_method = 'GET'\n helper.template_pack = 'bootstrap3'\n helper.layout = Layout(*layout_components)\n\n form.helper = helper\n\n return form\n", "path": "django_filters/rest_framework/filterset.py" } ]
[ { "content": "\nfrom __future__ import absolute_import\n\nfrom copy import deepcopy\n\nfrom django import forms\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom django_filters import filterset\n\nfrom .. import compat, utils\nfrom .filters import BooleanFilter, IsoDateTimeFilter\n\nFILTER_FOR_DBFIELD_DEFAULTS = deepcopy(filterset.FILTER_FOR_DBFIELD_DEFAULTS)\nFILTER_FOR_DBFIELD_DEFAULTS.update({\n models.DateTimeField: {'filter_class': IsoDateTimeFilter},\n models.BooleanField: {'filter_class': BooleanFilter},\n models.NullBooleanField: {'filter_class': BooleanFilter},\n})\n\n\nclass FilterSet(filterset.FilterSet):\n FILTER_DEFAULTS = FILTER_FOR_DBFIELD_DEFAULTS\n\n @property\n def form(self):\n form = super(FilterSet, self).form\n\n if compat.is_crispy():\n from crispy_forms.helper import FormHelper\n from crispy_forms.layout import Layout, Submit\n\n layout_components = list(form.fields.keys()) + [\n Submit('', _('Submit'), css_class='btn-default'),\n ]\n helper = FormHelper()\n helper.form_method = 'GET'\n helper.template_pack = 'bootstrap3'\n helper.layout = Layout(*layout_components)\n\n form.helper = helper\n\n return form\n\n @property\n def qs(self):\n from rest_framework.exceptions import ValidationError\n\n try:\n return super(FilterSet, self).qs\n except forms.ValidationError as e:\n raise ValidationError(utils.raw_validation(e))\n", "path": "django_filters/rest_framework/filterset.py" } ]
diff --git a/django_filters/rest_framework/filterset.py b/django_filters/rest_framework/filterset.py index 9a984f25f..2f86742fd 100644 --- a/django_filters/rest_framework/filterset.py +++ b/django_filters/rest_framework/filterset.py @@ -16,6 +16,7 @@ FILTER_FOR_DBFIELD_DEFAULTS.update({ models.DateTimeField: {'filter_class': IsoDateTimeFilter}, models.BooleanField: {'filter_class': BooleanFilter}, + models.NullBooleanField: {'filter_class': BooleanFilter}, }) diff --git a/tests/models.py b/tests/models.py index 848e7f36e..0089713ec 100644 --- a/tests/models.py +++ b/tests/models.py @@ -51,6 +51,7 @@ class User(models.Model): status = models.IntegerField(choices=STATUS_CHOICES, default=0) is_active = models.BooleanField(default=False) + is_employed = models.NullBooleanField(default=False) favorite_books = models.ManyToManyField('Book', related_name='lovers') diff --git a/tests/rest_framework/test_filterset.py b/tests/rest_framework/test_filterset.py index bc04ba981..58833b357 100644 --- a/tests/rest_framework/test_filterset.py +++ b/tests/rest_framework/test_filterset.py @@ -31,6 +31,12 @@ def test_booleanfilter_widget(self): self.assertIsInstance(result, filters.BooleanFilter) self.assertEqual(result.extra['widget'], BooleanWidget) + def test_booleanfilter_widget_nullbooleanfield(self): + field = User._meta.get_field('is_employed') + result = FilterSet.filter_for_field(field, 'is_employed') + self.assertIsInstance(result, filters.BooleanFilter) + self.assertEqual(result.extra['widget'], BooleanWidget) + @skipIf(is_crispy(), 'django_crispy_forms must be installed') @override_settings(INSTALLED_APPS=settings.INSTALLED_APPS + ('crispy_forms', ))
PyGithub__PyGithub-946
PaginatedList reversed property loses HTTP headers
In reversed(), the 'headers' parameter is not passed on to the new PaginatedList(). This makes some APIs not reversible. For example, get_stargazers_with_dates() requires the "Accept: application/vnd.github.v3.star+json" header on every API call, so it no longer works when reversed.
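A short reproduction sketch (the token and repository name are placeholders): iterating forward works because each page request carries the stored Accept header, but `reversed` rebuilds the `PaginatedList` without it, so the starring data is lost:

```python
# Illustrative only; replace the token and repository with real values.
from github import Github

g = Github("<personal-access-token>")
repo = g.get_repo("openframeworks/openFrameworks")

stargazers = repo.get_stargazers_with_dates()

# Forward iteration: requests include
# Accept: application/vnd.github.v3.star+json, so starred_at is populated.
for sg in stargazers[:3]:
    print(sg.starred_at, sg.user.login)

# Reversed iteration: the Accept header is dropped, the API falls back to the
# plain stargazer representation, and starred_at / user are no longer returned.
for sg in stargazers.reversed[:3]:
    print(sg.starred_at, sg.user.login)
```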
[ { "content": "# -*- coding: utf-8 -*-\n\n############################ Copyrights and license ############################\n# #\n# Copyright 2012 Vincent Jacques <[email protected]> #\n# Copyright 2012 Zearin <[email protected]> #\n# Copyright 2013 AKFish <[email protected]> #\n# Copyright 2013 Bill Mill <[email protected]> #\n# Copyright 2013 Vincent Jacques <[email protected]> #\n# Copyright 2013 davidbrai <[email protected]> #\n# Copyright 2014 Thialfihar <[email protected]> #\n# Copyright 2014 Vincent Jacques <[email protected]> #\n# Copyright 2015 Dan Vanderkam <[email protected]> #\n# Copyright 2015 Eliot Walker <[email protected]> #\n# Copyright 2016 Peter Buckley <[email protected]> #\n# Copyright 2017 Jannis Gebauer <[email protected]> #\n# Copyright 2018 Gilad Shefer <[email protected]> #\n# Copyright 2018 Joel Koglin <[email protected]> #\n# Copyright 2018 Wan Liuyang <[email protected]> #\n# Copyright 2018 sfdye <[email protected]> #\n# #\n# This file is part of PyGithub. #\n# http://pygithub.readthedocs.io/ #\n# #\n# PyGithub is free software: you can redistribute it and/or modify it under #\n# the terms of the GNU Lesser General Public License as published by the Free #\n# Software Foundation, either version 3 of the License, or (at your option) #\n# any later version. #\n# #\n# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #\n# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #\n# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #\n# details. #\n# #\n# You should have received a copy of the GNU Lesser General Public License #\n# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #\n# #\n################################################################################\n\ntry:\n from urllib.parse import parse_qs\nexcept ImportError:\n from urlparse import parse_qs\n\nimport github.GithubObject\n\n\nclass PaginatedListBase:\n def __init__(self):\n self.__elements = list()\n\n def __getitem__(self, index):\n assert isinstance(index, (int, slice))\n if isinstance(index, (int, long)):\n self.__fetchToIndex(index)\n return self.__elements[index]\n else:\n return self._Slice(self, index)\n\n def __iter__(self):\n for element in self.__elements:\n yield element\n while self._couldGrow():\n newElements = self._grow()\n for element in newElements:\n yield element\n\n def _isBiggerThan(self, index):\n return len(self.__elements) > index or self._couldGrow()\n\n def __fetchToIndex(self, index):\n while len(self.__elements) <= index and self._couldGrow():\n self._grow()\n\n def _grow(self):\n newElements = self._fetchNextPage()\n self.__elements += newElements\n return newElements\n\n class _Slice:\n def __init__(self, theList, theSlice):\n self.__list = theList\n self.__start = theSlice.start or 0\n self.__stop = theSlice.stop\n self.__step = theSlice.step or 1\n\n def __iter__(self):\n index = self.__start\n while not self.__finished(index):\n if self.__list._isBiggerThan(index):\n yield self.__list[index]\n index += self.__step\n else:\n return\n\n def __finished(self, index):\n return self.__stop is not None and index >= self.__stop\n\n\nclass PaginatedList(PaginatedListBase):\n \"\"\"\n This class abstracts the `pagination of the API <http://developer.github.com/v3/#pagination>`_.\n\n You can simply enumerate through instances of this class::\n\n for repo in user.get_repos():\n print(repo.name)\n\n If you want to know the total number of items in the list::\n\n 
print(user.get_repos().totalCount)\n print(len(user.get_repos()))\n\n You can also index them or take slices::\n\n second_repo = user.get_repos()[1]\n first_repos = user.get_repos()[:10]\n\n If you want to iterate in reversed order, just do::\n\n for repo in user.get_repos().reversed:\n print(repo.name)\n\n And if you really need it, you can explicitly access a specific page::\n\n some_repos = user.get_repos().get_page(0)\n some_other_repos = user.get_repos().get_page(3)\n \"\"\"\n\n def __init__(self, contentClass, requester, firstUrl, firstParams, headers=None, list_item=\"items\"):\n PaginatedListBase.__init__(self)\n self.__requester = requester\n self.__contentClass = contentClass\n self.__firstUrl = firstUrl\n self.__firstParams = firstParams or ()\n self.__nextUrl = firstUrl\n self.__nextParams = firstParams or {}\n self.__headers = headers\n self.__list_item = list_item\n if self.__requester.per_page != 30:\n self.__nextParams[\"per_page\"] = self.__requester.per_page\n self._reversed = False\n self.__totalCount = None\n\n @property\n def totalCount(self):\n if not self.__totalCount:\n params = {} if self.__nextParams is None else self.__nextParams.copy()\n # set per_page = 1 so the totalCount is just the number of pages\n params.update({\"per_page\": 1})\n headers, data = self.__requester.requestJsonAndCheck(\n \"GET\",\n self.__firstUrl,\n parameters=params,\n headers=self.__headers\n )\n if 'link' not in headers:\n self.__totalCount = len(data) if data else 0\n else:\n links = self.__parseLinkHeader(headers)\n lastUrl = links.get(\"last\")\n self.__totalCount = int(parse_qs(lastUrl)['page'][0])\n return self.__totalCount\n\n def _getLastPageUrl(self):\n headers, data = self.__requester.requestJsonAndCheck(\n \"GET\",\n self.__firstUrl,\n parameters=self.__nextParams,\n headers=self.__headers\n )\n links = self.__parseLinkHeader(headers)\n lastUrl = links.get(\"last\")\n return lastUrl\n\n @property\n def reversed(self):\n r = PaginatedList(self.__contentClass, self.__requester, self.__firstUrl, self.__firstParams)\n r.__reverse()\n return r\n\n def __reverse(self):\n self._reversed = True\n lastUrl = self._getLastPageUrl()\n if lastUrl:\n self.__nextUrl = lastUrl\n\n def _couldGrow(self):\n return self.__nextUrl is not None\n\n def _fetchNextPage(self):\n headers, data = self.__requester.requestJsonAndCheck(\n \"GET\",\n self.__nextUrl,\n parameters=self.__nextParams,\n headers=self.__headers\n )\n data = data if data else []\n\n self.__nextUrl = None\n if len(data) > 0:\n links = self.__parseLinkHeader(headers)\n if self._reversed:\n if \"prev\" in links:\n self.__nextUrl = links[\"prev\"]\n elif \"next\" in links:\n self.__nextUrl = links[\"next\"]\n self.__nextParams = None\n\n if self.__list_item in data:\n self.__totalCount = data.get('total_count')\n data = data[self.__list_item]\n\n content = [\n self.__contentClass(self.__requester, headers, element, completed=False)\n for element in data if element is not None\n ]\n if self._reversed:\n return content[::-1]\n return content\n\n def __parseLinkHeader(self, headers):\n links = {}\n if \"link\" in headers:\n linkHeaders = headers[\"link\"].split(\", \")\n for linkHeader in linkHeaders:\n (url, rel) = linkHeader.split(\"; \")\n url = url[1:-1]\n rel = rel[5:-1]\n links[rel] = url\n return links\n\n def get_page(self, page):\n params = dict(self.__firstParams)\n if page != 0:\n params[\"page\"] = page + 1\n if self.__requester.per_page != 30:\n params[\"per_page\"] = self.__requester.per_page\n headers, data = 
self.__requester.requestJsonAndCheck(\n \"GET\",\n self.__firstUrl,\n parameters=params,\n headers=self.__headers\n )\n\n if self.__list_item in data:\n self.__totalCount = data.get('total_count')\n data = data[self.__list_item]\n\n return [\n self.__contentClass(self.__requester, headers, element, completed=False)\n for element in data\n ]\n", "path": "github/PaginatedList.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\n\n############################ Copyrights and license ############################\n# #\n# Copyright 2012 Vincent Jacques <[email protected]> #\n# Copyright 2012 Zearin <[email protected]> #\n# Copyright 2013 AKFish <[email protected]> #\n# Copyright 2013 Bill Mill <[email protected]> #\n# Copyright 2013 Vincent Jacques <[email protected]> #\n# Copyright 2013 davidbrai <[email protected]> #\n# Copyright 2014 Thialfihar <[email protected]> #\n# Copyright 2014 Vincent Jacques <[email protected]> #\n# Copyright 2015 Dan Vanderkam <[email protected]> #\n# Copyright 2015 Eliot Walker <[email protected]> #\n# Copyright 2016 Peter Buckley <[email protected]> #\n# Copyright 2017 Jannis Gebauer <[email protected]> #\n# Copyright 2018 Gilad Shefer <[email protected]> #\n# Copyright 2018 Joel Koglin <[email protected]> #\n# Copyright 2018 Wan Liuyang <[email protected]> #\n# Copyright 2018 sfdye <[email protected]> #\n# #\n# This file is part of PyGithub. #\n# http://pygithub.readthedocs.io/ #\n# #\n# PyGithub is free software: you can redistribute it and/or modify it under #\n# the terms of the GNU Lesser General Public License as published by the Free #\n# Software Foundation, either version 3 of the License, or (at your option) #\n# any later version. #\n# #\n# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #\n# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #\n# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #\n# details. #\n# #\n# You should have received a copy of the GNU Lesser General Public License #\n# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #\n# #\n################################################################################\n\ntry:\n from urllib.parse import parse_qs\nexcept ImportError:\n from urlparse import parse_qs\n\nimport github.GithubObject\n\n\nclass PaginatedListBase:\n def __init__(self):\n self.__elements = list()\n\n def __getitem__(self, index):\n assert isinstance(index, (int, slice))\n if isinstance(index, (int, long)):\n self.__fetchToIndex(index)\n return self.__elements[index]\n else:\n return self._Slice(self, index)\n\n def __iter__(self):\n for element in self.__elements:\n yield element\n while self._couldGrow():\n newElements = self._grow()\n for element in newElements:\n yield element\n\n def _isBiggerThan(self, index):\n return len(self.__elements) > index or self._couldGrow()\n\n def __fetchToIndex(self, index):\n while len(self.__elements) <= index and self._couldGrow():\n self._grow()\n\n def _grow(self):\n newElements = self._fetchNextPage()\n self.__elements += newElements\n return newElements\n\n class _Slice:\n def __init__(self, theList, theSlice):\n self.__list = theList\n self.__start = theSlice.start or 0\n self.__stop = theSlice.stop\n self.__step = theSlice.step or 1\n\n def __iter__(self):\n index = self.__start\n while not self.__finished(index):\n if self.__list._isBiggerThan(index):\n yield self.__list[index]\n index += self.__step\n else:\n return\n\n def __finished(self, index):\n return self.__stop is not None and index >= self.__stop\n\n\nclass PaginatedList(PaginatedListBase):\n \"\"\"\n This class abstracts the `pagination of the API <http://developer.github.com/v3/#pagination>`_.\n\n You can simply enumerate through instances of this class::\n\n for repo in user.get_repos():\n print(repo.name)\n\n If you want to know the total number of items in the list::\n\n 
print(user.get_repos().totalCount)\n print(len(user.get_repos()))\n\n You can also index them or take slices::\n\n second_repo = user.get_repos()[1]\n first_repos = user.get_repos()[:10]\n\n If you want to iterate in reversed order, just do::\n\n for repo in user.get_repos().reversed:\n print(repo.name)\n\n And if you really need it, you can explicitly access a specific page::\n\n some_repos = user.get_repos().get_page(0)\n some_other_repos = user.get_repos().get_page(3)\n \"\"\"\n\n def __init__(self, contentClass, requester, firstUrl, firstParams, headers=None, list_item=\"items\"):\n PaginatedListBase.__init__(self)\n self.__requester = requester\n self.__contentClass = contentClass\n self.__firstUrl = firstUrl\n self.__firstParams = firstParams or ()\n self.__nextUrl = firstUrl\n self.__nextParams = firstParams or {}\n self.__headers = headers\n self.__list_item = list_item\n if self.__requester.per_page != 30:\n self.__nextParams[\"per_page\"] = self.__requester.per_page\n self._reversed = False\n self.__totalCount = None\n\n @property\n def totalCount(self):\n if not self.__totalCount:\n params = {} if self.__nextParams is None else self.__nextParams.copy()\n # set per_page = 1 so the totalCount is just the number of pages\n params.update({\"per_page\": 1})\n headers, data = self.__requester.requestJsonAndCheck(\n \"GET\",\n self.__firstUrl,\n parameters=params,\n headers=self.__headers\n )\n if 'link' not in headers:\n self.__totalCount = len(data) if data else 0\n else:\n links = self.__parseLinkHeader(headers)\n lastUrl = links.get(\"last\")\n self.__totalCount = int(parse_qs(lastUrl)['page'][0])\n return self.__totalCount\n\n def _getLastPageUrl(self):\n headers, data = self.__requester.requestJsonAndCheck(\n \"GET\",\n self.__firstUrl,\n parameters=self.__nextParams,\n headers=self.__headers\n )\n links = self.__parseLinkHeader(headers)\n lastUrl = links.get(\"last\")\n return lastUrl\n\n @property\n def reversed(self):\n r = PaginatedList(self.__contentClass, self.__requester, self.__firstUrl, self.__firstParams, self.__headers, self.__list_item)\n r.__reverse()\n return r\n\n def __reverse(self):\n self._reversed = True\n lastUrl = self._getLastPageUrl()\n if lastUrl:\n self.__nextUrl = lastUrl\n\n def _couldGrow(self):\n return self.__nextUrl is not None\n\n def _fetchNextPage(self):\n headers, data = self.__requester.requestJsonAndCheck(\n \"GET\",\n self.__nextUrl,\n parameters=self.__nextParams,\n headers=self.__headers\n )\n data = data if data else []\n\n self.__nextUrl = None\n if len(data) > 0:\n links = self.__parseLinkHeader(headers)\n if self._reversed:\n if \"prev\" in links:\n self.__nextUrl = links[\"prev\"]\n elif \"next\" in links:\n self.__nextUrl = links[\"next\"]\n self.__nextParams = None\n\n if self.__list_item in data:\n self.__totalCount = data.get('total_count')\n data = data[self.__list_item]\n\n content = [\n self.__contentClass(self.__requester, headers, element, completed=False)\n for element in data if element is not None\n ]\n if self._reversed:\n return content[::-1]\n return content\n\n def __parseLinkHeader(self, headers):\n links = {}\n if \"link\" in headers:\n linkHeaders = headers[\"link\"].split(\", \")\n for linkHeader in linkHeaders:\n (url, rel) = linkHeader.split(\"; \")\n url = url[1:-1]\n rel = rel[5:-1]\n links[rel] = url\n return links\n\n def get_page(self, page):\n params = dict(self.__firstParams)\n if page != 0:\n params[\"page\"] = page + 1\n if self.__requester.per_page != 30:\n params[\"per_page\"] = 
self.__requester.per_page\n headers, data = self.__requester.requestJsonAndCheck(\n \"GET\",\n self.__firstUrl,\n parameters=params,\n headers=self.__headers\n )\n\n if self.__list_item in data:\n self.__totalCount = data.get('total_count')\n data = data[self.__list_item]\n\n return [\n self.__contentClass(self.__requester, headers, element, completed=False)\n for element in data\n ]\n", "path": "github/PaginatedList.py" } ]
diff --git a/github/PaginatedList.py b/github/PaginatedList.py index f67fdca038..586b4a8ed7 100644 --- a/github/PaginatedList.py +++ b/github/PaginatedList.py @@ -175,7 +175,7 @@ def _getLastPageUrl(self): @property def reversed(self): - r = PaginatedList(self.__contentClass, self.__requester, self.__firstUrl, self.__firstParams) + r = PaginatedList(self.__contentClass, self.__requester, self.__firstUrl, self.__firstParams, self.__headers, self.__list_item) r.__reverse() return r diff --git a/github/tests/AllTests.py b/github/tests/AllTests.py index 5e92014616..ba787fca4d 100644 --- a/github/tests/AllTests.py +++ b/github/tests/AllTests.py @@ -122,3 +122,4 @@ from Issue494 import * from Issue572 import * from Issue937 import * +from Issue945 import * diff --git a/github/tests/Issue945.py b/github/tests/Issue945.py new file mode 100644 index 0000000000..58e87806ff --- /dev/null +++ b/github/tests/Issue945.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- + +############################ Copyrights and license ############################ +# # +# Copyright 2018 Kelvin Wong (https://github.com/netsgnut) # +# # +# This file is part of PyGithub. # +# http://pygithub.readthedocs.io/ # +# # +# PyGithub is free software: you can redistribute it and/or modify it under # +# the terms of the GNU Lesser General Public License as published by the Free # +# Software Foundation, either version 3 of the License, or (at your option) # +# any later version. # +# # +# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY # +# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more # +# details. # +# # +# You should have received a copy of the GNU Lesser General Public License # +# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. 
# +# # +################################################################################ + +import Framework + +import github + + +class Issue945(Framework.TestCase): # https://github.com/PyGithub/PyGithub/issues/945 + def setUp(self): + Framework.TestCase.setUp(self) + self.repo = self.g.get_user("openframeworks").get_repo("openFrameworks") + self.list = self.repo.get_issues() + self.list_with_headers = self.repo.get_stargazers_with_dates() + + def testReservedPaginatedListAttributePreservation(self): + r1 = self.list.reversed + self.assertEqual(self.list._PaginatedList__contentClass, r1._PaginatedList__contentClass) + self.assertEqual(self.list._PaginatedList__requester, r1._PaginatedList__requester) + self.assertEqual(self.list._PaginatedList__firstUrl, r1._PaginatedList__firstUrl) + self.assertEqual(self.list._PaginatedList__firstParams, r1._PaginatedList__firstParams) + self.assertEqual(self.list._PaginatedList__headers, r1._PaginatedList__headers) + self.assertEqual(self.list._PaginatedList__list_item, r1._PaginatedList__list_item) + + self.assertTrue(self.list_with_headers._PaginatedList__headers is not None) + r2 = self.list_with_headers.reversed + self.assertEqual(self.list_with_headers._PaginatedList__contentClass, r2._PaginatedList__contentClass) + self.assertEqual(self.list_with_headers._PaginatedList__requester, r2._PaginatedList__requester) + self.assertEqual(self.list_with_headers._PaginatedList__firstUrl, r2._PaginatedList__firstUrl) + self.assertEqual(self.list_with_headers._PaginatedList__firstParams, r2._PaginatedList__firstParams) + self.assertEqual(self.list_with_headers._PaginatedList__headers, r2._PaginatedList__headers) + self.assertEqual(self.list_with_headers._PaginatedList__list_item, r2._PaginatedList__list_item) diff --git a/github/tests/ReplayData/Issue945.setUp.txt b/github/tests/ReplayData/Issue945.setUp.txt new file mode 100644 index 0000000000..0a66bed2ae --- /dev/null +++ b/github/tests/ReplayData/Issue945.setUp.txt @@ -0,0 +1,22 @@ +https +GET +api.github.com +None +/users/openframeworks +{'Authorization': 'Basic login_and_password_removed', 'User-Agent': 'PyGithub/Python'} +None +200 +[('Server', 'GitHub.com'), ('Date', 'Fri, 26 Oct 2018 06:02:39 GMT'), ('Content-Type', 'application/json; charset=utf-8'), ('Transfer-Encoding', 'chunked'), ('Status', '200 OK'), ('X-RateLimit-Limit', '5000'), ('X-RateLimit-Remaining', '4973'), ('X-RateLimit-Reset', '1540536267'), ('Cache-Control', 'private, max-age=60, s-maxage=60'), ('Vary', 'Accept, Authorization, Cookie, X-GitHub-OTP'), ('ETag', 'W/"490a721bce7bb5817a38a53711404075"'), ('Last-Modified', 'Wed, 18 Nov 2015 16:33:25 GMT'), ('X-OAuth-Scopes', 'repo'), ('X-Accepted-OAuth-Scopes', ''), ('X-GitHub-Media-Type', 'github.v3; format=json'), ('Access-Control-Expose-Headers', 'ETag, Link, Retry-After, X-GitHub-OTP, X-RateLimit-Limit, X-RateLimit-Remaining, X-RateLimit-Reset, X-OAuth-Scopes, X-Accepted-OAuth-Scopes, X-Poll-Interval'), ('Access-Control-Allow-Origin', '*'), ('Strict-Transport-Security', 'max-age=31536000; includeSubdomains; preload'), ('X-Frame-Options', 'deny'), ('X-Content-Type-Options', 'nosniff'), ('X-XSS-Protection', '1; mode=block'), ('Referrer-Policy', 'origin-when-cross-origin, strict-origin-when-cross-origin'), ('Content-Security-Policy', "default-src 'none'"), ('Content-Encoding', 'gzip'), ('X-GitHub-Request-Id', 'ED32:235D:2CFA30:625A06:5BD2ADFE')] 
+{"login":"openframeworks","id":142866,"node_id":"MDEyOk9yZ2FuaXphdGlvbjE0Mjg2Ng==","avatar_url":"https://avatars2.githubusercontent.com/u/142866?v=4","gravatar_id":"","url":"https://api.github.com/users/openframeworks","html_url":"https://github.com/openframeworks","followers_url":"https://api.github.com/users/openframeworks/followers","following_url":"https://api.github.com/users/openframeworks/following{/other_user}","gists_url":"https://api.github.com/users/openframeworks/gists{/gist_id}","starred_url":"https://api.github.com/users/openframeworks/starred{/owner}{/repo}","subscriptions_url":"https://api.github.com/users/openframeworks/subscriptions","organizations_url":"https://api.github.com/users/openframeworks/orgs","repos_url":"https://api.github.com/users/openframeworks/repos","events_url":"https://api.github.com/users/openframeworks/events{/privacy}","received_events_url":"https://api.github.com/users/openframeworks/received_events","type":"Organization","site_admin":false,"name":"openFrameworks","company":null,"blog":"http://openframeworks.cc/","location":null,"email":null,"hireable":null,"bio":null,"public_repos":9,"public_gists":0,"followers":0,"following":0,"created_at":"2009-10-21T21:54:37Z","updated_at":"2015-11-18T16:33:25Z"} + +https +GET +api.github.com +None +/repos/openframeworks/openFrameworks +{'Authorization': 'Basic login_and_password_removed', 'User-Agent': 'PyGithub/Python'} +None +200 +[('Server', 'GitHub.com'), ('Date', 'Fri, 26 Oct 2018 06:02:40 GMT'), ('Content-Type', 'application/json; charset=utf-8'), ('Transfer-Encoding', 'chunked'), ('Status', '200 OK'), ('X-RateLimit-Limit', '5000'), ('X-RateLimit-Remaining', '4972'), ('X-RateLimit-Reset', '1540536267'), ('Cache-Control', 'private, max-age=60, s-maxage=60'), ('Vary', 'Accept, Authorization, Cookie, X-GitHub-OTP'), ('ETag', 'W/"501068d9a3e67fb624ce5e83e1270d41"'), ('Last-Modified', 'Fri, 26 Oct 2018 04:18:01 GMT'), ('X-OAuth-Scopes', 'repo'), ('X-Accepted-OAuth-Scopes', 'repo'), ('X-GitHub-Media-Type', 'github.v3; format=json'), ('Access-Control-Expose-Headers', 'ETag, Link, Retry-After, X-GitHub-OTP, X-RateLimit-Limit, X-RateLimit-Remaining, X-RateLimit-Reset, X-OAuth-Scopes, X-Accepted-OAuth-Scopes, X-Poll-Interval'), ('Access-Control-Allow-Origin', '*'), ('Strict-Transport-Security', 'max-age=31536000; includeSubdomains; preload'), ('X-Frame-Options', 'deny'), ('X-Content-Type-Options', 'nosniff'), ('X-XSS-Protection', '1; mode=block'), ('Referrer-Policy', 'origin-when-cross-origin, strict-origin-when-cross-origin'), ('Content-Security-Policy', "default-src 'none'"), ('Content-Encoding', 'gzip'), ('X-GitHub-Request-Id', 'ACCA:235E:3C0006:7729D5:5BD2ADFF')] 
+{"id":345337,"node_id":"MDEwOlJlcG9zaXRvcnkzNDUzMzc=","name":"openFrameworks","full_name":"openframeworks/openFrameworks","private":false,"owner":{"login":"openframeworks","id":142866,"node_id":"MDEyOk9yZ2FuaXphdGlvbjE0Mjg2Ng==","avatar_url":"https://avatars2.githubusercontent.com/u/142866?v=4","gravatar_id":"","url":"https://api.github.com/users/openframeworks","html_url":"https://github.com/openframeworks","followers_url":"https://api.github.com/users/openframeworks/followers","following_url":"https://api.github.com/users/openframeworks/following{/other_user}","gists_url":"https://api.github.com/users/openframeworks/gists{/gist_id}","starred_url":"https://api.github.com/users/openframeworks/starred{/owner}{/repo}","subscriptions_url":"https://api.github.com/users/openframeworks/subscriptions","organizations_url":"https://api.github.com/users/openframeworks/orgs","repos_url":"https://api.github.com/users/openframeworks/repos","events_url":"https://api.github.com/users/openframeworks/events{/privacy}","received_events_url":"https://api.github.com/users/openframeworks/received_events","type":"Organization","site_admin":false},"html_url":"https://github.com/openframeworks/openFrameworks","description":"openFrameworks is a community-developed cross platform toolkit for creative coding in C++.","fork":false,"url":"https://api.github.com/repos/openframeworks/openFrameworks","forks_url":"https://api.github.com/repos/openframeworks/openFrameworks/forks","keys_url":"https://api.github.com/repos/openframeworks/openFrameworks/keys{/key_id}","collaborators_url":"https://api.github.com/repos/openframeworks/openFrameworks/collaborators{/collaborator}","teams_url":"https://api.github.com/repos/openframeworks/openFrameworks/teams","hooks_url":"https://api.github.com/repos/openframeworks/openFrameworks/hooks","issue_events_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/events{/number}","events_url":"https://api.github.com/repos/openframeworks/openFrameworks/events","assignees_url":"https://api.github.com/repos/openframeworks/openFrameworks/assignees{/user}","branches_url":"https://api.github.com/repos/openframeworks/openFrameworks/branches{/branch}","tags_url":"https://api.github.com/repos/openframeworks/openFrameworks/tags","blobs_url":"https://api.github.com/repos/openframeworks/openFrameworks/git/blobs{/sha}","git_tags_url":"https://api.github.com/repos/openframeworks/openFrameworks/git/tags{/sha}","git_refs_url":"https://api.github.com/repos/openframeworks/openFrameworks/git/refs{/sha}","trees_url":"https://api.github.com/repos/openframeworks/openFrameworks/git/trees{/sha}","statuses_url":"https://api.github.com/repos/openframeworks/openFrameworks/statuses/{sha}","languages_url":"https://api.github.com/repos/openframeworks/openFrameworks/languages","stargazers_url":"https://api.github.com/repos/openframeworks/openFrameworks/stargazers","contributors_url":"https://api.github.com/repos/openframeworks/openFrameworks/contributors","subscribers_url":"https://api.github.com/repos/openframeworks/openFrameworks/subscribers","subscription_url":"https://api.github.com/repos/openframeworks/openFrameworks/subscription","commits_url":"https://api.github.com/repos/openframeworks/openFrameworks/commits{/sha}","git_commits_url":"https://api.github.com/repos/openframeworks/openFrameworks/git/commits{/sha}","comments_url":"https://api.github.com/repos/openframeworks/openFrameworks/comments{/number}","issue_comment_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/comments{
/number}","contents_url":"https://api.github.com/repos/openframeworks/openFrameworks/contents/{+path}","compare_url":"https://api.github.com/repos/openframeworks/openFrameworks/compare/{base}...{head}","merges_url":"https://api.github.com/repos/openframeworks/openFrameworks/merges","archive_url":"https://api.github.com/repos/openframeworks/openFrameworks/{archive_format}{/ref}","downloads_url":"https://api.github.com/repos/openframeworks/openFrameworks/downloads","issues_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues{/number}","pulls_url":"https://api.github.com/repos/openframeworks/openFrameworks/pulls{/number}","milestones_url":"https://api.github.com/repos/openframeworks/openFrameworks/milestones{/number}","notifications_url":"https://api.github.com/repos/openframeworks/openFrameworks/notifications{?since,all,participating}","labels_url":"https://api.github.com/repos/openframeworks/openFrameworks/labels{/name}","releases_url":"https://api.github.com/repos/openframeworks/openFrameworks/releases{/id}","deployments_url":"https://api.github.com/repos/openframeworks/openFrameworks/deployments","created_at":"2009-10-21T21:55:54Z","updated_at":"2018-10-26T04:18:01Z","pushed_at":"2018-10-18T15:56:59Z","git_url":"git://github.com/openframeworks/openFrameworks.git","ssh_url":"[email protected]:openframeworks/openFrameworks.git","clone_url":"https://github.com/openframeworks/openFrameworks.git","svn_url":"https://github.com/openframeworks/openFrameworks","homepage":"http://openframeworks.cc","size":1997625,"stargazers_count":6453,"watchers_count":6453,"language":"C++","has_issues":true,"has_projects":true,"has_downloads":true,"has_wiki":true,"has_pages":false,"forks_count":2143,"mirror_url":null,"archived":false,"open_issues_count":874,"license":{"key":"other","name":"Other","spdx_id":"NOASSERTION","url":null,"node_id":"MDc6TGljZW5zZTA="},"forks":2143,"open_issues":874,"watchers":6453,"default_branch":"patch-release","permissions":{"admin":false,"push":false,"pull":true},"organization":{"login":"openframeworks","id":142866,"node_id":"MDEyOk9yZ2FuaXphdGlvbjE0Mjg2Ng==","avatar_url":"https://avatars2.githubusercontent.com/u/142866?v=4","gravatar_id":"","url":"https://api.github.com/users/openframeworks","html_url":"https://github.com/openframeworks","followers_url":"https://api.github.com/users/openframeworks/followers","following_url":"https://api.github.com/users/openframeworks/following{/other_user}","gists_url":"https://api.github.com/users/openframeworks/gists{/gist_id}","starred_url":"https://api.github.com/users/openframeworks/starred{/owner}{/repo}","subscriptions_url":"https://api.github.com/users/openframeworks/subscriptions","organizations_url":"https://api.github.com/users/openframeworks/orgs","repos_url":"https://api.github.com/users/openframeworks/repos","events_url":"https://api.github.com/users/openframeworks/events{/privacy}","received_events_url":"https://api.github.com/users/openframeworks/received_events","type":"Organization","site_admin":false},"network_count":2143,"subscribers_count":514} + diff --git a/github/tests/ReplayData/Issue945.testReservedPaginatedListAttributePreservation.txt b/github/tests/ReplayData/Issue945.testReservedPaginatedListAttributePreservation.txt new file mode 100644 index 0000000000..06d0715b20 --- /dev/null +++ b/github/tests/ReplayData/Issue945.testReservedPaginatedListAttributePreservation.txt @@ -0,0 +1,22 @@ +https +GET +api.github.com +None +/repos/openframeworks/openFrameworks/issues +{'Authorization': 'Basic 
login_and_password_removed', 'User-Agent': 'PyGithub/Python'} +None +200 +[('Server', 'GitHub.com'), ('Date', 'Fri, 26 Oct 2018 06:02:41 GMT'), ('Content-Type', 'application/json; charset=utf-8'), ('Transfer-Encoding', 'chunked'), ('Status', '200 OK'), ('X-RateLimit-Limit', '5000'), ('X-RateLimit-Remaining', '4971'), ('X-RateLimit-Reset', '1540536267'), ('Cache-Control', 'private, max-age=60, s-maxage=60'), ('Vary', 'Accept, Authorization, Cookie, X-GitHub-OTP'), ('ETag', 'W/"77d135814a1f98b76a3f0c1819433035"'), ('X-OAuth-Scopes', 'repo'), ('X-Accepted-OAuth-Scopes', 'repo'), ('X-GitHub-Media-Type', 'github.v3; format=json'), ('Link', '<https://api.github.com/repositories/345337/issues?page=2>; rel="next", <https://api.github.com/repositories/345337/issues?page=30>; rel="last"'), ('Access-Control-Expose-Headers', 'ETag, Link, Retry-After, X-GitHub-OTP, X-RateLimit-Limit, X-RateLimit-Remaining, X-RateLimit-Reset, X-OAuth-Scopes, X-Accepted-OAuth-Scopes, X-Poll-Interval'), ('Access-Control-Allow-Origin', '*'), ('Strict-Transport-Security', 'max-age=31536000; includeSubdomains; preload'), ('X-Frame-Options', 'deny'), ('X-Content-Type-Options', 'nosniff'), ('X-XSS-Protection', '1; mode=block'), ('Referrer-Policy', 'origin-when-cross-origin, strict-origin-when-cross-origin'), ('Content-Security-Policy', "default-src 'none'"), ('Content-Encoding', 'gzip'), ('X-GitHub-Request-Id', 'ACEC:235E:3C0061:772AA8:5BD2AE00')] +[{"url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6158","repository_url":"https://api.github.com/repos/openframeworks/openFrameworks","labels_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6158/labels{/name}","comments_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6158/comments","events_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6158/events","html_url":"https://github.com/openframeworks/openFrameworks/issues/6158","id":372772260,"node_id":"MDU6SXNzdWUzNzI3NzIyNjA=","number":6158,"title":"Compiling libraries msys2 error: \"This package doesn’t support your platform\" ","user":{"login":"invisiblesignal","id":44383181,"node_id":"MDQ6VXNlcjQ0MzgzMTgx","avatar_url":"https://avatars0.githubusercontent.com/u/44383181?v=4","gravatar_id":"","url":"https://api.github.com/users/invisiblesignal","html_url":"https://github.com/invisiblesignal","followers_url":"https://api.github.com/users/invisiblesignal/followers","following_url":"https://api.github.com/users/invisiblesignal/following{/other_user}","gists_url":"https://api.github.com/users/invisiblesignal/gists{/gist_id}","starred_url":"https://api.github.com/users/invisiblesignal/starred{/owner}{/repo}","subscriptions_url":"https://api.github.com/users/invisiblesignal/subscriptions","organizations_url":"https://api.github.com/users/invisiblesignal/orgs","repos_url":"https://api.github.com/users/invisiblesignal/repos","events_url":"https://api.github.com/users/invisiblesignal/events{/privacy}","received_events_url":"https://api.github.com/users/invisiblesignal/received_events","type":"User","site_admin":false},"labels":[],"state":"open","locked":false,"assignee":null,"assignees":[],"milestone":null,"comments":0,"created_at":"2018-10-23T01:58:47Z","updated_at":"2018-10-23T01:58:47Z","closed_at":null,"author_association":"NONE","body":"New user trying to install v0.10.0 Windows Qt Creator/msys2. I'm following setup guide here (https://openframeworks.cc/setup/msys2/) and have problem compiling oF libraries... 
in MINGW32 shell after typing \"make\" I get the following error: “HOST_OS=MINGW32_NT-10.0\r\nmakefileCommon/config.shared.mk:207: *** This package doesn’t support your platform, probably you downloaded the wrong package?. Stop.”\r\n\r\nI tried both v0.10.0_msys2_release as well as the v20181009_msys2_nightly, get same error in both cases.\r\n\r\nAny advice?"},{"url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6157","repository_url":"https://api.github.com/repos/openframeworks/openFrameworks","labels_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6157/labels{/name}","comments_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6157/comments","events_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6157/events","html_url":"https://github.com/openframeworks/openFrameworks/issues/6157","id":372373003,"node_id":"MDU6SXNzdWUzNzIzNzMwMDM=","number":6157,"title":"suggestion of improvement : ofRectangle operator","user":{"login":"dimitre","id":58289,"node_id":"MDQ6VXNlcjU4Mjg5","avatar_url":"https://avatars1.githubusercontent.com/u/58289?v=4","gravatar_id":"","url":"https://api.github.com/users/dimitre","html_url":"https://github.com/dimitre","followers_url":"https://api.github.com/users/dimitre/followers","following_url":"https://api.github.com/users/dimitre/following{/other_user}","gists_url":"https://api.github.com/users/dimitre/gists{/gist_id}","starred_url":"https://api.github.com/users/dimitre/starred{/owner}{/repo}","subscriptions_url":"https://api.github.com/users/dimitre/subscriptions","organizations_url":"https://api.github.com/users/dimitre/orgs","repos_url":"https://api.github.com/users/dimitre/repos","events_url":"https://api.github.com/users/dimitre/events{/privacy}","received_events_url":"https://api.github.com/users/dimitre/received_events","type":"User","site_admin":false},"labels":[],"state":"open","locked":false,"assignee":null,"assignees":[],"milestone":null,"comments":0,"created_at":"2018-10-22T01:11:21Z","updated_at":"2018-10-22T01:11:21Z","closed_at":null,"author_association":"NONE","body":"it would be great to be able to multiply an ofRectangle by and int or float, as in GLSL or glm.\r\n\r\nexample: \r\n```c++\r\nfloat scale = 2.0;\r\nfbo.draw(rect * scale);\r\n```"},{"url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6156","repository_url":"https://api.github.com/repos/openframeworks/openFrameworks","labels_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6156/labels{/name}","comments_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6156/comments","events_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6156/events","html_url":"https://github.com/openframeworks/openFrameworks/issues/6156","id":372300625,"node_id":"MDU6SXNzdWUzNzIzMDA2MjU=","number":6156,"title":"ofSoundBuffer::getChannel() causes 
exception","user":{"login":"hugoymh","id":34024260,"node_id":"MDQ6VXNlcjM0MDI0MjYw","avatar_url":"https://avatars2.githubusercontent.com/u/34024260?v=4","gravatar_id":"","url":"https://api.github.com/users/hugoymh","html_url":"https://github.com/hugoymh","followers_url":"https://api.github.com/users/hugoymh/followers","following_url":"https://api.github.com/users/hugoymh/following{/other_user}","gists_url":"https://api.github.com/users/hugoymh/gists{/gist_id}","starred_url":"https://api.github.com/users/hugoymh/starred{/owner}{/repo}","subscriptions_url":"https://api.github.com/users/hugoymh/subscriptions","organizations_url":"https://api.github.com/users/hugoymh/orgs","repos_url":"https://api.github.com/users/hugoymh/repos","events_url":"https://api.github.com/users/hugoymh/events{/privacy}","received_events_url":"https://api.github.com/users/hugoymh/received_events","type":"User","site_admin":false},"labels":[],"state":"open","locked":false,"assignee":null,"assignees":[],"milestone":null,"comments":0,"created_at":"2018-10-21T10:34:30Z","updated_at":"2018-10-21T10:36:45Z","closed_at":null,"author_association":"NONE","body":"On MacOS, Xcode 9.4.1\r\n\r\n```\r\nvoid ofSoundBuffer::getChannel(ofSoundBuffer & targetBuffer, std::size_t sourceChannel) const {\r\n//...\r\n\tif(channels == 1){\r\n //third argument is zero\r\n\t\tcopyTo(targetBuffer, getNumFrames(), 0, 0);\r\n\t}\r\n//...\r\n}\r\n```\r\n```\r\nvoid ofSoundBuffer::copyTo(ofSoundBuffer & soundBuffer, std::size_t nFrames, std::size_t outChannels,std::size_t fromFrame,bool loop) const{\r\n\tsoundBuffer.resize(nFrames*outChannels);\r\n\tsoundBuffer.setNumChannels(outChannels);\r\n\tsoundBuffer.setSampleRate(samplerate);\r\n//third argument (outChannels) passed as zero\r\n\tcopyTo(&soundBuffer[0], nFrames, outChannels, fromFrame, loop);\r\n}\r\n\r\n```\r\n```\r\nvoid ofSoundBuffer::setNumChannels(int channels){\r\n//this line assigns this->channels to zero\t\r\nthis->channels = channels;\r\n\tcheckSizeAndChannelsConsistency(\"setNumChannels\");\r\n}\r\n```\r\n```\r\nbool ofSoundBuffer::checkSizeAndChannelsConsistency(const std::string& _function ) {\r\n\tstd::string function = _function;\r\n\r\n\tif ( function.size()!= 0 ){\r\n\t\tfunction += \": \";\r\n\t}\r\n//if statement results in division by zero, causing exception\r\n\tif ( (size()%channels) != 0 ){\r\n\t\tofLogWarning(\"ofSoundBuffer\") << function << \"channel count \" << channels << \" is not consistent with sample count \" << size() << \" (non-zero remainder)\";\r\n\t\treturn false;\r\n\t}\r\n\treturn true;\r\n}\r\n```\r\n\r\n"},{"url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6155","repository_url":"https://api.github.com/repos/openframeworks/openFrameworks","labels_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6155/labels{/name}","comments_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6155/comments","events_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6155/events","html_url":"https://github.com/openframeworks/openFrameworks/pull/6155","id":371599846,"node_id":"MDExOlB1bGxSZXF1ZXN0MjI0MDExMTQ0","number":6155,"title":"Make ofSystemTextBoxDialog return empty string on 
cancel.","user":{"login":"ShadowMitia","id":3752363,"node_id":"MDQ6VXNlcjM3NTIzNjM=","avatar_url":"https://avatars1.githubusercontent.com/u/3752363?v=4","gravatar_id":"","url":"https://api.github.com/users/ShadowMitia","html_url":"https://github.com/ShadowMitia","followers_url":"https://api.github.com/users/ShadowMitia/followers","following_url":"https://api.github.com/users/ShadowMitia/following{/other_user}","gists_url":"https://api.github.com/users/ShadowMitia/gists{/gist_id}","starred_url":"https://api.github.com/users/ShadowMitia/starred{/owner}{/repo}","subscriptions_url":"https://api.github.com/users/ShadowMitia/subscriptions","organizations_url":"https://api.github.com/users/ShadowMitia/orgs","repos_url":"https://api.github.com/users/ShadowMitia/repos","events_url":"https://api.github.com/users/ShadowMitia/events{/privacy}","received_events_url":"https://api.github.com/users/ShadowMitia/received_events","type":"User","site_admin":false},"labels":[],"state":"open","locked":false,"assignee":null,"assignees":[],"milestone":null,"comments":0,"created_at":"2018-10-18T15:56:58Z","updated_at":"2018-10-18T15:56:58Z","closed_at":null,"author_association":"CONTRIBUTOR","pull_request":{"url":"https://api.github.com/repos/openframeworks/openFrameworks/pulls/6155","html_url":"https://github.com/openframeworks/openFrameworks/pull/6155","diff_url":"https://github.com/openframeworks/openFrameworks/pull/6155.diff","patch_url":"https://github.com/openframeworks/openFrameworks/pull/6155.patch"},"body":"See issue #4945.\r\n\r\nI've made changes for macOS as suggested in the thread and adapted it for Linux and Windows.\r\n\r\nI've only been able to test on Linux right now, I believe it should work fine on macOS, but I'm really unsure on the windows side.\r\n\r\nI haven't looked at the parts of the code that deal with android and emscripten."},{"url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6154","repository_url":"https://api.github.com/repos/openframeworks/openFrameworks","labels_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6154/labels{/name}","comments_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6154/comments","events_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6154/events","html_url":"https://github.com/openframeworks/openFrameworks/issues/6154","id":371527272,"node_id":"MDU6SXNzdWUzNzE1MjcyNzI=","number":6154,"title":"ofSoundPlayer load() hangs on linux when streaming is 
true","user":{"login":"davidemania","id":6617948,"node_id":"MDQ6VXNlcjY2MTc5NDg=","avatar_url":"https://avatars1.githubusercontent.com/u/6617948?v=4","gravatar_id":"","url":"https://api.github.com/users/davidemania","html_url":"https://github.com/davidemania","followers_url":"https://api.github.com/users/davidemania/followers","following_url":"https://api.github.com/users/davidemania/following{/other_user}","gists_url":"https://api.github.com/users/davidemania/gists{/gist_id}","starred_url":"https://api.github.com/users/davidemania/starred{/owner}{/repo}","subscriptions_url":"https://api.github.com/users/davidemania/subscriptions","organizations_url":"https://api.github.com/users/davidemania/orgs","repos_url":"https://api.github.com/users/davidemania/repos","events_url":"https://api.github.com/users/davidemania/events{/privacy}","received_events_url":"https://api.github.com/users/davidemania/received_events","type":"User","site_admin":false},"labels":[],"state":"open","locked":false,"assignee":null,"assignees":[],"milestone":null,"comments":0,"created_at":"2018-10-18T13:20:43Z","updated_at":"2018-10-18T13:22:40Z","closed_at":null,"author_association":"NONE","body":"If the ofSoundPlayer::load() method is called with streaming == false (the default) everything works as expected, but setting the streaming option to true causes the application to hang badly, it stops responding and has to be killed.\r\n\r\nThe issue can be reproduced easily with the soundPlayerExample in examples directory, just adding “, true” to the load call in setup()\r\n\r\nTest conditions: OS is Ubuntu 18.04, OF version 0.10.0"},{"url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6153","repository_url":"https://api.github.com/repos/openframeworks/openFrameworks","labels_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6153/labels{/name}","comments_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6153/comments","events_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6153/events","html_url":"https://github.com/openframeworks/openFrameworks/issues/6153","id":371353361,"node_id":"MDU6SXNzdWUzNzEzNTMzNjE=","number":6153,"title":"Android Studio for all of CPP files: This File is not part of the project...","user":{"login":"whyameye","id":1716966,"node_id":"MDQ6VXNlcjE3MTY5NjY=","avatar_url":"https://avatars0.githubusercontent.com/u/1716966?v=4","gravatar_id":"","url":"https://api.github.com/users/whyameye","html_url":"https://github.com/whyameye","followers_url":"https://api.github.com/users/whyameye/followers","following_url":"https://api.github.com/users/whyameye/following{/other_user}","gists_url":"https://api.github.com/users/whyameye/gists{/gist_id}","starred_url":"https://api.github.com/users/whyameye/starred{/owner}{/repo}","subscriptions_url":"https://api.github.com/users/whyameye/subscriptions","organizations_url":"https://api.github.com/users/whyameye/orgs","repos_url":"https://api.github.com/users/whyameye/repos","events_url":"https://api.github.com/users/whyameye/events{/privacy}","received_events_url":"https://api.github.com/users/whyameye/received_events","type":"User","site_admin":false},"labels":[],"state":"open","locked":false,"assignee":null,"assignees":[],"milestone":null,"comments":6,"created_at":"2018-10-18T04:04:17Z","updated_at":"2018-10-18T13:02:12Z","closed_at":null,"author_association":"NONE","body":"After trying the latest release of oF I switched to the master branch and am still having the same 
problem:\r\nUsing the project generator I convert an Android example to Android Studio. Then loading into Android Studio I get for all oF cpp files: \"This file is not part of the project. Please include it in the appropriate build file...\" I'm not an Android Studio expert and it is not obvious to me how to move forward as CMakelists.txt doesn't exist etc.\r\n\r\nAndroid Studio compiles the code fine but practically every line is marked as an error in the IDE as all the references are broken."},{"url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6152","repository_url":"https://api.github.com/repos/openframeworks/openFrameworks","labels_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6152/labels{/name}","comments_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6152/comments","events_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6152/events","html_url":"https://github.com/openframeworks/openFrameworks/issues/6152","id":369295799,"node_id":"MDU6SXNzdWUzNjkyOTU3OTk=","number":6152,"title":"Proposed change: expose alpha flags for ofAppEGLWindow","user":{"login":"jvcleave","id":150037,"node_id":"MDQ6VXNlcjE1MDAzNw==","avatar_url":"https://avatars0.githubusercontent.com/u/150037?v=4","gravatar_id":"","url":"https://api.github.com/users/jvcleave","html_url":"https://github.com/jvcleave","followers_url":"https://api.github.com/users/jvcleave/followers","following_url":"https://api.github.com/users/jvcleave/following{/other_user}","gists_url":"https://api.github.com/users/jvcleave/gists{/gist_id}","starred_url":"https://api.github.com/users/jvcleave/starred{/owner}{/repo}","subscriptions_url":"https://api.github.com/users/jvcleave/subscriptions","organizations_url":"https://api.github.com/users/jvcleave/orgs","repos_url":"https://api.github.com/users/jvcleave/repos","events_url":"https://api.github.com/users/jvcleave/events{/privacy}","received_events_url":"https://api.github.com/users/jvcleave/received_events","type":"User","site_admin":false},"labels":[{"id":28684,"node_id":"MDU6TGFiZWwyODY4NA==","url":"https://api.github.com/repos/openframeworks/openFrameworks/labels/feature","name":"feature","color":"622425","default":false}],"state":"open","locked":false,"assignee":null,"assignees":[],"milestone":null,"comments":0,"created_at":"2018-10-11T20:38:47Z","updated_at":"2018-10-11T20:38:47Z","closed_at":null,"author_association":"MEMBER","body":"Currently the default alpha flag set for a RPi window is `DISPMANX_FLAGS_ALPHA_FIXED_ALL_PIXELS`\r\n\r\nOther options are:\r\n\r\n```\r\nDISPMANX_FLAGS_ALPHA_FROM_SOURCE\r\nDISPMANX_FLAGS_ALPHA_FIXED_ALL_PIXELS \r\nDISPMANX_FLAGS_ALPHA_FIXED_NON_ZERO \r\nDISPMANX_FLAGS_ALPHA_FIXED_EXCEED_0X07\r\nDISPMANX_FLAGS_ALPHA_PREMULT\r\nDISPMANX_FLAGS_ALPHA_MIX \r\n```\r\n\r\n`DISPMANX_FLAGS_ALPHA_FIXED_ALL_PIXELS` sets all alpha values to the value of `settings.eglWindowOpacity` (default 255). For example, if `settings.eglWindowOpacity` is set to 128 and I draw a rectangle with an alpha value of 255 the alpha value will not exceed 128.\r\n\r\nThe most interesting is `DISPMANX_FLAGS_ALPHA_FROM_SOURCE`. With `settings.eglWindowOpacity` set to 0, this allows a totally transparent window but elements drawn retain their alpha values. 
This enables multiple layers of OF apps or OF apps overlaying other apps\r\n\r\n\r\nExample code:\r\n\r\n```\r\n#include \"ofMain.h\"\r\n#include \"ofAppEGLWindow.h\"\r\n\r\nclass ofApp : public ofBaseApp{\r\npublic:\r\n\r\n void draw()\r\n {\r\n //Set background to alpha 0\r\n ofColor bgColor(ofColor::black, 0);\r\n ofBackground(bgColor);\r\n \r\n int alpha = ofGetFrameNum()%255;\r\n\r\n ofPushStyle();\r\n ofColor orangeBG(ofColor::orange, alpha);\r\n ofSetColor(orangeBG); \r\n ofDrawRectangle(0, 0, 500, 500);\r\n ofPopStyle();\r\n }\r\n};\r\n\r\n\r\nint main( )\r\n{\r\n ofGLESWindowSettings windowSettings;\r\n windowSettings.setSize(1024, 768);\r\n \r\n ofAppEGLWindow::Settings settings(windowSettings);\r\n \r\n settings.eglWindowOpacity = 0;\r\n settings.alphaFlags = DISPMANX_FLAGS_ALPHA_FROM_SOURCE; //proposed change\r\n settings.layer = 2;\r\n \r\n ofCreateWindow(settings);\r\n ofRunApp(new ofApp());\r\n \r\n}\r\n\r\n```\r\n\r\nWith the above code you could start another app (e.g. omxplayer, another OF app) and an orange box would be drawn on top with transparency retained.\r\n\r\n\r\n\r\n"},{"url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6151","repository_url":"https://api.github.com/repos/openframeworks/openFrameworks","labels_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6151/labels{/name}","comments_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6151/comments","events_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6151/events","html_url":"https://github.com/openframeworks/openFrameworks/issues/6151","id":369254388,"node_id":"MDU6SXNzdWUzNjkyNTQzODg=","number":6151,"title":"soundExample crahsed with `raspberry pi B+` X `raspbian stretch`","user":{"login":"icq4ever","id":530796,"node_id":"MDQ6VXNlcjUzMDc5Ng==","avatar_url":"https://avatars0.githubusercontent.com/u/530796?v=4","gravatar_id":"","url":"https://api.github.com/users/icq4ever","html_url":"https://github.com/icq4ever","followers_url":"https://api.github.com/users/icq4ever/followers","following_url":"https://api.github.com/users/icq4ever/following{/other_user}","gists_url":"https://api.github.com/users/icq4ever/gists{/gist_id}","starred_url":"https://api.github.com/users/icq4ever/starred{/owner}{/repo}","subscriptions_url":"https://api.github.com/users/icq4ever/subscriptions","organizations_url":"https://api.github.com/users/icq4ever/orgs","repos_url":"https://api.github.com/users/icq4ever/repos","events_url":"https://api.github.com/users/icq4ever/events{/privacy}","received_events_url":"https://api.github.com/users/icq4ever/received_events","type":"User","site_admin":false},"labels":[],"state":"open","locked":false,"assignee":null,"assignees":[],"milestone":null,"comments":0,"created_at":"2018-10-11T18:41:47Z","updated_at":"2018-10-11T20:57:45Z","closed_at":null,"author_association":"NONE","body":"soundPlayerFFTExample and soundPlayerExample is crashed with openframeworks 0.10.0 with clean setup raspbian stretch x raspberry pi B+(old one). it looks like something related with alsa. launching app is ok unless any play sound (ie. click to play).\r\n\r\nI can see error message `Illegal Instruction`\r\n\r\nalsa is working. 
I can confim with command :\r\n`speaker-test -t wave -c`\r\n\r\n---\r\nby the way, there's no issue on raspberry pi 3 b+."},{"url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6149","repository_url":"https://api.github.com/repos/openframeworks/openFrameworks","labels_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6149/labels{/name}","comments_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6149/comments","events_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6149/events","html_url":"https://github.com/openframeworks/openFrameworks/issues/6149","id":367529586,"node_id":"MDU6SXNzdWUzNjc1Mjk1ODY=","number":6149,"title":"General issue for other problems with xcode 10 / macos 10.14","user":{"login":"arturoc","id":48240,"node_id":"MDQ6VXNlcjQ4MjQw","avatar_url":"https://avatars0.githubusercontent.com/u/48240?v=4","gravatar_id":"","url":"https://api.github.com/users/arturoc","html_url":"https://github.com/arturoc","followers_url":"https://api.github.com/users/arturoc/followers","following_url":"https://api.github.com/users/arturoc/following{/other_user}","gists_url":"https://api.github.com/users/arturoc/gists{/gist_id}","starred_url":"https://api.github.com/users/arturoc/starred{/owner}{/repo}","subscriptions_url":"https://api.github.com/users/arturoc/subscriptions","organizations_url":"https://api.github.com/users/arturoc/orgs","repos_url":"https://api.github.com/users/arturoc/repos","events_url":"https://api.github.com/users/arturoc/events{/privacy}","received_events_url":"https://api.github.com/users/arturoc/received_events","type":"User","site_admin":false},"labels":[],"state":"open","locked":false,"assignee":null,"assignees":[],"milestone":null,"comments":17,"created_at":"2018-10-07T07:34:50Z","updated_at":"2018-10-23T17:22:12Z","closed_at":null,"author_association":"MEMBER","body":"Opening this since more stuff comes up and not sure it even belongs on a new issue or it's all related:\r\n\r\nhttps://forum.openframeworks.cc/t/ofimage-issue-in-xcode/30628"},{"url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6148","repository_url":"https://api.github.com/repos/openframeworks/openFrameworks","labels_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6148/labels{/name}","comments_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6148/comments","events_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6148/events","html_url":"https://github.com/openframeworks/openFrameworks/issues/6148","id":367486070,"node_id":"MDU6SXNzdWUzNjc0ODYwNzA=","number":6148,"title":"can't access Resources/ inside 
main()","user":{"login":"kylemcdonald","id":157106,"node_id":"MDQ6VXNlcjE1NzEwNg==","avatar_url":"https://avatars3.githubusercontent.com/u/157106?v=4","gravatar_id":"","url":"https://api.github.com/users/kylemcdonald","html_url":"https://github.com/kylemcdonald","followers_url":"https://api.github.com/users/kylemcdonald/followers","following_url":"https://api.github.com/users/kylemcdonald/following{/other_user}","gists_url":"https://api.github.com/users/kylemcdonald/gists{/gist_id}","starred_url":"https://api.github.com/users/kylemcdonald/starred{/owner}{/repo}","subscriptions_url":"https://api.github.com/users/kylemcdonald/subscriptions","organizations_url":"https://api.github.com/users/kylemcdonald/orgs","repos_url":"https://api.github.com/users/kylemcdonald/repos","events_url":"https://api.github.com/users/kylemcdonald/events{/privacy}","received_events_url":"https://api.github.com/users/kylemcdonald/received_events","type":"User","site_admin":false},"labels":[],"state":"open","locked":false,"assignee":null,"assignees":[],"milestone":null,"comments":1,"created_at":"2018-10-06T19:04:06Z","updated_at":"2018-10-07T07:39:09Z","closed_at":null,"author_association":"CONTRIBUTOR","body":"I have an app that needs to do some things (like decide whether to decorate the window) before the app is run. This needs to be configured by a file inside the Resources/ folder. Usually [the solution](http://nickhardeman.com/591/preparing-an-openframeworks-application-for-the-mac-app-store/) is to add `ofSetDataPathRoot(\"../Resources\");` but that doesn't work inside `main()`:\r\n\r\n```c++\r\n#include \"ofMain.h\"\r\nint main() {\r\n ofSetDataPathRoot(\"../Resources\");\r\n ofLog() << ofToDataPath(\".\", true);\r\n}\r\n```\r\n\r\n```\r\n[notice ] /Users/kyle/Documents/openFrameworks/apps/Project/App/bin/../Resources/.\r\n```\r\n\r\nI also tried adding `ofRestoreWorkingDirectoryToDefault()`, `ofSetDataPathRoot(\".\")` and `ofToDataPath(\".\")` before `ofSetDataPathRoot()`.\r\n\r\nThe only solution I found is to imitate the code inside `defaultDataPath()`:\r\n\r\n`ofSetDataPathRoot(ofFilePath::join(ofFilePath::getCurrentExeDir(), \"../Resources\"));`"},{"url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6147","repository_url":"https://api.github.com/repos/openframeworks/openFrameworks","labels_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6147/labels{/name}","comments_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6147/comments","events_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6147/events","html_url":"https://github.com/openframeworks/openFrameworks/issues/6147","id":367451026,"node_id":"MDU6SXNzdWUzNjc0NTEwMjY=","number":6147,"title":"Rendering completely broken on macos 
10.14","user":{"login":"arturoc","id":48240,"node_id":"MDQ6VXNlcjQ4MjQw","avatar_url":"https://avatars0.githubusercontent.com/u/48240?v=4","gravatar_id":"","url":"https://api.github.com/users/arturoc","html_url":"https://github.com/arturoc","followers_url":"https://api.github.com/users/arturoc/followers","following_url":"https://api.github.com/users/arturoc/following{/other_user}","gists_url":"https://api.github.com/users/arturoc/gists{/gist_id}","starred_url":"https://api.github.com/users/arturoc/starred{/owner}{/repo}","subscriptions_url":"https://api.github.com/users/arturoc/subscriptions","organizations_url":"https://api.github.com/users/arturoc/orgs","repos_url":"https://api.github.com/users/arturoc/repos","events_url":"https://api.github.com/users/arturoc/events{/privacy}","received_events_url":"https://api.github.com/users/arturoc/received_events","type":"User","site_admin":false},"labels":[],"state":"open","locked":false,"assignee":null,"assignees":[],"milestone":null,"comments":3,"created_at":"2018-10-06T12:03:12Z","updated_at":"2018-10-06T16:04:25Z","closed_at":null,"author_association":"MEMBER","body":"https://forum.openframeworks.cc/t/no-rendering-initially-in-mojave/30624"},{"url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6146","repository_url":"https://api.github.com/repos/openframeworks/openFrameworks","labels_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6146/labels{/name}","comments_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6146/comments","events_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6146/events","html_url":"https://github.com/openframeworks/openFrameworks/issues/6146","id":367409282,"node_id":"MDU6SXNzdWUzNjc0MDkyODI=","number":6146,"title":"ofVerticalSync(true) broken on macOS 10.14 + XCode 10.0","user":{"login":"kylemcdonald","id":157106,"node_id":"MDQ6VXNlcjE1NzEwNg==","avatar_url":"https://avatars3.githubusercontent.com/u/157106?v=4","gravatar_id":"","url":"https://api.github.com/users/kylemcdonald","html_url":"https://github.com/kylemcdonald","followers_url":"https://api.github.com/users/kylemcdonald/followers","following_url":"https://api.github.com/users/kylemcdonald/following{/other_user}","gists_url":"https://api.github.com/users/kylemcdonald/gists{/gist_id}","starred_url":"https://api.github.com/users/kylemcdonald/starred{/owner}{/repo}","subscriptions_url":"https://api.github.com/users/kylemcdonald/subscriptions","organizations_url":"https://api.github.com/users/kylemcdonald/orgs","repos_url":"https://api.github.com/users/kylemcdonald/repos","events_url":"https://api.github.com/users/kylemcdonald/events{/privacy}","received_events_url":"https://api.github.com/users/kylemcdonald/received_events","type":"User","site_admin":false},"labels":[],"state":"open","locked":false,"assignee":null,"assignees":[],"milestone":null,"comments":5,"created_at":"2018-10-06T01:02:51Z","updated_at":"2018-10-18T14:02:49Z","closed_at":null,"author_association":"CONTRIBUTOR","body":"Using the latest OF from Github.\r\n\r\nI don't know which version of XCode, OF, or macOS created this problem.\r\n\r\nHere's a simple example that should take 4 seconds for a line to walk across the screen from left to right:\r\n\r\n```c++\r\n#include \"ofMain.h\"\r\n\r\nclass ofApp : public ofBaseApp {\r\npublic:\r\n void setup() {\r\n ofSetVerticalSync(true);\r\n ofBackground(0);\r\n }\r\n void update() {\r\n }\r\n void draw() {\r\n ofDrawRectangle(ofGetFrameNum() % 240, 0, 1, 240);\r\n 
}\r\n};\r\n\r\nint main() {\r\n ofSetupOpenGL(240, 240, OF_WINDOW);\r\n ofRunApp(new ofApp());\r\n}\r\n```\r\n\r\nInstead, it runs much faster, similar to how apps would run without a framerate limit when they are in the background/not in focus."},{"url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6145","repository_url":"https://api.github.com/repos/openframeworks/openFrameworks","labels_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6145/labels{/name}","comments_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6145/comments","events_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6145/events","html_url":"https://github.com/openframeworks/openFrameworks/issues/6145","id":366997350,"node_id":"MDU6SXNzdWUzNjY5OTczNTA=","number":6145,"title":"Possible memory leak in ofxGui","user":{"login":"roymacdonald","id":974878,"node_id":"MDQ6VXNlcjk3NDg3OA==","avatar_url":"https://avatars0.githubusercontent.com/u/974878?v=4","gravatar_id":"","url":"https://api.github.com/users/roymacdonald","html_url":"https://github.com/roymacdonald","followers_url":"https://api.github.com/users/roymacdonald/followers","following_url":"https://api.github.com/users/roymacdonald/following{/other_user}","gists_url":"https://api.github.com/users/roymacdonald/gists{/gist_id}","starred_url":"https://api.github.com/users/roymacdonald/starred{/owner}{/repo}","subscriptions_url":"https://api.github.com/users/roymacdonald/subscriptions","organizations_url":"https://api.github.com/users/roymacdonald/orgs","repos_url":"https://api.github.com/users/roymacdonald/repos","events_url":"https://api.github.com/users/roymacdonald/events{/privacy}","received_events_url":"https://api.github.com/users/roymacdonald/received_events","type":"User","site_admin":false},"labels":[],"state":"open","locked":false,"assignee":null,"assignees":[],"milestone":null,"comments":2,"created_at":"2018-10-04T22:42:51Z","updated_at":"2018-10-05T15:06:50Z","closed_at":null,"author_association":"MEMBER","body":"I just noticed that in ofxGuiGroup all the `add(ofParameter<T> & parameter)` functions call new as you can see [here](https://github.com/openframeworks/openFrameworks/blob/master/addons/ofxGui/src/ofxGuiGroup.cpp#L178-L229), thus creating the gui object needed. but these newly created objects never get deleted even after the `ofGuiGroup` is destroyed. 
In a lot of cases this might not be a problem as a lot of times the the `ofxGuiGroup` or `ofxPanel` object exists for the whole runtime of the app, but in scenarios where the gui is created and destroyed dynamically this is a problem.\r\n\r\nThe following code is a proof of this.\r\n````cpp\r\n#include \"ofMain.h\"\r\n#include \"ofxGui.h\"\r\n\r\nclass ofApp : public ofBaseApp\r\n{\r\npublic:\r\n\t//----------------------------------------------------------------\r\n\tvoid setup(){\t\t\r\n\t\tgui = new ofxPanel();\r\n\t\tgui->setup();\r\n\t\tgui->add(param.set(\"param\", 0, 0, 255));\r\n\t\tsliderPtr = &(gui->getFloatSlider(\"param\"));\r\n\t\t\r\n\t};\r\n\t//----------------------------------------------------------------\r\n\tvoid draw(){\r\n\t\t\r\n\t\tofBackground(param, 0, 0);\r\n\t\t\r\n\t\tif (displayGui){\r\n\t\t\tif(gui){\r\n\t\t\t\tgui->draw();\r\n\t\t\t}else if (sliderPtr){\r\n\t\t\t\tsliderPtr->draw();\r\n\t\t\t}\r\n\t\t}\r\n\t\t\r\n\t\tstringstream ss;\r\n\t\t\r\n\t\tif(sliderPtr){\r\n\t\t\tss << \"sliderPtr \" << sliderPtr << endl;\t\r\n\t\t}else{\r\n\t\t\tss << \"sliderPtr is null\" << endl;\r\n\t\t}\r\n\t\tif(gui){\r\n\t\t\tss << \"gui \" << gui << endl;\t\r\n\t\t}else{\r\n\t\t\tss << \"gui is null\" << endl;\r\n\t\t}\r\n\t\tofDrawBitmapStringHighlight(ss.str(), 20, ofGetHeight() - 50);\r\n\t\t\r\n\t};\r\n\t//----------------------------------------------------------------\r\n\tvoid keyPressed(int key)\r\n\t{\r\n\t\tif(key == ' '){\r\n\t\t\tdisplayGui = !displayGui;\r\n\t\t}else if(key == 'd'){\r\n\t\t\tdelete gui;\r\n\t\t\tgui = nullptr;\r\n\t\t}\r\n\t}\r\n\t//----------------------------------------------------------------\r\n\tofxPanel* gui= nullptr;\r\n\tofParameter<float> param;\r\n\tofxFloatSlider* sliderPtr = nullptr;\r\n\tbool displayGui = true;\r\n};\r\n\r\nint main( )\r\n{\r\n\tofSetupOpenGL(400,200,OF_WINDOW);\r\n\tofRunApp(new ofApp());\r\n}\r\n\r\n````\r\n\r\nMaybe moving all the pointers used into smart pointer can be a good idea. 
What do you think @arturoc @bakercp @ofZach ?\r\n\r\n\r\n"},{"url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6144","repository_url":"https://api.github.com/repos/openframeworks/openFrameworks","labels_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6144/labels{/name}","comments_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6144/comments","events_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6144/events","html_url":"https://github.com/openframeworks/openFrameworks/pull/6144","id":366994113,"node_id":"MDExOlB1bGxSZXF1ZXN0MjIwNTM2MDY4","number":6144,"title":"Fix ofx gui slider scrolling","user":{"login":"roymacdonald","id":974878,"node_id":"MDQ6VXNlcjk3NDg3OA==","avatar_url":"https://avatars0.githubusercontent.com/u/974878?v=4","gravatar_id":"","url":"https://api.github.com/users/roymacdonald","html_url":"https://github.com/roymacdonald","followers_url":"https://api.github.com/users/roymacdonald/followers","following_url":"https://api.github.com/users/roymacdonald/following{/other_user}","gists_url":"https://api.github.com/users/roymacdonald/gists{/gist_id}","starred_url":"https://api.github.com/users/roymacdonald/starred{/owner}{/repo}","subscriptions_url":"https://api.github.com/users/roymacdonald/subscriptions","organizations_url":"https://api.github.com/users/roymacdonald/orgs","repos_url":"https://api.github.com/users/roymacdonald/repos","events_url":"https://api.github.com/users/roymacdonald/events{/privacy}","received_events_url":"https://api.github.com/users/roymacdonald/received_events","type":"User","site_admin":false},"labels":[],"state":"open","locked":false,"assignee":null,"assignees":[],"milestone":null,"comments":0,"created_at":"2018-10-04T22:28:00Z","updated_at":"2018-10-17T02:20:40Z","closed_at":null,"author_association":"MEMBER","pull_request":{"url":"https://api.github.com/repos/openframeworks/openFrameworks/pulls/6144","html_url":"https://github.com/openframeworks/openFrameworks/pull/6144","diff_url":"https://github.com/openframeworks/openFrameworks/pull/6144.diff","patch_url":"https://github.com/openframeworks/openFrameworks/pull/6144.patch"},"body":"Fix for this issue https://github.com/openframeworks/openFrameworks/issues/6133\r\n\r\nThe scrolling was still being handled when the gui was not being drawn both in ofxSlider and ofxInputField. \r\n\r\nTested using the code provided in the mentioned issue.\r\n\r\nAlso, \r\nthere is some redundant code in ofxSlider.cpp and ofxInputField.cpp. Both have a function called `toRange(...)` which is identical in both. When the scroll callback is called this function is called. Can't we move this function to the ofxBaseGui for instance? Also, the code used in the scroll callback in both ofxSlider and ofxInputField is quite similar, maybe moving this to ofxBaseGui might be better too. 
\r\n\r\n\r\n"},{"url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6143","repository_url":"https://api.github.com/repos/openframeworks/openFrameworks","labels_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6143/labels{/name}","comments_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6143/comments","events_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6143/events","html_url":"https://github.com/openframeworks/openFrameworks/issues/6143","id":366546777,"node_id":"MDU6SXNzdWUzNjY1NDY3Nzc=","number":6143,"title":"Missing support for non 2D texture types in ofTexture","user":{"login":"m1keall1son","id":2464817,"node_id":"MDQ6VXNlcjI0NjQ4MTc=","avatar_url":"https://avatars2.githubusercontent.com/u/2464817?v=4","gravatar_id":"","url":"https://api.github.com/users/m1keall1son","html_url":"https://github.com/m1keall1son","followers_url":"https://api.github.com/users/m1keall1son/followers","following_url":"https://api.github.com/users/m1keall1son/following{/other_user}","gists_url":"https://api.github.com/users/m1keall1son/gists{/gist_id}","starred_url":"https://api.github.com/users/m1keall1son/starred{/owner}{/repo}","subscriptions_url":"https://api.github.com/users/m1keall1son/subscriptions","organizations_url":"https://api.github.com/users/m1keall1son/orgs","repos_url":"https://api.github.com/users/m1keall1son/repos","events_url":"https://api.github.com/users/m1keall1son/events{/privacy}","received_events_url":"https://api.github.com/users/m1keall1son/received_events","type":"User","site_admin":false},"labels":[],"state":"open","locked":false,"assignee":null,"assignees":[],"milestone":null,"comments":6,"created_at":"2018-10-03T22:00:11Z","updated_at":"2018-10-05T11:29:52Z","closed_at":null,"author_association":"NONE","body":"Unless i'm mistaken, I can't find any support for non-2D texture types. \r\n\r\nmissing:\r\n`GL_TEXTURE_2D_ARRAY`,\r\n`GL_TEXTURE_3D`,\r\n`GL_TEXTURE_1D`,\r\n`GL_TEXTURE_1D_ARRAY`,\r\n`GL_TEXTURE_CUBE_MAP`,\r\n`GL_TEXTURE_CUBE_MAP_ARRAY`,\r\n\r\nIt would make most sense to separate out the common functionality into an `ofTextureBase` base class and derive into something like `ofTexture1D`, `ofTexture2D`, `ofTexture3D` and `ofTextureCubeMap`. 
For backwards compatibility alias `ofTexture2D` to `ofTexture`."},{"url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6142","repository_url":"https://api.github.com/repos/openframeworks/openFrameworks","labels_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6142/labels{/name}","comments_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6142/comments","events_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6142/events","html_url":"https://github.com/openframeworks/openFrameworks/pull/6142","id":366403137,"node_id":"MDExOlB1bGxSZXF1ZXN0MjIwMDg1NTE1","number":6142,"title":"fixes xcode template to not have OF as a dependcy but as a pre-build …","user":{"login":"ofTheo","id":144000,"node_id":"MDQ6VXNlcjE0NDAwMA==","avatar_url":"https://avatars3.githubusercontent.com/u/144000?v=4","gravatar_id":"","url":"https://api.github.com/users/ofTheo","html_url":"https://github.com/ofTheo","followers_url":"https://api.github.com/users/ofTheo/followers","following_url":"https://api.github.com/users/ofTheo/following{/other_user}","gists_url":"https://api.github.com/users/ofTheo/gists{/gist_id}","starred_url":"https://api.github.com/users/ofTheo/starred{/owner}{/repo}","subscriptions_url":"https://api.github.com/users/ofTheo/subscriptions","organizations_url":"https://api.github.com/users/ofTheo/orgs","repos_url":"https://api.github.com/users/ofTheo/repos","events_url":"https://api.github.com/users/ofTheo/events{/privacy}","received_events_url":"https://api.github.com/users/ofTheo/received_events","type":"User","site_admin":false},"labels":[],"state":"open","locked":false,"assignee":null,"assignees":[],"milestone":null,"comments":6,"created_at":"2018-10-03T15:41:06Z","updated_at":"2018-10-06T14:51:37Z","closed_at":null,"author_association":"CONTRIBUTOR","pull_request":{"url":"https://api.github.com/repos/openframeworks/openFrameworks/pulls/6142","html_url":"https://github.com/openframeworks/openFrameworks/pull/6142","diff_url":"https://github.com/openframeworks/openFrameworks/pull/6142.diff","patch_url":"https://github.com/openframeworks/openFrameworks/pull/6142.patch"},"body":"…script. 
related to #6139"},{"url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6141","repository_url":"https://api.github.com/repos/openframeworks/openFrameworks","labels_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6141/labels{/name}","comments_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6141/comments","events_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6141/events","html_url":"https://github.com/openframeworks/openFrameworks/issues/6141","id":365257952,"node_id":"MDU6SXNzdWUzNjUyNTc5NTI=","number":6141,"title":"OF_RECTMODE_CENTER doesn't work on ofDrawRectRounded","user":{"login":"Nedelstein","id":41701865,"node_id":"MDQ6VXNlcjQxNzAxODY1","avatar_url":"https://avatars2.githubusercontent.com/u/41701865?v=4","gravatar_id":"","url":"https://api.github.com/users/Nedelstein","html_url":"https://github.com/Nedelstein","followers_url":"https://api.github.com/users/Nedelstein/followers","following_url":"https://api.github.com/users/Nedelstein/following{/other_user}","gists_url":"https://api.github.com/users/Nedelstein/gists{/gist_id}","starred_url":"https://api.github.com/users/Nedelstein/starred{/owner}{/repo}","subscriptions_url":"https://api.github.com/users/Nedelstein/subscriptions","organizations_url":"https://api.github.com/users/Nedelstein/orgs","repos_url":"https://api.github.com/users/Nedelstein/repos","events_url":"https://api.github.com/users/Nedelstein/events{/privacy}","received_events_url":"https://api.github.com/users/Nedelstein/received_events","type":"User","site_admin":false},"labels":[],"state":"open","locked":false,"assignee":null,"assignees":[],"milestone":null,"comments":0,"created_at":"2018-09-30T20:22:36Z","updated_at":"2018-09-30T20:22:36Z","closed_at":null,"author_association":"NONE","body":"Not the biggest deal, but thought it was worth mentioning. 
You can't seem to change the rectmode when using ofDrawRectRounded.\r\n\r\n"},{"url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6140","repository_url":"https://api.github.com/repos/openframeworks/openFrameworks","labels_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6140/labels{/name}","comments_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6140/comments","events_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6140/events","html_url":"https://github.com/openframeworks/openFrameworks/pull/6140","id":365198033,"node_id":"MDExOlB1bGxSZXF1ZXN0MjE5MTg4NTUz","number":6140,"title":"add ofSystemConfirmDialog for confirming actions","user":{"login":"s-ol","id":1731279,"node_id":"MDQ6VXNlcjE3MzEyNzk=","avatar_url":"https://avatars1.githubusercontent.com/u/1731279?v=4","gravatar_id":"","url":"https://api.github.com/users/s-ol","html_url":"https://github.com/s-ol","followers_url":"https://api.github.com/users/s-ol/followers","following_url":"https://api.github.com/users/s-ol/following{/other_user}","gists_url":"https://api.github.com/users/s-ol/gists{/gist_id}","starred_url":"https://api.github.com/users/s-ol/starred{/owner}{/repo}","subscriptions_url":"https://api.github.com/users/s-ol/subscriptions","organizations_url":"https://api.github.com/users/s-ol/orgs","repos_url":"https://api.github.com/users/s-ol/repos","events_url":"https://api.github.com/users/s-ol/events{/privacy}","received_events_url":"https://api.github.com/users/s-ol/received_events","type":"User","site_admin":false},"labels":[],"state":"open","locked":false,"assignee":null,"assignees":[],"milestone":null,"comments":4,"created_at":"2018-09-30T06:44:10Z","updated_at":"2018-10-12T07:28:24Z","closed_at":null,"author_association":"NONE","pull_request":{"url":"https://api.github.com/repos/openframeworks/openFrameworks/pulls/6140","html_url":"https://github.com/openframeworks/openFrameworks/pull/6140","diff_url":"https://github.com/openframeworks/openFrameworks/pull/6140.diff","patch_url":"https://github.com/openframeworks/openFrameworks/pull/6140.patch"},"body":"This PR supersedes #3228 (ofSystemChoiceDialog).\r\n\r\nI have another cleanup commit ready (b90df914359e7eccde1de5dbb600278d1bf77538) but didn't attach it to this branch so the diff is clean for now.\r\n\r\nStatus:\r\n\r\n| | implemented | tested |\r\n| ----------------- | ---| -----|\r\n| Linux (GTK) | yes | yes |\r\n| Windows | yes | no |\r\n| OS X | yes | yes |\r\n| Android | no | - |\r\n\r\nPlease help with testing!"},{"url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6139","repository_url":"https://api.github.com/repos/openframeworks/openFrameworks","labels_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6139/labels{/name}","comments_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6139/comments","events_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6139/events","html_url":"https://github.com/openframeworks/openFrameworks/issues/6139","id":365180966,"node_id":"MDU6SXNzdWUzNjUxODA5NjY=","number":6139,"title":"mojave + xcode 10 
issues","user":{"login":"ofZach","id":142897,"node_id":"MDQ6VXNlcjE0Mjg5Nw==","avatar_url":"https://avatars3.githubusercontent.com/u/142897?v=4","gravatar_id":"","url":"https://api.github.com/users/ofZach","html_url":"https://github.com/ofZach","followers_url":"https://api.github.com/users/ofZach/followers","following_url":"https://api.github.com/users/ofZach/following{/other_user}","gists_url":"https://api.github.com/users/ofZach/gists{/gist_id}","starred_url":"https://api.github.com/users/ofZach/starred{/owner}{/repo}","subscriptions_url":"https://api.github.com/users/ofZach/subscriptions","organizations_url":"https://api.github.com/users/ofZach/orgs","repos_url":"https://api.github.com/users/ofZach/repos","events_url":"https://api.github.com/users/ofZach/events{/privacy}","received_events_url":"https://api.github.com/users/ofZach/received_events","type":"User","site_admin":false},"labels":[],"state":"open","locked":false,"assignee":null,"assignees":[],"milestone":null,"comments":5,"created_at":"2018-09-30T01:17:56Z","updated_at":"2018-10-01T07:46:46Z","closed_at":null,"author_association":"CONTRIBUTOR","body":"\r\nI'm looking at OF on Mojave and Xcode 10 which is kind of painful now -- \r\n\r\nsome notes (cc @ofTheo) \r\n\r\na) 32 bit needs to be removed (this has already been done in master...) \r\n\r\nb) on mojave apps need permission set as a plist for camera, microphone, and any other thing we might need to do that requires permissions (ala iPhone). I feel like the easiest is we just add these to the template plist so that they are already set. \r\n\r\nc) there's something not right about the overall OF sub project, at the moment you can't open more than one OF project at the same time and build them (before you used to get an integrity warning but now it seems there is more of an error so you actually just can't build more than one project at once... it's super weird) \r\n\r\nd) openframeworks library always gets rebuilt for each project :( \r\n\r\n(maybe looking at this issue which this reminded me of -- https://github.com/openframeworks/openFrameworks/issues/5895) \r\n\r\nI'm looking into b/c/d at the moment... 
"},{"url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6137","repository_url":"https://api.github.com/repos/openframeworks/openFrameworks","labels_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6137/labels{/name}","comments_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6137/comments","events_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6137/events","html_url":"https://github.com/openframeworks/openFrameworks/pull/6137","id":364358354,"node_id":"MDExOlB1bGxSZXF1ZXN0MjE4NTY0Mzgx","number":6137,"title":"Add PUT request to ofURLFileLoader","user":{"login":"chriship","id":4815637,"node_id":"MDQ6VXNlcjQ4MTU2Mzc=","avatar_url":"https://avatars2.githubusercontent.com/u/4815637?v=4","gravatar_id":"","url":"https://api.github.com/users/chriship","html_url":"https://github.com/chriship","followers_url":"https://api.github.com/users/chriship/followers","following_url":"https://api.github.com/users/chriship/following{/other_user}","gists_url":"https://api.github.com/users/chriship/gists{/gist_id}","starred_url":"https://api.github.com/users/chriship/starred{/owner}{/repo}","subscriptions_url":"https://api.github.com/users/chriship/subscriptions","organizations_url":"https://api.github.com/users/chriship/orgs","repos_url":"https://api.github.com/users/chriship/repos","events_url":"https://api.github.com/users/chriship/events{/privacy}","received_events_url":"https://api.github.com/users/chriship/received_events","type":"User","site_admin":false},"labels":[],"state":"open","locked":false,"assignee":null,"assignees":[],"milestone":null,"comments":2,"created_at":"2018-09-27T08:25:12Z","updated_at":"2018-09-27T15:58:42Z","closed_at":null,"author_association":"NONE","pull_request":{"url":"https://api.github.com/repos/openframeworks/openFrameworks/pulls/6137","html_url":"https://github.com/openframeworks/openFrameworks/pull/6137","diff_url":"https://github.com/openframeworks/openFrameworks/pull/6137.diff","patch_url":"https://github.com/openframeworks/openFrameworks/pull/6137.patch"},"body":"Add the ability to do a PUT request with ofHttpRequest"},{"url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6136","repository_url":"https://api.github.com/repos/openframeworks/openFrameworks","labels_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6136/labels{/name}","comments_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6136/comments","events_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6136/events","html_url":"https://github.com/openframeworks/openFrameworks/pull/6136","id":364357220,"node_id":"MDExOlB1bGxSZXF1ZXN0MjE4NTYzNTMy","number":6136,"title":"Fix kerning issues in 
ofTrueTypeFont","user":{"login":"chriship","id":4815637,"node_id":"MDQ6VXNlcjQ4MTU2Mzc=","avatar_url":"https://avatars2.githubusercontent.com/u/4815637?v=4","gravatar_id":"","url":"https://api.github.com/users/chriship","html_url":"https://github.com/chriship","followers_url":"https://api.github.com/users/chriship/followers","following_url":"https://api.github.com/users/chriship/following{/other_user}","gists_url":"https://api.github.com/users/chriship/gists{/gist_id}","starred_url":"https://api.github.com/users/chriship/starred{/owner}{/repo}","subscriptions_url":"https://api.github.com/users/chriship/subscriptions","organizations_url":"https://api.github.com/users/chriship/orgs","repos_url":"https://api.github.com/users/chriship/repos","events_url":"https://api.github.com/users/chriship/events{/privacy}","received_events_url":"https://api.github.com/users/chriship/received_events","type":"User","site_admin":false},"labels":[],"state":"open","locked":false,"assignee":null,"assignees":[],"milestone":null,"comments":4,"created_at":"2018-09-27T08:22:04Z","updated_at":"2018-10-08T19:19:41Z","closed_at":null,"author_association":"NONE","pull_request":{"url":"https://api.github.com/repos/openframeworks/openFrameworks/pulls/6136","html_url":"https://github.com/openframeworks/openFrameworks/pull/6136","diff_url":"https://github.com/openframeworks/openFrameworks/pull/6136.diff","patch_url":"https://github.com/openframeworks/openFrameworks/pull/6136.patch"},"body":"Freetype uses fixed point number for font calculation (26.6).\r\n\r\nIt means 26 bit integer and 6 bit decimal.\r\n\r\nSo we can not simply use the value of `FT_Get_Kerning` because we need to account for the .6\r\n\r\nHow to calculate?\r\nWe do bit shift to the right.\r\n\r\n`pen_x += delta.x >> 6;`\r\n\r\nThis is same calculation as dividing by 64 (2^6=64).\r\n\r\nSee Simple Text Rendering: Kerning and Centering sesction in this page. 
https://www.freetype.org/freetype2/docs/tutorial/step2.html#section-4\r\n\r\nThanks to @hiroMTB for figuring this out.\r\n"},{"url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6133","repository_url":"https://api.github.com/repos/openframeworks/openFrameworks","labels_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6133/labels{/name}","comments_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6133/comments","events_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6133/events","html_url":"https://github.com/openframeworks/openFrameworks/issues/6133","id":363073568,"node_id":"MDU6SXNzdWUzNjMwNzM1Njg=","number":6133,"title":"ofxGui scroll is active when it shouldn't be","user":{"login":"samubence","id":241047,"node_id":"MDQ6VXNlcjI0MTA0Nw==","avatar_url":"https://avatars2.githubusercontent.com/u/241047?v=4","gravatar_id":"","url":"https://api.github.com/users/samubence","html_url":"https://github.com/samubence","followers_url":"https://api.github.com/users/samubence/followers","following_url":"https://api.github.com/users/samubence/following{/other_user}","gists_url":"https://api.github.com/users/samubence/gists{/gist_id}","starred_url":"https://api.github.com/users/samubence/starred{/owner}{/repo}","subscriptions_url":"https://api.github.com/users/samubence/subscriptions","organizations_url":"https://api.github.com/users/samubence/orgs","repos_url":"https://api.github.com/users/samubence/repos","events_url":"https://api.github.com/users/samubence/events{/privacy}","received_events_url":"https://api.github.com/users/samubence/received_events","type":"User","site_admin":false},"labels":[],"state":"open","locked":false,"assignee":null,"assignees":[],"milestone":null,"comments":2,"created_at":"2018-09-24T09:38:21Z","updated_at":"2018-10-04T22:28:29Z","closed_at":null,"author_association":"NONE","body":"You can change the value of a parameter e.g ofxSlider using the mouse scroll... which is a great feature.\r\nBut it still happens when the group is minimised and the cursor is not over the element. (it is very weird that in this case the slider only goes up and much faster than is was maximised)\r\nThe same happens when we don't display the panel, scroll still active.\r\nThis is a minimal code to demonstrate the behaviour. 
\r\n\r\nI'm using of_0.10.0 on macOS, xcode 10.0\r\n\r\n```\r\n#include \"ofMain.h\"\r\n#include \"ofxGui.h\"\r\n\r\nclass ofApp : public ofBaseApp\r\n{\r\npublic:\r\n void setup()\r\n {\r\n gui.setDefaultWidth(400);\r\n gui.setup();\r\n gui.setPosition(0, 0);\r\n group.setName(\"GROUP\");\r\n group.add(param.set(\"param\", 0, 0, 255));\r\n gui.add(group);\r\n displayGui = true;\r\n };\r\n \r\n void draw()\r\n {\r\n ofBackground(param, 0, 0);\r\n \r\n if (displayGui)\r\n {\r\n gui.draw();\r\n }\r\n };\r\n \r\n void keyPressed(int key)\r\n {\r\n displayGui = !displayGui;\r\n }\r\n \r\n ofxPanel gui;\r\n ofParameterGroup group;\r\n ofParameter<float> param;\r\n bool displayGui;\r\n};\r\n\r\nint main( )\r\n{\r\n\tofSetupOpenGL(400,100,OF_WINDOW);\r\n\tofRunApp(new ofApp());\r\n}\r\n```"},{"url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6132","repository_url":"https://api.github.com/repos/openframeworks/openFrameworks","labels_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6132/labels{/name}","comments_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6132/comments","events_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6132/events","html_url":"https://github.com/openframeworks/openFrameworks/issues/6132","id":362785091,"node_id":"MDU6SXNzdWUzNjI3ODUwOTE=","number":6132,"title":"freeimage.dll not found (Windows 10, OF 0.10.0)","user":{"login":"k2msmith","id":41271669,"node_id":"MDQ6VXNlcjQxMjcxNjY5","avatar_url":"https://avatars3.githubusercontent.com/u/41271669?v=4","gravatar_id":"","url":"https://api.github.com/users/k2msmith","html_url":"https://github.com/k2msmith","followers_url":"https://api.github.com/users/k2msmith/followers","following_url":"https://api.github.com/users/k2msmith/following{/other_user}","gists_url":"https://api.github.com/users/k2msmith/gists{/gist_id}","starred_url":"https://api.github.com/users/k2msmith/starred{/owner}{/repo}","subscriptions_url":"https://api.github.com/users/k2msmith/subscriptions","organizations_url":"https://api.github.com/users/k2msmith/orgs","repos_url":"https://api.github.com/users/k2msmith/repos","events_url":"https://api.github.com/users/k2msmith/events{/privacy}","received_events_url":"https://api.github.com/users/k2msmith/received_events","type":"User","site_admin":false},"labels":[],"state":"open","locked":false,"assignee":null,"assignees":[],"milestone":null,"comments":0,"created_at":"2018-09-21T21:48:33Z","updated_at":"2018-09-21T21:48:33Z","closed_at":null,"author_association":"NONE","body":"On Windows 10 Pro and version 0.10.0 OF. On some machines OF VS build/ link doesn’t find the freeimage.dll. This is puzzling because it appears that OF has a copy of the library in it’s release tree. So, why would I need to install another copy of the library on Windows ? I have reinstalled the library, but eventually the problem comes back. I am guessing that a windows update may have stomped on it and removed it again. The problem only occurs for me on w32 but not x64. 
On googling around, it appears other OF users have encountered the same problem (so it's just not my installation)"},{"url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6129","repository_url":"https://api.github.com/repos/openframeworks/openFrameworks","labels_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6129/labels{/name}","comments_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6129/comments","events_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6129/events","html_url":"https://github.com/openframeworks/openFrameworks/issues/6129","id":361464541,"node_id":"MDU6SXNzdWUzNjE0NjQ1NDE=","number":6129,"title":"ofDirectory::sort() is slow! ","user":{"login":"ofTheo","id":144000,"node_id":"MDQ6VXNlcjE0NDAwMA==","avatar_url":"https://avatars3.githubusercontent.com/u/144000?v=4","gravatar_id":"","url":"https://api.github.com/users/ofTheo","html_url":"https://github.com/ofTheo","followers_url":"https://api.github.com/users/ofTheo/followers","following_url":"https://api.github.com/users/ofTheo/following{/other_user}","gists_url":"https://api.github.com/users/ofTheo/gists{/gist_id}","starred_url":"https://api.github.com/users/ofTheo/starred{/owner}{/repo}","subscriptions_url":"https://api.github.com/users/ofTheo/subscriptions","organizations_url":"https://api.github.com/users/ofTheo/orgs","repos_url":"https://api.github.com/users/ofTheo/repos","events_url":"https://api.github.com/users/ofTheo/events{/privacy}","received_events_url":"https://api.github.com/users/ofTheo/received_events","type":"User","site_admin":false},"labels":[{"id":14818,"node_id":"MDU6TGFiZWwxNDgxOA==","url":"https://api.github.com/repos/openframeworks/openFrameworks/labels/core","name":"core","color":"db6a1f","default":false}],"state":"open","locked":false,"assignee":{"login":"ofTheo","id":144000,"node_id":"MDQ6VXNlcjE0NDAwMA==","avatar_url":"https://avatars3.githubusercontent.com/u/144000?v=4","gravatar_id":"","url":"https://api.github.com/users/ofTheo","html_url":"https://github.com/ofTheo","followers_url":"https://api.github.com/users/ofTheo/followers","following_url":"https://api.github.com/users/ofTheo/following{/other_user}","gists_url":"https://api.github.com/users/ofTheo/gists{/gist_id}","starred_url":"https://api.github.com/users/ofTheo/starred{/owner}{/repo}","subscriptions_url":"https://api.github.com/users/ofTheo/subscriptions","organizations_url":"https://api.github.com/users/ofTheo/orgs","repos_url":"https://api.github.com/users/ofTheo/repos","events_url":"https://api.github.com/users/ofTheo/events{/privacy}","received_events_url":"https://api.github.com/users/ofTheo/received_events","type":"User","site_admin":false},"assignees":[{"login":"ofTheo","id":144000,"node_id":"MDQ6VXNlcjE0NDAwMA==","avatar_url":"https://avatars3.githubusercontent.com/u/144000?v=4","gravatar_id":"","url":"https://api.github.com/users/ofTheo","html_url":"https://github.com/ofTheo","followers_url":"https://api.github.com/users/ofTheo/followers","following_url":"https://api.github.com/users/ofTheo/following{/other_user}","gists_url":"https://api.github.com/users/ofTheo/gists{/gist_id}","starred_url":"https://api.github.com/users/ofTheo/starred{/owner}{/repo}","subscriptions_url":"https://api.github.com/users/ofTheo/subscriptions","organizations_url":"https://api.github.com/users/ofTheo/orgs","repos_url":"https://api.github.com/users/ofTheo/repos","events_url":"https://api.github.com/users/ofTheo/events{/privacy}","received_events_url":"https://ap
i.github.com/users/ofTheo/received_events","type":"User","site_admin":false}],"milestone":{"url":"https://api.github.com/repos/openframeworks/openFrameworks/milestones/21","html_url":"https://github.com/openframeworks/openFrameworks/milestone/21","labels_url":"https://api.github.com/repos/openframeworks/openFrameworks/milestones/21/labels","id":902324,"node_id":"MDk6TWlsZXN0b25lOTAyMzI0","number":21,"title":"0.10.1","description":"","creator":{"login":"kylemcdonald","id":157106,"node_id":"MDQ6VXNlcjE1NzEwNg==","avatar_url":"https://avatars3.githubusercontent.com/u/157106?v=4","gravatar_id":"","url":"https://api.github.com/users/kylemcdonald","html_url":"https://github.com/kylemcdonald","followers_url":"https://api.github.com/users/kylemcdonald/followers","following_url":"https://api.github.com/users/kylemcdonald/following{/other_user}","gists_url":"https://api.github.com/users/kylemcdonald/gists{/gist_id}","starred_url":"https://api.github.com/users/kylemcdonald/starred{/owner}{/repo}","subscriptions_url":"https://api.github.com/users/kylemcdonald/subscriptions","organizations_url":"https://api.github.com/users/kylemcdonald/orgs","repos_url":"https://api.github.com/users/kylemcdonald/repos","events_url":"https://api.github.com/users/kylemcdonald/events{/privacy}","received_events_url":"https://api.github.com/users/kylemcdonald/received_events","type":"User","site_admin":false},"open_issues":31,"closed_issues":6,"state":"open","created_at":"2014-12-12T22:37:18Z","updated_at":"2018-09-18T20:11:58Z","due_on":null,"closed_at":null},"comments":5,"created_at":"2018-09-18T20:11:58Z","updated_at":"2018-10-22T08:44:25Z","closed_at":null,"author_association":"CONTRIBUTOR","body":"I noticed that with old projects which have a lot of png sequences that a project that used to take 20-30 seconds to load are now taking 15+ minutes. \r\n\r\nAll I did was add sort() after calls to listDir() ( due to the non-sorted directories Apple lovingly introduced #5852 ) \r\n\r\nTaking the current dirListExample, calling sort() with listDir() makes a directory listing takes more than 10x longer than not calling sort(), but then getting randomly ordered results. \r\n\r\nI noticed that if I sort the files first as strings instead of as a vector ofFile::References it is almost as fast as not sorting at all. \r\n\r\nI think this might be due to the extra overhead of having files loaded into memory ( even as references ). \r\n\r\nI need to put together a PR for this, the main issue I see is that I am sorting inside of listDir before even creating the files vector. The plus side is you get sorted results by default without having to call sort(); But calling sort() will still be slow. I could prob get the fix to work in sort() too without losing the performance improvement. It might just need to rebuild the file list which is doable. \r\n\r\nThis is a comparison for sorting 110 files. 
\r\n```\r\nDebug\r\n time diff for file list with current sort: 0.349263 seconds \r\n time diff for file list with string based sort: 0.0325761 seconds \r\n\r\n= 10.7x faster with string based sort \r\n\r\nRelease\r\n time diff for file list with current sort: 0.339156 seconds \r\n time diff for file list with string based sort: 0.0316965 seconds \r\n\r\n= 10.7x faster with string based sort \r\n ```\r\n\r\nFor 792 files:\r\n\r\n```\r\nRelease:\r\n time diff for file list with current sort: 4.0761 seconds \r\n time diff for file list with string based sort: 0.180382 seconds \r\n\r\n= 21x faster with string based sort\r\n```\r\n\r\nGoing to post some more thoughts to this thread. \r\n\r\n\r\n"},{"url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6128","repository_url":"https://api.github.com/repos/openframeworks/openFrameworks","labels_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6128/labels{/name}","comments_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6128/comments","events_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6128/events","html_url":"https://github.com/openframeworks/openFrameworks/pull/6128","id":361122717,"node_id":"MDExOlB1bGxSZXF1ZXN0MjE2MTczNDIz","number":6128,"title":"Issue when saving jpg","user":{"login":"cyrstem","id":2263696,"node_id":"MDQ6VXNlcjIyNjM2OTY=","avatar_url":"https://avatars0.githubusercontent.com/u/2263696?v=4","gravatar_id":"","url":"https://api.github.com/users/cyrstem","html_url":"https://github.com/cyrstem","followers_url":"https://api.github.com/users/cyrstem/followers","following_url":"https://api.github.com/users/cyrstem/following{/other_user}","gists_url":"https://api.github.com/users/cyrstem/gists{/gist_id}","starred_url":"https://api.github.com/users/cyrstem/starred{/owner}{/repo}","subscriptions_url":"https://api.github.com/users/cyrstem/subscriptions","organizations_url":"https://api.github.com/users/cyrstem/orgs","repos_url":"https://api.github.com/users/cyrstem/repos","events_url":"https://api.github.com/users/cyrstem/events{/privacy}","received_events_url":"https://api.github.com/users/cyrstem/received_events","type":"User","site_admin":false},"labels":[],"state":"open","locked":false,"assignee":null,"assignees":[],"milestone":null,"comments":4,"created_at":"2018-09-18T03:33:39Z","updated_at":"2018-10-05T03:35:54Z","closed_at":null,"author_association":"NONE","pull_request":{"url":"https://api.github.com/repos/openframeworks/openFrameworks/pulls/6128","html_url":"https://github.com/openframeworks/openFrameworks/pull/6128","diff_url":"https://github.com/openframeworks/openFrameworks/pull/6128.diff","patch_url":"https://github.com/openframeworks/openFrameworks/pull/6128.patch"},"body":"changed from auto pixelFormat = OF_PIXELS_BGRA;\r\nto auto pixelFormat = OF_PIXELS_RGBA;\r\ndon/t know if ALPHA channel should stay but this fix an issue saving images with 
grabScreen"},{"url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6127","repository_url":"https://api.github.com/repos/openframeworks/openFrameworks","labels_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6127/labels{/name}","comments_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6127/comments","events_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6127/events","html_url":"https://github.com/openframeworks/openFrameworks/issues/6127","id":361033985,"node_id":"MDU6SXNzdWUzNjEwMzM5ODU=","number":6127,"title":"ofxPoco Problem Android Studio","user":{"login":"dottoremad","id":11161510,"node_id":"MDQ6VXNlcjExMTYxNTEw","avatar_url":"https://avatars3.githubusercontent.com/u/11161510?v=4","gravatar_id":"","url":"https://api.github.com/users/dottoremad","html_url":"https://github.com/dottoremad","followers_url":"https://api.github.com/users/dottoremad/followers","following_url":"https://api.github.com/users/dottoremad/following{/other_user}","gists_url":"https://api.github.com/users/dottoremad/gists{/gist_id}","starred_url":"https://api.github.com/users/dottoremad/starred{/owner}{/repo}","subscriptions_url":"https://api.github.com/users/dottoremad/subscriptions","organizations_url":"https://api.github.com/users/dottoremad/orgs","repos_url":"https://api.github.com/users/dottoremad/repos","events_url":"https://api.github.com/users/dottoremad/events{/privacy}","received_events_url":"https://api.github.com/users/dottoremad/received_events","type":"User","site_admin":false},"labels":[],"state":"open","locked":false,"assignee":null,"assignees":[],"milestone":null,"comments":0,"created_at":"2018-09-17T20:37:28Z","updated_at":"2018-09-17T20:37:28Z","closed_at":null,"author_association":"NONE","body":"I am working on a project using ofxPoco and ofxHttpUtils.\r\nEverything works fine in Visual Studio on Windows and in Xcode on MacOS.\r\n\r\nIn Android Studio (Windows v 3.1.2 and MacOS v 3.1.4) using OF 0.10.0, the includes regarding Poco (addons\\ofxPoco\\libs\\poco\\include\\Poco) are not found, as for example\r\n#include <Poco/DOM/Document.h> at line 9 of ofxPoco/src/ofxXmlPoco.h\r\n\r\nTo reproduce the problem copy paste the empty example and add ofxPoco using the project generator, then open from Android Studio and build it."},{"url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6126","repository_url":"https://api.github.com/repos/openframeworks/openFrameworks","labels_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6126/labels{/name}","comments_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6126/comments","events_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6126/events","html_url":"https://github.com/openframeworks/openFrameworks/issues/6126","id":360902537,"node_id":"MDU6SXNzdWUzNjA5MDI1Mzc=","number":6126,"title":"emscripten emrun missing 
functions","user":{"login":"Mach1Studios","id":13720381,"node_id":"MDQ6VXNlcjEzNzIwMzgx","avatar_url":"https://avatars1.githubusercontent.com/u/13720381?v=4","gravatar_id":"","url":"https://api.github.com/users/Mach1Studios","html_url":"https://github.com/Mach1Studios","followers_url":"https://api.github.com/users/Mach1Studios/followers","following_url":"https://api.github.com/users/Mach1Studios/following{/other_user}","gists_url":"https://api.github.com/users/Mach1Studios/gists{/gist_id}","starred_url":"https://api.github.com/users/Mach1Studios/starred{/owner}{/repo}","subscriptions_url":"https://api.github.com/users/Mach1Studios/subscriptions","organizations_url":"https://api.github.com/users/Mach1Studios/orgs","repos_url":"https://api.github.com/users/Mach1Studios/repos","events_url":"https://api.github.com/users/Mach1Studios/events{/privacy}","received_events_url":"https://api.github.com/users/Mach1Studios/received_events","type":"User","site_admin":false},"labels":[],"state":"open","locked":false,"assignee":null,"assignees":[],"milestone":null,"comments":0,"created_at":"2018-09-17T14:42:49Z","updated_at":"2018-09-17T14:42:49Z","closed_at":null,"author_association":"NONE","body":"After going through all the build instructions below with the latest, I am unable to run any example or project, i have tried several combinations and used `emcc --clear-cache` and `emmake make clean` throughout rebuilds with no success.\r\n\r\n```\r\n./emsdk update\r\n./emsdk install latest\r\n./emsdk activate latest\r\nsource ./emsdk_env.sh\r\ngit clone --depth=1 https://github.com/openFrameworks/openFrameworks.git\r\ncd openFrameworks\r\nscripts/emscripten/./download_libs.sh\r\ncd examples/3d/3DPrimitivesExample\r\ncp ../../../scripts/templates/emscripten/Makefile .\r\nemmake make\r\n```\r\nI have even added \r\n`cp ../../../scripts/templates/emscripten/config.make .`\r\nwhich did not help either\r\n\r\nthis is the result is always along the lines of:\r\n`$ emrun --browser chrome bin/3DPrimitivesExample.html` \r\n```\r\nExecuting /Applications/Google Chrome.app/Contents/MacOS/Google Chrome --incognito --enable-nacl --enable-pnacl --disable-restore-session-state --enable-webgl --no-default-browser-check --no-first-run --allow-file-access-from-files http://localhost:6931/3DPrimitivesExample.html\r\nCreated new window in existing browser session.\r\nmissing function: 
_Znwj\r\n-1\r\n-1\r\n```"},{"url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6125","repository_url":"https://api.github.com/repos/openframeworks/openFrameworks","labels_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6125/labels{/name}","comments_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6125/comments","events_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6125/events","html_url":"https://github.com/openframeworks/openFrameworks/issues/6125","id":359925364,"node_id":"MDU6SXNzdWUzNTk5MjUzNjQ=","number":6125,"title":"ofAbstractParameter::isType<ParameterType>()","user":{"login":"eduardfrigola","id":7095844,"node_id":"MDQ6VXNlcjcwOTU4NDQ=","avatar_url":"https://avatars2.githubusercontent.com/u/7095844?v=4","gravatar_id":"","url":"https://api.github.com/users/eduardfrigola","html_url":"https://github.com/eduardfrigola","followers_url":"https://api.github.com/users/eduardfrigola/followers","following_url":"https://api.github.com/users/eduardfrigola/following{/other_user}","gists_url":"https://api.github.com/users/eduardfrigola/gists{/gist_id}","starred_url":"https://api.github.com/users/eduardfrigola/starred{/owner}{/repo}","subscriptions_url":"https://api.github.com/users/eduardfrigola/subscriptions","organizations_url":"https://api.github.com/users/eduardfrigola/orgs","repos_url":"https://api.github.com/users/eduardfrigola/repos","events_url":"https://api.github.com/users/eduardfrigola/events{/privacy}","received_events_url":"https://api.github.com/users/eduardfrigola/received_events","type":"User","site_admin":false},"labels":[],"state":"open","locked":false,"assignee":null,"assignees":[],"milestone":null,"comments":0,"created_at":"2018-09-13T14:37:31Z","updated_at":"2018-09-13T14:37:31Z","closed_at":null,"author_association":"NONE","body":"I have been working with ofParameter a lot recently, and, despite trying to use templates as much as I can, somewhere I end up checking the type of a parameter. \r\n\r\n```cpp\r\nif(p.type() == typeid(ofParameter<float>).name()){\r\n auto &cast = p.cast<float>();\r\n}\r\nelse if(p.type() == typeid(ofParameter<int>).name()){\r\n auto &cast = p.cast<int>();\r\n}\r\nelse if(p.type() == typeid(ofParameter<bool>).name()){\r\n auto &cast = p.cast<bool>();\r\n}\r\nelse if(p.type() == typeid(ofParameter<ofColor>).name()){\r\n auto &cast = p.cast<ofColor>();\r\n}\r\nelse if(p.type() == typeid(ofParameter<string>).name()){\r\n auto &cast = p.cast<string>();\r\n}\r\n```\r\n\r\n\r\nI thing it will be nice to add a function to ```ofAbstractParameter``` class to be able to check the parameter type with less code, and something more readable. Something as:\r\n\r\n```cpp\r\ntemplate<typename ParameterType>\r\nbool isType(){\r\n\treturn type() == typeid(ofParameter<ParameterType>).name();\r\n}\r\n```\r\n\r\nThe old code with the new function:\r\n```cpp\r\nif(p.isType<float>()){\r\n auto &cast = p.cast<float>();\r\n}\r\nelse if(p.isType<int>()){\r\n auto &cast = p.cast<int>();\r\n}\r\nelse if(p.isType<bool>()){\r\n auto &cast = p.cast<bool>();\r\n}\r\nelse if(p.isType<ofColor>()){\r\n auto &cast = p.cast<ofColor>();\r\n}\r\nelse if(p.isType<string>()){\r\n auto &cast = p.cast<string>();\r\n}\r\n```\r\n\r\n\r\nDoes this make sense? 
\r\nIt makes ```type()``` unclear?\r\nDoes the name ```isType()``` is the correct one to use?\r\n\r\nEduard\r\n"},{"url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6124","repository_url":"https://api.github.com/repos/openframeworks/openFrameworks","labels_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6124/labels{/name}","comments_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6124/comments","events_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6124/events","html_url":"https://github.com/openframeworks/openFrameworks/issues/6124","id":358370579,"node_id":"MDU6SXNzdWUzNTgzNzA1Nzk=","number":6124,"title":"ofVideoPlayer in emscripten doesn't support urls","user":{"login":"arturoc","id":48240,"node_id":"MDQ6VXNlcjQ4MjQw","avatar_url":"https://avatars0.githubusercontent.com/u/48240?v=4","gravatar_id":"","url":"https://api.github.com/users/arturoc","html_url":"https://github.com/arturoc","followers_url":"https://api.github.com/users/arturoc/followers","following_url":"https://api.github.com/users/arturoc/following{/other_user}","gists_url":"https://api.github.com/users/arturoc/gists{/gist_id}","starred_url":"https://api.github.com/users/arturoc/starred{/owner}{/repo}","subscriptions_url":"https://api.github.com/users/arturoc/subscriptions","organizations_url":"https://api.github.com/users/arturoc/orgs","repos_url":"https://api.github.com/users/arturoc/repos","events_url":"https://api.github.com/users/arturoc/events{/privacy}","received_events_url":"https://api.github.com/users/arturoc/received_events","type":"User","site_admin":false},"labels":[],"state":"open","locked":false,"assignee":null,"assignees":[],"milestone":null,"comments":7,"created_at":"2018-09-09T11:22:14Z","updated_at":"2018-09-11T07:55:25Z","closed_at":null,"author_association":"MEMBER","body":"we should check if the passed path is an url and not try to open a file in that case"},{"url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6119","repository_url":"https://api.github.com/repos/openframeworks/openFrameworks","labels_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6119/labels{/name}","comments_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6119/comments","events_url":"https://api.github.com/repos/openframeworks/openFrameworks/issues/6119/events","html_url":"https://github.com/openframeworks/openFrameworks/issues/6119","id":356742467,"node_id":"MDU6SXNzdWUzNTY3NDI0Njc=","number":6119,"title":"support ofFastEvent in 
ofParameter","user":{"login":"sphaero","id":832465,"node_id":"MDQ6VXNlcjgzMjQ2NQ==","avatar_url":"https://avatars1.githubusercontent.com/u/832465?v=4","gravatar_id":"","url":"https://api.github.com/users/sphaero","html_url":"https://github.com/sphaero","followers_url":"https://api.github.com/users/sphaero/followers","following_url":"https://api.github.com/users/sphaero/following{/other_user}","gists_url":"https://api.github.com/users/sphaero/gists{/gist_id}","starred_url":"https://api.github.com/users/sphaero/starred{/owner}{/repo}","subscriptions_url":"https://api.github.com/users/sphaero/subscriptions","organizations_url":"https://api.github.com/users/sphaero/orgs","repos_url":"https://api.github.com/users/sphaero/repos","events_url":"https://api.github.com/users/sphaero/events{/privacy}","received_events_url":"https://api.github.com/users/sphaero/received_events","type":"User","site_admin":false},"labels":[],"state":"open","locked":false,"assignee":null,"assignees":[],"milestone":null,"comments":0,"created_at":"2018-09-04T10:12:42Z","updated_at":"2018-09-04T10:12:42Z","closed_at":null,"author_association":"CONTRIBUTOR","body":"As discussed here: https://forum.openframeworks.cc/t/ofparametergroup-and-custom-inherited-ofparameter-class/30237/12\r\n\r\ni.e. add an extra optional parameter to ofParameter which enabled ofFastEvent instead of the default ofEvent"}] + +https +GET +api.github.com +None +/repos/openframeworks/openFrameworks/stargazers +{'Accept': 'application/vnd.github.v3.star+json', 'Authorization': 'Basic login_and_password_removed', 'User-Agent': 'PyGithub/Python'} +None +200 +[('Server', 'GitHub.com'), ('Date', 'Fri, 26 Oct 2018 06:02:42 GMT'), ('Content-Type', 'application/json; charset=utf-8'), ('Transfer-Encoding', 'chunked'), ('Status', '200 OK'), ('X-RateLimit-Limit', '5000'), ('X-RateLimit-Remaining', '4970'), ('X-RateLimit-Reset', '1540536267'), ('Cache-Control', 'private, max-age=60, s-maxage=60'), ('Vary', 'Accept, Authorization, Cookie, X-GitHub-OTP'), ('ETag', 'W/"3ffd69d4f4e7383eae948812b1eb72e5"'), ('X-OAuth-Scopes', 'repo'), ('X-Accepted-OAuth-Scopes', ''), ('X-GitHub-Media-Type', 'github.v3; param=star; format=json'), ('Link', '<https://api.github.com/repositories/345337/stargazers?page=2>; rel="next", <https://api.github.com/repositories/345337/stargazers?page=216>; rel="last"'), ('Access-Control-Expose-Headers', 'ETag, Link, Retry-After, X-GitHub-OTP, X-RateLimit-Limit, X-RateLimit-Remaining, X-RateLimit-Reset, X-OAuth-Scopes, X-Accepted-OAuth-Scopes, X-Poll-Interval'), ('Access-Control-Allow-Origin', '*'), ('Strict-Transport-Security', 'max-age=31536000; includeSubdomains; preload'), ('X-Frame-Options', 'deny'), ('X-Content-Type-Options', 'nosniff'), ('X-XSS-Protection', '1; mode=block'), ('Referrer-Policy', 'origin-when-cross-origin, strict-origin-when-cross-origin'), ('Content-Security-Policy', "default-src 'none'"), ('Content-Encoding', 'gzip'), ('X-GitHub-Request-Id', 'F9D6:235E:3C00DD:772BC6:5BD2AE02')] 
+[{"starred_at":"2009-10-21T21:55:54Z","user":{"login":"atduskgreg","id":165,"node_id":"MDQ6VXNlcjE2NQ==","avatar_url":"https://avatars1.githubusercontent.com/u/165?v=4","gravatar_id":"","url":"https://api.github.com/users/atduskgreg","html_url":"https://github.com/atduskgreg","followers_url":"https://api.github.com/users/atduskgreg/followers","following_url":"https://api.github.com/users/atduskgreg/following{/other_user}","gists_url":"https://api.github.com/users/atduskgreg/gists{/gist_id}","starred_url":"https://api.github.com/users/atduskgreg/starred{/owner}{/repo}","subscriptions_url":"https://api.github.com/users/atduskgreg/subscriptions","organizations_url":"https://api.github.com/users/atduskgreg/orgs","repos_url":"https://api.github.com/users/atduskgreg/repos","events_url":"https://api.github.com/users/atduskgreg/events{/privacy}","received_events_url":"https://api.github.com/users/atduskgreg/received_events","type":"User","site_admin":false}},{"starred_at":"2009-10-21T21:55:54Z","user":{"login":"DocSavage","id":185,"node_id":"MDQ6VXNlcjE4NQ==","avatar_url":"https://avatars1.githubusercontent.com/u/185?v=4","gravatar_id":"","url":"https://api.github.com/users/DocSavage","html_url":"https://github.com/DocSavage","followers_url":"https://api.github.com/users/DocSavage/followers","following_url":"https://api.github.com/users/DocSavage/following{/other_user}","gists_url":"https://api.github.com/users/DocSavage/gists{/gist_id}","starred_url":"https://api.github.com/users/DocSavage/starred{/owner}{/repo}","subscriptions_url":"https://api.github.com/users/DocSavage/subscriptions","organizations_url":"https://api.github.com/users/DocSavage/orgs","repos_url":"https://api.github.com/users/DocSavage/repos","events_url":"https://api.github.com/users/DocSavage/events{/privacy}","received_events_url":"https://api.github.com/users/DocSavage/received_events","type":"User","site_admin":false}},{"starred_at":"2009-10-21T21:55:54Z","user":{"login":"dfl","id":282,"node_id":"MDQ6VXNlcjI4Mg==","avatar_url":"https://avatars0.githubusercontent.com/u/282?v=4","gravatar_id":"","url":"https://api.github.com/users/dfl","html_url":"https://github.com/dfl","followers_url":"https://api.github.com/users/dfl/followers","following_url":"https://api.github.com/users/dfl/following{/other_user}","gists_url":"https://api.github.com/users/dfl/gists{/gist_id}","starred_url":"https://api.github.com/users/dfl/starred{/owner}{/repo}","subscriptions_url":"https://api.github.com/users/dfl/subscriptions","organizations_url":"https://api.github.com/users/dfl/orgs","repos_url":"https://api.github.com/users/dfl/repos","events_url":"https://api.github.com/users/dfl/events{/privacy}","received_events_url":"https://api.github.com/users/dfl/received_events","type":"User","site_admin":false}},{"starred_at":"2009-10-21T21:55:54Z","user":{"login":"norio","id":307,"node_id":"MDQ6VXNlcjMwNw==","avatar_url":"https://avatars1.githubusercontent.com/u/307?v=4","gravatar_id":"","url":"https://api.github.com/users/norio","html_url":"https://github.com/norio","followers_url":"https://api.github.com/users/norio/followers","following_url":"https://api.github.com/users/norio/following{/other_user}","gists_url":"https://api.github.com/users/norio/gists{/gist_id}","starred_url":"https://api.github.com/users/norio/starred{/owner}{/repo}","subscriptions_url":"https://api.github.com/users/norio/subscriptions","organizations_url":"https://api.github.com/users/norio/orgs","repos_url":"https://api.github.com/users/norio/repos","events_url":"https://api.gith
ub.com/users/norio/events{/privacy}","received_events_url":"https://api.github.com/users/norio/received_events","type":"User","site_admin":false}},{"starred_at":"2009-10-21T21:55:54Z","user":{"login":"auser","id":529,"node_id":"MDQ6VXNlcjUyOQ==","avatar_url":"https://avatars1.githubusercontent.com/u/529?v=4","gravatar_id":"","url":"https://api.github.com/users/auser","html_url":"https://github.com/auser","followers_url":"https://api.github.com/users/auser/followers","following_url":"https://api.github.com/users/auser/following{/other_user}","gists_url":"https://api.github.com/users/auser/gists{/gist_id}","starred_url":"https://api.github.com/users/auser/starred{/owner}{/repo}","subscriptions_url":"https://api.github.com/users/auser/subscriptions","organizations_url":"https://api.github.com/users/auser/orgs","repos_url":"https://api.github.com/users/auser/repos","events_url":"https://api.github.com/users/auser/events{/privacy}","received_events_url":"https://api.github.com/users/auser/received_events","type":"User","site_admin":false}},{"starred_at":"2009-10-21T21:55:54Z","user":{"login":"keeran","id":848,"node_id":"MDQ6VXNlcjg0OA==","avatar_url":"https://avatars0.githubusercontent.com/u/848?v=4","gravatar_id":"","url":"https://api.github.com/users/keeran","html_url":"https://github.com/keeran","followers_url":"https://api.github.com/users/keeran/followers","following_url":"https://api.github.com/users/keeran/following{/other_user}","gists_url":"https://api.github.com/users/keeran/gists{/gist_id}","starred_url":"https://api.github.com/users/keeran/starred{/owner}{/repo}","subscriptions_url":"https://api.github.com/users/keeran/subscriptions","organizations_url":"https://api.github.com/users/keeran/orgs","repos_url":"https://api.github.com/users/keeran/repos","events_url":"https://api.github.com/users/keeran/events{/privacy}","received_events_url":"https://api.github.com/users/keeran/received_events","type":"User","site_admin":true}},{"starred_at":"2009-10-21T21:55:54Z","user":{"login":"antramm","id":917,"node_id":"MDQ6VXNlcjkxNw==","avatar_url":"https://avatars2.githubusercontent.com/u/917?v=4","gravatar_id":"","url":"https://api.github.com/users/antramm","html_url":"https://github.com/antramm","followers_url":"https://api.github.com/users/antramm/followers","following_url":"https://api.github.com/users/antramm/following{/other_user}","gists_url":"https://api.github.com/users/antramm/gists{/gist_id}","starred_url":"https://api.github.com/users/antramm/starred{/owner}{/repo}","subscriptions_url":"https://api.github.com/users/antramm/subscriptions","organizations_url":"https://api.github.com/users/antramm/orgs","repos_url":"https://api.github.com/users/antramm/repos","events_url":"https://api.github.com/users/antramm/events{/privacy}","received_events_url":"https://api.github.com/users/antramm/received_events","type":"User","site_admin":false}},{"starred_at":"2009-10-21T21:55:54Z","user":{"login":"ludwig","id":1056,"node_id":"MDQ6VXNlcjEwNTY=","avatar_url":"https://avatars0.githubusercontent.com/u/1056?v=4","gravatar_id":"","url":"https://api.github.com/users/ludwig","html_url":"https://github.com/ludwig","followers_url":"https://api.github.com/users/ludwig/followers","following_url":"https://api.github.com/users/ludwig/following{/other_user}","gists_url":"https://api.github.com/users/ludwig/gists{/gist_id}","starred_url":"https://api.github.com/users/ludwig/starred{/owner}{/repo}","subscriptions_url":"https://api.github.com/users/ludwig/subscriptions","organizations_url":"https://api.github.c
om/users/ludwig/orgs","repos_url":"https://api.github.com/users/ludwig/repos","events_url":"https://api.github.com/users/ludwig/events{/privacy}","received_events_url":"https://api.github.com/users/ludwig/received_events","type":"User","site_admin":false}},{"starred_at":"2009-10-21T21:55:54Z","user":{"login":"mk","id":1187,"node_id":"MDQ6VXNlcjExODc=","avatar_url":"https://avatars2.githubusercontent.com/u/1187?v=4","gravatar_id":"","url":"https://api.github.com/users/mk","html_url":"https://github.com/mk","followers_url":"https://api.github.com/users/mk/followers","following_url":"https://api.github.com/users/mk/following{/other_user}","gists_url":"https://api.github.com/users/mk/gists{/gist_id}","starred_url":"https://api.github.com/users/mk/starred{/owner}{/repo}","subscriptions_url":"https://api.github.com/users/mk/subscriptions","organizations_url":"https://api.github.com/users/mk/orgs","repos_url":"https://api.github.com/users/mk/repos","events_url":"https://api.github.com/users/mk/events{/privacy}","received_events_url":"https://api.github.com/users/mk/received_events","type":"User","site_admin":false}},{"starred_at":"2009-10-21T21:55:54Z","user":{"login":"reedlaw","id":1344,"node_id":"MDQ6VXNlcjEzNDQ=","avatar_url":"https://avatars0.githubusercontent.com/u/1344?v=4","gravatar_id":"","url":"https://api.github.com/users/reedlaw","html_url":"https://github.com/reedlaw","followers_url":"https://api.github.com/users/reedlaw/followers","following_url":"https://api.github.com/users/reedlaw/following{/other_user}","gists_url":"https://api.github.com/users/reedlaw/gists{/gist_id}","starred_url":"https://api.github.com/users/reedlaw/starred{/owner}{/repo}","subscriptions_url":"https://api.github.com/users/reedlaw/subscriptions","organizations_url":"https://api.github.com/users/reedlaw/orgs","repos_url":"https://api.github.com/users/reedlaw/repos","events_url":"https://api.github.com/users/reedlaw/events{/privacy}","received_events_url":"https://api.github.com/users/reedlaw/received_events","type":"User","site_admin":false}},{"starred_at":"2009-10-21T21:55:54Z","user":{"login":"esiegel","id":1390,"node_id":"MDQ6VXNlcjEzOTA=","avatar_url":"https://avatars2.githubusercontent.com/u/1390?v=4","gravatar_id":"","url":"https://api.github.com/users/esiegel","html_url":"https://github.com/esiegel","followers_url":"https://api.github.com/users/esiegel/followers","following_url":"https://api.github.com/users/esiegel/following{/other_user}","gists_url":"https://api.github.com/users/esiegel/gists{/gist_id}","starred_url":"https://api.github.com/users/esiegel/starred{/owner}{/repo}","subscriptions_url":"https://api.github.com/users/esiegel/subscriptions","organizations_url":"https://api.github.com/users/esiegel/orgs","repos_url":"https://api.github.com/users/esiegel/repos","events_url":"https://api.github.com/users/esiegel/events{/privacy}","received_events_url":"https://api.github.com/users/esiegel/received_events","type":"User","site_admin":false}},{"starred_at":"2009-10-21T21:55:54Z","user":{"login":"gaubert","id":1482,"node_id":"MDQ6VXNlcjE0ODI=","avatar_url":"https://avatars1.githubusercontent.com/u/1482?v=4","gravatar_id":"","url":"https://api.github.com/users/gaubert","html_url":"https://github.com/gaubert","followers_url":"https://api.github.com/users/gaubert/followers","following_url":"https://api.github.com/users/gaubert/following{/other_user}","gists_url":"https://api.github.com/users/gaubert/gists{/gist_id}","starred_url":"https://api.github.com/users/gaubert/starred{/owner}{/repo}","subscription
s_url":"https://api.github.com/users/gaubert/subscriptions","organizations_url":"https://api.github.com/users/gaubert/orgs","repos_url":"https://api.github.com/users/gaubert/repos","events_url":"https://api.github.com/users/gaubert/events{/privacy}","received_events_url":"https://api.github.com/users/gaubert/received_events","type":"User","site_admin":false}},{"starred_at":"2009-10-21T21:55:54Z","user":{"login":"pushkar","id":1484,"node_id":"MDQ6VXNlcjE0ODQ=","avatar_url":"https://avatars1.githubusercontent.com/u/1484?v=4","gravatar_id":"","url":"https://api.github.com/users/pushkar","html_url":"https://github.com/pushkar","followers_url":"https://api.github.com/users/pushkar/followers","following_url":"https://api.github.com/users/pushkar/following{/other_user}","gists_url":"https://api.github.com/users/pushkar/gists{/gist_id}","starred_url":"https://api.github.com/users/pushkar/starred{/owner}{/repo}","subscriptions_url":"https://api.github.com/users/pushkar/subscriptions","organizations_url":"https://api.github.com/users/pushkar/orgs","repos_url":"https://api.github.com/users/pushkar/repos","events_url":"https://api.github.com/users/pushkar/events{/privacy}","received_events_url":"https://api.github.com/users/pushkar/received_events","type":"User","site_admin":false}},{"starred_at":"2009-10-21T21:55:54Z","user":{"login":"edwardgeorge","id":1572,"node_id":"MDQ6VXNlcjE1NzI=","avatar_url":"https://avatars0.githubusercontent.com/u/1572?v=4","gravatar_id":"","url":"https://api.github.com/users/edwardgeorge","html_url":"https://github.com/edwardgeorge","followers_url":"https://api.github.com/users/edwardgeorge/followers","following_url":"https://api.github.com/users/edwardgeorge/following{/other_user}","gists_url":"https://api.github.com/users/edwardgeorge/gists{/gist_id}","starred_url":"https://api.github.com/users/edwardgeorge/starred{/owner}{/repo}","subscriptions_url":"https://api.github.com/users/edwardgeorge/subscriptions","organizations_url":"https://api.github.com/users/edwardgeorge/orgs","repos_url":"https://api.github.com/users/edwardgeorge/repos","events_url":"https://api.github.com/users/edwardgeorge/events{/privacy}","received_events_url":"https://api.github.com/users/edwardgeorge/received_events","type":"User","site_admin":false}},{"starred_at":"2009-10-21T21:55:54Z","user":{"login":"jonbro","id":1597,"node_id":"MDQ6VXNlcjE1OTc=","avatar_url":"https://avatars0.githubusercontent.com/u/1597?v=4","gravatar_id":"","url":"https://api.github.com/users/jonbro","html_url":"https://github.com/jonbro","followers_url":"https://api.github.com/users/jonbro/followers","following_url":"https://api.github.com/users/jonbro/following{/other_user}","gists_url":"https://api.github.com/users/jonbro/gists{/gist_id}","starred_url":"https://api.github.com/users/jonbro/starred{/owner}{/repo}","subscriptions_url":"https://api.github.com/users/jonbro/subscriptions","organizations_url":"https://api.github.com/users/jonbro/orgs","repos_url":"https://api.github.com/users/jonbro/repos","events_url":"https://api.github.com/users/jonbro/events{/privacy}","received_events_url":"https://api.github.com/users/jonbro/received_events","type":"User","site_admin":false}},{"starred_at":"2009-10-21T21:55:54Z","user":{"login":"lrtitze","id":1818,"node_id":"MDQ6VXNlcjE4MTg=","avatar_url":"https://avatars0.githubusercontent.com/u/1818?v=4","gravatar_id":"","url":"https://api.github.com/users/lrtitze","html_url":"https://github.com/lrtitze","followers_url":"https://api.github.com/users/lrtitze/followers","following_url":"http
s://api.github.com/users/lrtitze/following{/other_user}","gists_url":"https://api.github.com/users/lrtitze/gists{/gist_id}","starred_url":"https://api.github.com/users/lrtitze/starred{/owner}{/repo}","subscriptions_url":"https://api.github.com/users/lrtitze/subscriptions","organizations_url":"https://api.github.com/users/lrtitze/orgs","repos_url":"https://api.github.com/users/lrtitze/repos","events_url":"https://api.github.com/users/lrtitze/events{/privacy}","received_events_url":"https://api.github.com/users/lrtitze/received_events","type":"User","site_admin":false}},{"starred_at":"2009-10-21T21:55:54Z","user":{"login":"sroske","id":2015,"node_id":"MDQ6VXNlcjIwMTU=","avatar_url":"https://avatars1.githubusercontent.com/u/2015?v=4","gravatar_id":"","url":"https://api.github.com/users/sroske","html_url":"https://github.com/sroske","followers_url":"https://api.github.com/users/sroske/followers","following_url":"https://api.github.com/users/sroske/following{/other_user}","gists_url":"https://api.github.com/users/sroske/gists{/gist_id}","starred_url":"https://api.github.com/users/sroske/starred{/owner}{/repo}","subscriptions_url":"https://api.github.com/users/sroske/subscriptions","organizations_url":"https://api.github.com/users/sroske/orgs","repos_url":"https://api.github.com/users/sroske/repos","events_url":"https://api.github.com/users/sroske/events{/privacy}","received_events_url":"https://api.github.com/users/sroske/received_events","type":"User","site_admin":false}},{"starred_at":"2009-10-21T21:55:54Z","user":{"login":"doubledare","id":2166,"node_id":"MDQ6VXNlcjIxNjY=","avatar_url":"https://avatars1.githubusercontent.com/u/2166?v=4","gravatar_id":"","url":"https://api.github.com/users/doubledare","html_url":"https://github.com/doubledare","followers_url":"https://api.github.com/users/doubledare/followers","following_url":"https://api.github.com/users/doubledare/following{/other_user}","gists_url":"https://api.github.com/users/doubledare/gists{/gist_id}","starred_url":"https://api.github.com/users/doubledare/starred{/owner}{/repo}","subscriptions_url":"https://api.github.com/users/doubledare/subscriptions","organizations_url":"https://api.github.com/users/doubledare/orgs","repos_url":"https://api.github.com/users/doubledare/repos","events_url":"https://api.github.com/users/doubledare/events{/privacy}","received_events_url":"https://api.github.com/users/doubledare/received_events","type":"User","site_admin":false}},{"starred_at":"2009-10-21T21:55:54Z","user":{"login":"JamesHarrison","id":2263,"node_id":"MDQ6VXNlcjIyNjM=","avatar_url":"https://avatars0.githubusercontent.com/u/2263?v=4","gravatar_id":"","url":"https://api.github.com/users/JamesHarrison","html_url":"https://github.com/JamesHarrison","followers_url":"https://api.github.com/users/JamesHarrison/followers","following_url":"https://api.github.com/users/JamesHarrison/following{/other_user}","gists_url":"https://api.github.com/users/JamesHarrison/gists{/gist_id}","starred_url":"https://api.github.com/users/JamesHarrison/starred{/owner}{/repo}","subscriptions_url":"https://api.github.com/users/JamesHarrison/subscriptions","organizations_url":"https://api.github.com/users/JamesHarrison/orgs","repos_url":"https://api.github.com/users/JamesHarrison/repos","events_url":"https://api.github.com/users/JamesHarrison/events{/privacy}","received_events_url":"https://api.github.com/users/JamesHarrison/received_events","type":"User","site_admin":false}},{"starred_at":"2009-10-21T21:55:54Z","user":{"login":"zemariamm","id":2427,"node_id":"MDQ6VXNl
cjI0Mjc=","avatar_url":"https://avatars0.githubusercontent.com/u/2427?v=4","gravatar_id":"","url":"https://api.github.com/users/zemariamm","html_url":"https://github.com/zemariamm","followers_url":"https://api.github.com/users/zemariamm/followers","following_url":"https://api.github.com/users/zemariamm/following{/other_user}","gists_url":"https://api.github.com/users/zemariamm/gists{/gist_id}","starred_url":"https://api.github.com/users/zemariamm/starred{/owner}{/repo}","subscriptions_url":"https://api.github.com/users/zemariamm/subscriptions","organizations_url":"https://api.github.com/users/zemariamm/orgs","repos_url":"https://api.github.com/users/zemariamm/repos","events_url":"https://api.github.com/users/zemariamm/events{/privacy}","received_events_url":"https://api.github.com/users/zemariamm/received_events","type":"User","site_admin":false}},{"starred_at":"2009-10-21T21:55:54Z","user":{"login":"tgittos","id":2472,"node_id":"MDQ6VXNlcjI0NzI=","avatar_url":"https://avatars3.githubusercontent.com/u/2472?v=4","gravatar_id":"","url":"https://api.github.com/users/tgittos","html_url":"https://github.com/tgittos","followers_url":"https://api.github.com/users/tgittos/followers","following_url":"https://api.github.com/users/tgittos/following{/other_user}","gists_url":"https://api.github.com/users/tgittos/gists{/gist_id}","starred_url":"https://api.github.com/users/tgittos/starred{/owner}{/repo}","subscriptions_url":"https://api.github.com/users/tgittos/subscriptions","organizations_url":"https://api.github.com/users/tgittos/orgs","repos_url":"https://api.github.com/users/tgittos/repos","events_url":"https://api.github.com/users/tgittos/events{/privacy}","received_events_url":"https://api.github.com/users/tgittos/received_events","type":"User","site_admin":false}},{"starred_at":"2009-10-21T21:55:54Z","user":{"login":"lhl","id":2581,"node_id":"MDQ6VXNlcjI1ODE=","avatar_url":"https://avatars2.githubusercontent.com/u/2581?v=4","gravatar_id":"","url":"https://api.github.com/users/lhl","html_url":"https://github.com/lhl","followers_url":"https://api.github.com/users/lhl/followers","following_url":"https://api.github.com/users/lhl/following{/other_user}","gists_url":"https://api.github.com/users/lhl/gists{/gist_id}","starred_url":"https://api.github.com/users/lhl/starred{/owner}{/repo}","subscriptions_url":"https://api.github.com/users/lhl/subscriptions","organizations_url":"https://api.github.com/users/lhl/orgs","repos_url":"https://api.github.com/users/lhl/repos","events_url":"https://api.github.com/users/lhl/events{/privacy}","received_events_url":"https://api.github.com/users/lhl/received_events","type":"User","site_admin":false}},{"starred_at":"2009-10-21T21:55:54Z","user":{"login":"gabriel","id":2669,"node_id":"MDQ6VXNlcjI2Njk=","avatar_url":"https://avatars2.githubusercontent.com/u/2669?v=4","gravatar_id":"","url":"https://api.github.com/users/gabriel","html_url":"https://github.com/gabriel","followers_url":"https://api.github.com/users/gabriel/followers","following_url":"https://api.github.com/users/gabriel/following{/other_user}","gists_url":"https://api.github.com/users/gabriel/gists{/gist_id}","starred_url":"https://api.github.com/users/gabriel/starred{/owner}{/repo}","subscriptions_url":"https://api.github.com/users/gabriel/subscriptions","organizations_url":"https://api.github.com/users/gabriel/orgs","repos_url":"https://api.github.com/users/gabriel/repos","events_url":"https://api.github.com/users/gabriel/events{/privacy}","received_events_url":"https://api.github.com/users/gabriel/recei
ved_events","type":"User","site_admin":false}},{"starred_at":"2009-10-21T21:55:54Z","user":{"login":"paulreimer","id":2677,"node_id":"MDQ6VXNlcjI2Nzc=","avatar_url":"https://avatars2.githubusercontent.com/u/2677?v=4","gravatar_id":"","url":"https://api.github.com/users/paulreimer","html_url":"https://github.com/paulreimer","followers_url":"https://api.github.com/users/paulreimer/followers","following_url":"https://api.github.com/users/paulreimer/following{/other_user}","gists_url":"https://api.github.com/users/paulreimer/gists{/gist_id}","starred_url":"https://api.github.com/users/paulreimer/starred{/owner}{/repo}","subscriptions_url":"https://api.github.com/users/paulreimer/subscriptions","organizations_url":"https://api.github.com/users/paulreimer/orgs","repos_url":"https://api.github.com/users/paulreimer/repos","events_url":"https://api.github.com/users/paulreimer/events{/privacy}","received_events_url":"https://api.github.com/users/paulreimer/received_events","type":"User","site_admin":false}},{"starred_at":"2009-10-21T21:55:54Z","user":{"login":"kevinchiu","id":2712,"node_id":"MDQ6VXNlcjI3MTI=","avatar_url":"https://avatars3.githubusercontent.com/u/2712?v=4","gravatar_id":"","url":"https://api.github.com/users/kevinchiu","html_url":"https://github.com/kevinchiu","followers_url":"https://api.github.com/users/kevinchiu/followers","following_url":"https://api.github.com/users/kevinchiu/following{/other_user}","gists_url":"https://api.github.com/users/kevinchiu/gists{/gist_id}","starred_url":"https://api.github.com/users/kevinchiu/starred{/owner}{/repo}","subscriptions_url":"https://api.github.com/users/kevinchiu/subscriptions","organizations_url":"https://api.github.com/users/kevinchiu/orgs","repos_url":"https://api.github.com/users/kevinchiu/repos","events_url":"https://api.github.com/users/kevinchiu/events{/privacy}","received_events_url":"https://api.github.com/users/kevinchiu/received_events","type":"User","site_admin":false}},{"starred_at":"2009-10-21T21:55:54Z","user":{"login":"kumekay","id":2738,"node_id":"MDQ6VXNlcjI3Mzg=","avatar_url":"https://avatars1.githubusercontent.com/u/2738?v=4","gravatar_id":"","url":"https://api.github.com/users/kumekay","html_url":"https://github.com/kumekay","followers_url":"https://api.github.com/users/kumekay/followers","following_url":"https://api.github.com/users/kumekay/following{/other_user}","gists_url":"https://api.github.com/users/kumekay/gists{/gist_id}","starred_url":"https://api.github.com/users/kumekay/starred{/owner}{/repo}","subscriptions_url":"https://api.github.com/users/kumekay/subscriptions","organizations_url":"https://api.github.com/users/kumekay/orgs","repos_url":"https://api.github.com/users/kumekay/repos","events_url":"https://api.github.com/users/kumekay/events{/privacy}","received_events_url":"https://api.github.com/users/kumekay/received_events","type":"User","site_admin":false}},{"starred_at":"2009-10-21T21:55:54Z","user":{"login":"peej","id":2858,"node_id":"MDQ6VXNlcjI4NTg=","avatar_url":"https://avatars1.githubusercontent.com/u/2858?v=4","gravatar_id":"","url":"https://api.github.com/users/peej","html_url":"https://github.com/peej","followers_url":"https://api.github.com/users/peej/followers","following_url":"https://api.github.com/users/peej/following{/other_user}","gists_url":"https://api.github.com/users/peej/gists{/gist_id}","starred_url":"https://api.github.com/users/peej/starred{/owner}{/repo}","subscriptions_url":"https://api.github.com/users/peej/subscriptions","organizations_url":"https://api.github.com/users/peej
/orgs","repos_url":"https://api.github.com/users/peej/repos","events_url":"https://api.github.com/users/peej/events{/privacy}","received_events_url":"https://api.github.com/users/peej/received_events","type":"User","site_admin":false}},{"starred_at":"2009-10-21T21:55:54Z","user":{"login":"stinie","id":3000,"node_id":"MDQ6VXNlcjMwMDA=","avatar_url":"https://avatars2.githubusercontent.com/u/3000?v=4","gravatar_id":"","url":"https://api.github.com/users/stinie","html_url":"https://github.com/stinie","followers_url":"https://api.github.com/users/stinie/followers","following_url":"https://api.github.com/users/stinie/following{/other_user}","gists_url":"https://api.github.com/users/stinie/gists{/gist_id}","starred_url":"https://api.github.com/users/stinie/starred{/owner}{/repo}","subscriptions_url":"https://api.github.com/users/stinie/subscriptions","organizations_url":"https://api.github.com/users/stinie/orgs","repos_url":"https://api.github.com/users/stinie/repos","events_url":"https://api.github.com/users/stinie/events{/privacy}","received_events_url":"https://api.github.com/users/stinie/received_events","type":"User","site_admin":false}},{"starred_at":"2009-10-21T21:55:54Z","user":{"login":"subblue","id":3010,"node_id":"MDQ6VXNlcjMwMTA=","avatar_url":"https://avatars3.githubusercontent.com/u/3010?v=4","gravatar_id":"","url":"https://api.github.com/users/subblue","html_url":"https://github.com/subblue","followers_url":"https://api.github.com/users/subblue/followers","following_url":"https://api.github.com/users/subblue/following{/other_user}","gists_url":"https://api.github.com/users/subblue/gists{/gist_id}","starred_url":"https://api.github.com/users/subblue/starred{/owner}{/repo}","subscriptions_url":"https://api.github.com/users/subblue/subscriptions","organizations_url":"https://api.github.com/users/subblue/orgs","repos_url":"https://api.github.com/users/subblue/repos","events_url":"https://api.github.com/users/subblue/events{/privacy}","received_events_url":"https://api.github.com/users/subblue/received_events","type":"User","site_admin":false}},{"starred_at":"2009-10-21T21:55:54Z","user":{"login":"mineiro","id":3123,"node_id":"MDQ6VXNlcjMxMjM=","avatar_url":"https://avatars1.githubusercontent.com/u/3123?v=4","gravatar_id":"","url":"https://api.github.com/users/mineiro","html_url":"https://github.com/mineiro","followers_url":"https://api.github.com/users/mineiro/followers","following_url":"https://api.github.com/users/mineiro/following{/other_user}","gists_url":"https://api.github.com/users/mineiro/gists{/gist_id}","starred_url":"https://api.github.com/users/mineiro/starred{/owner}{/repo}","subscriptions_url":"https://api.github.com/users/mineiro/subscriptions","organizations_url":"https://api.github.com/users/mineiro/orgs","repos_url":"https://api.github.com/users/mineiro/repos","events_url":"https://api.github.com/users/mineiro/events{/privacy}","received_events_url":"https://api.github.com/users/mineiro/received_events","type":"User","site_admin":false}}] +
Mailu__Mailu-1196
refining the default password scheme

In https://github.com/Mailu/Mailu/pull/647 the default scheme was switched to BCRYPT. The argumentation was:

> bcrypt is fast¹ and secure[...]

While it is indeed secure, bcrypt is not fast. It is _designed_ to be slow, i.e. to consume a very high amount of CPU resources so that brute-force attacks become expensive. Unfortunately, such a password scheme is not a good choice for a stateless system where many auth checks are done in a very short time. The current bcrypt strength of 12 consumes ~0.7s of CPU time for each authentication. This might work for low-volume sites but can easily max out multiple CPUs once there is some user traffic. We should default to an alternative like PBKDF2 that ensures fast response times for auth requests.
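The CPU-cost trade-off described above can be made concrete by timing a single verification under each scheme. The snippet below is a minimal sketch, assuming the passlib library (which implements both hashes); the rounds values and the `verify_seconds` helper are illustrative, not Mailu's actual configuration, and absolute timings depend on the host.

```python
# Minimal sketch assuming passlib; rounds values are illustrative only.
import time
from passlib.hash import bcrypt, pbkdf2_sha256

def verify_seconds(hasher, password="correct horse battery staple"):
    digest = hasher.hash(password)   # the stored hash, created once
    start = time.perf_counter()
    hasher.verify(password, digest)  # one authentication check
    return time.perf_counter() - start

print("bcrypt (rounds=12)   :", verify_seconds(bcrypt.using(rounds=12)))
print("pbkdf2_sha256 (29000):", verify_seconds(pbkdf2_sha256.using(rounds=29000)))
```

On typical hardware the bcrypt check is expected to cost a large fraction of a second at work factor 12, while the PBKDF2 check finishes far quicker, which is the behaviour that motivates switching the default.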
[ { "content": "import os\n\nfrom socrate import system\n\nDEFAULT_CONFIG = {\n # Specific to the admin UI\n 'DOCKER_SOCKET': 'unix:///var/run/docker.sock',\n 'BABEL_DEFAULT_LOCALE': 'en',\n 'BABEL_DEFAULT_TIMEZONE': 'UTC',\n 'BOOTSTRAP_SERVE_LOCAL': True,\n 'RATELIMIT_STORAGE_URL': '',\n 'QUOTA_STORAGE_URL': '',\n 'DEBUG': False,\n 'DOMAIN_REGISTRATION': False,\n 'TEMPLATES_AUTO_RELOAD': True,\n # Database settings\n 'DB_FLAVOR': None,\n 'DB_USER': 'mailu',\n 'DB_PW': None,\n 'DB_HOST': 'database',\n 'DB_NAME': 'mailu',\n 'SQLITE_DATABASE_FILE':'data/main.db',\n 'SQLALCHEMY_DATABASE_URI': 'sqlite:////data/main.db',\n 'SQLALCHEMY_TRACK_MODIFICATIONS': False,\n # Statistics management\n 'INSTANCE_ID_PATH': '/data/instance',\n 'STATS_ENDPOINT': '0.{}.stats.mailu.io',\n # Common configuration variables\n 'SECRET_KEY': 'changeMe',\n 'DOMAIN': 'mailu.io',\n 'HOSTNAMES': 'mail.mailu.io,alternative.mailu.io,yetanother.mailu.io',\n 'POSTMASTER': 'postmaster',\n 'TLS_FLAVOR': 'cert',\n 'AUTH_RATELIMIT': '10/minute;1000/hour',\n 'DISABLE_STATISTICS': False,\n # Mail settings\n 'DMARC_RUA': None,\n 'DMARC_RUF': None,\n 'WELCOME': False,\n 'WELCOME_SUBJECT': 'Dummy welcome topic',\n 'WELCOME_BODY': 'Dummy welcome body',\n 'DKIM_SELECTOR': 'dkim',\n 'DKIM_PATH': '/dkim/{domain}.{selector}.key',\n 'DEFAULT_QUOTA': 1000000000,\n # Web settings\n 'SITENAME': 'Mailu',\n 'WEBSITE': 'https://mailu.io',\n 'WEB_ADMIN': '/admin',\n 'WEB_WEBMAIL': '/webmail',\n 'WEBMAIL': 'none',\n 'RECAPTCHA_PUBLIC_KEY': '',\n 'RECAPTCHA_PRIVATE_KEY': '',\n # Advanced settings\n 'PASSWORD_SCHEME': 'BLF-CRYPT',\n 'LOG_LEVEL': 'WARNING',\n # Host settings\n 'HOST_IMAP': 'imap',\n 'HOST_LMTP': 'imap:2525',\n 'HOST_POP3': 'imap',\n 'HOST_SMTP': 'smtp',\n 'HOST_AUTHSMTP': 'smtp',\n 'HOST_ADMIN': 'admin',\n 'ANTISPAM': 'none',\n 'HOST_ANTISPAM': 'antispam:11334',\n 'WEBMAIL': 'none',\n 'HOST_WEBMAIL': 'webmail',\n 'HOST_WEBDAV': 'webdav:5232',\n 'HOST_REDIS': 'redis',\n 'HOST_FRONT': 'front',\n 'SUBNET': '192.168.203.0/24',\n 'POD_ADDRESS_RANGE': None\n}\n\nclass ConfigManager(dict):\n \"\"\" Naive configuration manager that uses environment only\n \"\"\"\n\n DB_TEMPLATES = {\n 'sqlite': 'sqlite:////{SQLITE_DATABASE_FILE}',\n 'postgresql': 'postgresql://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}',\n 'mysql': 'mysql://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}'\n }\n\n def __init__(self):\n self.config = dict()\n\n def get_host_address(self, name):\n # if MYSERVICE_ADDRESS is defined, use this\n if '{}_ADDRESS'.format(name) in os.environ:\n return os.environ.get('{}_ADDRESS'.format(name))\n # otherwise use the host name and resolve it\n return system.resolve_address(self.config['HOST_{}'.format(name)])\n\n def resolve_hosts(self):\n self.config[\"IMAP_ADDRESS\"] = self.get_host_address(\"IMAP\")\n self.config[\"POP3_ADDRESS\"] = self.get_host_address(\"POP3\")\n self.config[\"AUTHSMTP_ADDRESS\"] = self.get_host_address(\"AUTHSMTP\")\n self.config[\"SMTP_ADDRESS\"] = self.get_host_address(\"SMTP\")\n self.config[\"REDIS_ADDRESS\"] = self.get_host_address(\"REDIS\")\n if self.config[\"WEBMAIL\"] != \"none\":\n self.config[\"WEBMAIL_ADDRESS\"] = self.get_host_address(\"WEBMAIL\")\n if self.config[\"ANTISPAM\"] != \"none\":\n self.config[\"ANTISPAM_ADDRESS\"] = self.get_host_address(\"ANTISPAM\")\n\n def __coerce_value(self, value):\n if isinstance(value, str) and value.lower() in ('true','yes'):\n return True\n elif isinstance(value, str) and value.lower() in ('false', 'no'):\n return False\n return value\n\n def init_app(self, app):\n 
self.config.update(app.config)\n # get environment variables\n self.config.update({\n key: self.__coerce_value(os.environ.get(key, value))\n for key, value in DEFAULT_CONFIG.items()\n })\n self.resolve_hosts()\n\n # automatically set the sqlalchemy string\n if self.config['DB_FLAVOR']:\n template = self.DB_TEMPLATES[self.config['DB_FLAVOR']]\n self.config['SQLALCHEMY_DATABASE_URI'] = template.format(**self.config)\n\n self.config['RATELIMIT_STORAGE_URL'] = 'redis://{0}/2'.format(self.config['REDIS_ADDRESS'])\n self.config['QUOTA_STORAGE_URL'] = 'redis://{0}/1'.format(self.config['REDIS_ADDRESS'])\n # update the app config itself\n app.config = self\n\n def setdefault(self, key, value):\n if key not in self.config:\n self.config[key] = value\n return self.config[key]\n\n def get(self, *args):\n return self.config.get(*args)\n\n def keys(self):\n return self.config.keys()\n\n def __getitem__(self, key):\n return self.config.get(key)\n\n def __setitem__(self, key, value):\n self.config[key] = value\n\n def __contains__(self, key):\n return key in self.config\n", "path": "core/admin/mailu/configuration.py" } ]
[ { "content": "import os\n\nfrom socrate import system\n\nDEFAULT_CONFIG = {\n # Specific to the admin UI\n 'DOCKER_SOCKET': 'unix:///var/run/docker.sock',\n 'BABEL_DEFAULT_LOCALE': 'en',\n 'BABEL_DEFAULT_TIMEZONE': 'UTC',\n 'BOOTSTRAP_SERVE_LOCAL': True,\n 'RATELIMIT_STORAGE_URL': '',\n 'QUOTA_STORAGE_URL': '',\n 'DEBUG': False,\n 'DOMAIN_REGISTRATION': False,\n 'TEMPLATES_AUTO_RELOAD': True,\n # Database settings\n 'DB_FLAVOR': None,\n 'DB_USER': 'mailu',\n 'DB_PW': None,\n 'DB_HOST': 'database',\n 'DB_NAME': 'mailu',\n 'SQLITE_DATABASE_FILE':'data/main.db',\n 'SQLALCHEMY_DATABASE_URI': 'sqlite:////data/main.db',\n 'SQLALCHEMY_TRACK_MODIFICATIONS': False,\n # Statistics management\n 'INSTANCE_ID_PATH': '/data/instance',\n 'STATS_ENDPOINT': '0.{}.stats.mailu.io',\n # Common configuration variables\n 'SECRET_KEY': 'changeMe',\n 'DOMAIN': 'mailu.io',\n 'HOSTNAMES': 'mail.mailu.io,alternative.mailu.io,yetanother.mailu.io',\n 'POSTMASTER': 'postmaster',\n 'TLS_FLAVOR': 'cert',\n 'AUTH_RATELIMIT': '10/minute;1000/hour',\n 'DISABLE_STATISTICS': False,\n # Mail settings\n 'DMARC_RUA': None,\n 'DMARC_RUF': None,\n 'WELCOME': False,\n 'WELCOME_SUBJECT': 'Dummy welcome topic',\n 'WELCOME_BODY': 'Dummy welcome body',\n 'DKIM_SELECTOR': 'dkim',\n 'DKIM_PATH': '/dkim/{domain}.{selector}.key',\n 'DEFAULT_QUOTA': 1000000000,\n # Web settings\n 'SITENAME': 'Mailu',\n 'WEBSITE': 'https://mailu.io',\n 'WEB_ADMIN': '/admin',\n 'WEB_WEBMAIL': '/webmail',\n 'WEBMAIL': 'none',\n 'RECAPTCHA_PUBLIC_KEY': '',\n 'RECAPTCHA_PRIVATE_KEY': '',\n # Advanced settings\n 'PASSWORD_SCHEME': 'PBKDF2',\n 'LOG_LEVEL': 'WARNING',\n # Host settings\n 'HOST_IMAP': 'imap',\n 'HOST_LMTP': 'imap:2525',\n 'HOST_POP3': 'imap',\n 'HOST_SMTP': 'smtp',\n 'HOST_AUTHSMTP': 'smtp',\n 'HOST_ADMIN': 'admin',\n 'ANTISPAM': 'none',\n 'HOST_ANTISPAM': 'antispam:11334',\n 'WEBMAIL': 'none',\n 'HOST_WEBMAIL': 'webmail',\n 'HOST_WEBDAV': 'webdav:5232',\n 'HOST_REDIS': 'redis',\n 'HOST_FRONT': 'front',\n 'SUBNET': '192.168.203.0/24',\n 'POD_ADDRESS_RANGE': None\n}\n\nclass ConfigManager(dict):\n \"\"\" Naive configuration manager that uses environment only\n \"\"\"\n\n DB_TEMPLATES = {\n 'sqlite': 'sqlite:////{SQLITE_DATABASE_FILE}',\n 'postgresql': 'postgresql://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}',\n 'mysql': 'mysql://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}'\n }\n\n def __init__(self):\n self.config = dict()\n\n def get_host_address(self, name):\n # if MYSERVICE_ADDRESS is defined, use this\n if '{}_ADDRESS'.format(name) in os.environ:\n return os.environ.get('{}_ADDRESS'.format(name))\n # otherwise use the host name and resolve it\n return system.resolve_address(self.config['HOST_{}'.format(name)])\n\n def resolve_hosts(self):\n self.config[\"IMAP_ADDRESS\"] = self.get_host_address(\"IMAP\")\n self.config[\"POP3_ADDRESS\"] = self.get_host_address(\"POP3\")\n self.config[\"AUTHSMTP_ADDRESS\"] = self.get_host_address(\"AUTHSMTP\")\n self.config[\"SMTP_ADDRESS\"] = self.get_host_address(\"SMTP\")\n self.config[\"REDIS_ADDRESS\"] = self.get_host_address(\"REDIS\")\n if self.config[\"WEBMAIL\"] != \"none\":\n self.config[\"WEBMAIL_ADDRESS\"] = self.get_host_address(\"WEBMAIL\")\n if self.config[\"ANTISPAM\"] != \"none\":\n self.config[\"ANTISPAM_ADDRESS\"] = self.get_host_address(\"ANTISPAM\")\n\n def __coerce_value(self, value):\n if isinstance(value, str) and value.lower() in ('true','yes'):\n return True\n elif isinstance(value, str) and value.lower() in ('false', 'no'):\n return False\n return value\n\n def init_app(self, app):\n 
self.config.update(app.config)\n # get environment variables\n self.config.update({\n key: self.__coerce_value(os.environ.get(key, value))\n for key, value in DEFAULT_CONFIG.items()\n })\n self.resolve_hosts()\n\n # automatically set the sqlalchemy string\n if self.config['DB_FLAVOR']:\n template = self.DB_TEMPLATES[self.config['DB_FLAVOR']]\n self.config['SQLALCHEMY_DATABASE_URI'] = template.format(**self.config)\n\n self.config['RATELIMIT_STORAGE_URL'] = 'redis://{0}/2'.format(self.config['REDIS_ADDRESS'])\n self.config['QUOTA_STORAGE_URL'] = 'redis://{0}/1'.format(self.config['REDIS_ADDRESS'])\n # update the app config itself\n app.config = self\n\n def setdefault(self, key, value):\n if key not in self.config:\n self.config[key] = value\n return self.config[key]\n\n def get(self, *args):\n return self.config.get(*args)\n\n def keys(self):\n return self.config.keys()\n\n def __getitem__(self, key):\n return self.config.get(key)\n\n def __setitem__(self, key, value):\n self.config[key] = value\n\n def __contains__(self, key):\n return key in self.config\n", "path": "core/admin/mailu/configuration.py" } ]
diff --git a/core/admin/mailu/configuration.py b/core/admin/mailu/configuration.py index f01e1bb96..05fad1733 100644 --- a/core/admin/mailu/configuration.py +++ b/core/admin/mailu/configuration.py @@ -51,7 +51,7 @@ 'RECAPTCHA_PUBLIC_KEY': '', 'RECAPTCHA_PRIVATE_KEY': '', # Advanced settings - 'PASSWORD_SCHEME': 'BLF-CRYPT', + 'PASSWORD_SCHEME': 'PBKDF2', 'LOG_LEVEL': 'WARNING', # Host settings 'HOST_IMAP': 'imap', diff --git a/setup/flavors/compose/mailu.env b/setup/flavors/compose/mailu.env index 3228fe410..180239c34 100644 --- a/setup/flavors/compose/mailu.env +++ b/setup/flavors/compose/mailu.env @@ -143,8 +143,8 @@ DOMAIN_REGISTRATION=true COMPOSE_PROJECT_NAME={{ compose_project_name or 'mailu' }} # Default password scheme used for newly created accounts and changed passwords -# (value: BLF-CRYPT, SHA512-CRYPT, SHA256-CRYPT, MD5-CRYPT, CRYPT) -PASSWORD_SCHEME={{ password_scheme or 'BLF-CRYPT' }} +# (value: PBKDF2, BLF-CRYPT, SHA512-CRYPT, SHA256-CRYPT) +PASSWORD_SCHEME={{ password_scheme or 'PBKDF2' }} # Header to take the real ip from REAL_IP_HEADER={{ real_ip_header }} diff --git a/tests/compose/core/mailu.env b/tests/compose/core/mailu.env index dd7bd25f1..b13e57c59 100644 --- a/tests/compose/core/mailu.env +++ b/tests/compose/core/mailu.env @@ -129,8 +129,8 @@ WEBSITE=https://mailu.io COMPOSE_PROJECT_NAME=mailu # Default password scheme used for newly created accounts and changed passwords -# (value: BLF-CRYPT, SHA512-CRYPT, SHA256-CRYPT, MD5-CRYPT, CRYPT) -PASSWORD_SCHEME=BLF-CRYPT +# (value: PBKDF2, BLF-CRYPT, SHA512-CRYPT, SHA256-CRYPT) +PASSWORD_SCHEME=PBKDF2 # Header to take the real ip from REAL_IP_HEADER= @@ -144,4 +144,4 @@ REJECT_UNLISTED_RECIPIENT= # Test for initial admin create INITIAL_ADMIN_ACCOUNT=admin INITIAL_ADMIN_DOMAIN=mailu.io -INITIAL_ADMIN_PW=FooBar \ No newline at end of file +INITIAL_ADMIN_PW=FooBar diff --git a/tests/compose/fetchmail/mailu.env b/tests/compose/fetchmail/mailu.env index c91a6deb5..636a09a95 100644 --- a/tests/compose/fetchmail/mailu.env +++ b/tests/compose/fetchmail/mailu.env @@ -129,8 +129,8 @@ WEBSITE=https://mailu.io COMPOSE_PROJECT_NAME=mailu # Default password scheme used for newly created accounts and changed passwords -# (value: BLF-CRYPT, SHA512-CRYPT, SHA256-CRYPT, MD5-CRYPT, CRYPT) -PASSWORD_SCHEME=BLF-CRYPT +# (value: PBKDF2, BLF-CRYPT, SHA512-CRYPT, SHA256-CRYPT) +PASSWORD_SCHEME=PBKDF2 # Header to take the real ip from REAL_IP_HEADER= diff --git a/tests/compose/filters/mailu.env b/tests/compose/filters/mailu.env index e165fee2d..b6d5ca8fe 100644 --- a/tests/compose/filters/mailu.env +++ b/tests/compose/filters/mailu.env @@ -129,8 +129,8 @@ WEBSITE=https://mailu.io COMPOSE_PROJECT_NAME=mailu # Default password scheme used for newly created accounts and changed passwords -# (value: BLF-CRYPT, SHA512-CRYPT, SHA256-CRYPT, MD5-CRYPT, CRYPT) -PASSWORD_SCHEME=BLF-CRYPT +# (value: PBKDF2, BLF-CRYPT, SHA512-CRYPT, SHA256-CRYPT) +PASSWORD_SCHEME=PBKDF2 # Header to take the real ip from REAL_IP_HEADER= diff --git a/tests/compose/rainloop/mailu.env b/tests/compose/rainloop/mailu.env index 65fef5c8c..9c31c8bb8 100644 --- a/tests/compose/rainloop/mailu.env +++ b/tests/compose/rainloop/mailu.env @@ -129,8 +129,8 @@ WEBSITE=https://mailu.io COMPOSE_PROJECT_NAME=mailu # Default password scheme used for newly created accounts and changed passwords -# (value: BLF-CRYPT, SHA512-CRYPT, SHA256-CRYPT, MD5-CRYPT, CRYPT) -PASSWORD_SCHEME=BLF-CRYPT +# (value: PBKDF2, BLF-CRYPT, SHA512-CRYPT, SHA256-CRYPT) +PASSWORD_SCHEME=PBKDF2 # Header to take the real 
ip from REAL_IP_HEADER= diff --git a/tests/compose/roundcube/mailu.env b/tests/compose/roundcube/mailu.env index cadaa84a0..dc503268a 100644 --- a/tests/compose/roundcube/mailu.env +++ b/tests/compose/roundcube/mailu.env @@ -129,8 +129,8 @@ WEBSITE=https://mailu.io COMPOSE_PROJECT_NAME=mailu # Default password scheme used for newly created accounts and changed passwords -# (value: BLF-CRYPT, SHA512-CRYPT, SHA256-CRYPT, MD5-CRYPT, CRYPT) -PASSWORD_SCHEME=BLF-CRYPT +# (value: PBKDF2, BLF-CRYPT, SHA512-CRYPT, SHA256-CRYPT) +PASSWORD_SCHEME=PBKDF2 # Header to take the real ip from REAL_IP_HEADER= diff --git a/tests/compose/webdav/mailu.env b/tests/compose/webdav/mailu.env index 7141bf1fa..90fb25b1d 100644 --- a/tests/compose/webdav/mailu.env +++ b/tests/compose/webdav/mailu.env @@ -129,8 +129,8 @@ WEBSITE=https://mailu.io COMPOSE_PROJECT_NAME=mailu # Default password scheme used for newly created accounts and changed passwords -# (value: BLF-CRYPT, SHA512-CRYPT, SHA256-CRYPT, MD5-CRYPT, CRYPT) -PASSWORD_SCHEME=BLF-CRYPT +# (value: PBKDF2, BLF-CRYPT, SHA512-CRYPT, SHA256-CRYPT) +PASSWORD_SCHEME=PBKDF2 # Header to take the real ip from REAL_IP_HEADER= diff --git a/towncrier/newsfragments/1194.feature b/towncrier/newsfragments/1194.feature new file mode 100644 index 000000000..ee40311a6 --- /dev/null +++ b/towncrier/newsfragments/1194.feature @@ -0,0 +1 @@ +Change default password scheme to PBKDF2
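The diff above only changes the default scheme; a deployment that prefers the slower, more brute-force-resistant hash can still opt back in through its environment file. The excerpt below is a hypothetical `mailu.env` override, using one of the values listed in the diff's own comment line.

```
# mailu.env (illustrative excerpt)
# (value: PBKDF2, BLF-CRYPT, SHA512-CRYPT, SHA256-CRYPT)
PASSWORD_SCHEME=BLF-CRYPT
```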
netbox-community__netbox-14751
related_name should not be translated

### Deployment Type

Self-hosted

### NetBox Version

v3.7.0

### Python Version

3.8

### Steps to Reproduce

If you enable the translation function, an error appears. This happens because in `netbox/dcim/models/device_components.py`, in the `DeviceBay` class, the `related_name` argument was wrapped in a translation call.

### Expected Behavior

Remove the translation for `related_name`.

### Observed Behavior

`NoneType` object has no attribute `model`
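For illustration, the distinction the report makes can be sketched as below. This is a simplified, hypothetical model inside a Django app, not the literal NetBox source: `related_name` is an identifier the ORM uses to build the reverse accessor and must stay a plain string, while only user-facing attributes such as `verbose_name` and `help_text` should go through `gettext_lazy`.

```python
# Simplified sketch of the pattern; not the actual NetBox DeviceBay definition.
from django.db import models
from django.utils.translation import gettext_lazy as _

class DeviceBay(models.Model):
    # User-facing label: translating it is fine.
    name = models.CharField(max_length=64, verbose_name=_('name'))

    installed_device = models.OneToOneField(
        to='dcim.Device',
        on_delete=models.SET_NULL,
        # ORM identifier: keep it a literal str, never wrap it in _().
        related_name='parent_bay',
        blank=True,
        null=True,
    )
```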
[ { "content": "from functools import cached_property\n\nfrom django.contrib.contenttypes.fields import GenericForeignKey, GenericRelation\nfrom django.core.exceptions import ValidationError\nfrom django.core.validators import MaxValueValidator, MinValueValidator\nfrom django.db import models\nfrom django.db.models import Sum\nfrom django.urls import reverse\nfrom django.utils.translation import gettext_lazy as _\nfrom mptt.models import MPTTModel, TreeForeignKey\n\nfrom dcim.choices import *\nfrom dcim.constants import *\nfrom dcim.fields import MACAddressField, WWNField\nfrom netbox.models import OrganizationalModel, NetBoxModel\nfrom utilities.choices import ColorChoices\nfrom utilities.fields import ColorField, NaturalOrderingField\nfrom utilities.mptt import TreeManager\nfrom utilities.ordering import naturalize_interface\nfrom utilities.query_functions import CollateAsChar\nfrom utilities.tracking import TrackingModelMixin\nfrom wireless.choices import *\nfrom wireless.utils import get_channel_attr\n\n\n__all__ = (\n 'BaseInterface',\n 'CabledObjectModel',\n 'ConsolePort',\n 'ConsoleServerPort',\n 'DeviceBay',\n 'FrontPort',\n 'Interface',\n 'InventoryItem',\n 'InventoryItemRole',\n 'ModuleBay',\n 'PathEndpoint',\n 'PowerOutlet',\n 'PowerPort',\n 'RearPort',\n)\n\n\nclass ComponentModel(NetBoxModel):\n \"\"\"\n An abstract model inherited by any model which has a parent Device.\n \"\"\"\n device = models.ForeignKey(\n to='dcim.Device',\n on_delete=models.CASCADE,\n related_name='%(class)ss'\n )\n name = models.CharField(\n verbose_name=_('name'),\n max_length=64\n )\n _name = NaturalOrderingField(\n target_field='name',\n max_length=100,\n blank=True\n )\n label = models.CharField(\n verbose_name=_('label'),\n max_length=64,\n blank=True,\n help_text=_('Physical label')\n )\n description = models.CharField(\n verbose_name=_('description'),\n max_length=200,\n blank=True\n )\n\n class Meta:\n abstract = True\n ordering = ('device', '_name')\n constraints = (\n models.UniqueConstraint(\n fields=('device', 'name'),\n name='%(app_label)s_%(class)s_unique_device_name'\n ),\n )\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n # Cache the original Device ID for reference under clean()\n self._original_device = self.__dict__.get('device_id')\n\n def __str__(self):\n if self.label:\n return f\"{self.name} ({self.label})\"\n return self.name\n\n def to_objectchange(self, action):\n objectchange = super().to_objectchange(action)\n objectchange.related_object = self.device\n return objectchange\n\n def clean(self):\n super().clean()\n\n # Check list of Modules that allow device field to be changed\n if (type(self) not in [InventoryItem]) and (self.pk is not None) and (self._original_device != self.device_id):\n raise ValidationError({\n \"device\": _(\"Components cannot be moved to a different device.\")\n })\n\n @property\n def parent_object(self):\n return self.device\n\n\nclass ModularComponentModel(ComponentModel):\n module = models.ForeignKey(\n to='dcim.Module',\n on_delete=models.CASCADE,\n related_name='%(class)ss',\n blank=True,\n null=True\n )\n inventory_items = GenericRelation(\n to='dcim.InventoryItem',\n content_type_field='component_type',\n object_id_field='component_id'\n )\n\n class Meta(ComponentModel.Meta):\n abstract = True\n\n\nclass CabledObjectModel(models.Model):\n \"\"\"\n An abstract model inherited by all models to which a Cable can terminate. 
Provides the `cable` and `cable_end`\n fields for caching cable associations, as well as `mark_connected` to designate \"fake\" connections.\n \"\"\"\n cable = models.ForeignKey(\n to='dcim.Cable',\n on_delete=models.SET_NULL,\n related_name='+',\n blank=True,\n null=True\n )\n cable_end = models.CharField(\n verbose_name=_('cable end'),\n max_length=1,\n blank=True,\n choices=CableEndChoices\n )\n mark_connected = models.BooleanField(\n verbose_name=_('mark connected'),\n default=False,\n help_text=_('Treat as if a cable is connected')\n )\n\n cable_terminations = GenericRelation(\n to='dcim.CableTermination',\n content_type_field='termination_type',\n object_id_field='termination_id',\n related_query_name='%(class)s',\n )\n\n class Meta:\n abstract = True\n\n def clean(self):\n super().clean()\n\n if self.cable and not self.cable_end:\n raise ValidationError({\n \"cable_end\": _(\"Must specify cable end (A or B) when attaching a cable.\")\n })\n if self.cable_end and not self.cable:\n raise ValidationError({\n \"cable_end\": _(\"Cable end must not be set without a cable.\")\n })\n if self.mark_connected and self.cable:\n raise ValidationError({\n \"mark_connected\": _(\"Cannot mark as connected with a cable attached.\")\n })\n\n @property\n def link(self):\n \"\"\"\n Generic wrapper for a Cable, WirelessLink, or some other relation to a connected termination.\n \"\"\"\n return self.cable\n\n @cached_property\n def link_peers(self):\n if self.cable:\n peers = self.cable.terminations.exclude(cable_end=self.cable_end).prefetch_related('termination')\n return [peer.termination for peer in peers]\n return []\n\n @property\n def _occupied(self):\n return bool(self.mark_connected or self.cable_id)\n\n @property\n def parent_object(self):\n raise NotImplementedError(\n _(\"{class_name} models must declare a parent_object property\").format(class_name=self.__class__.__name__)\n )\n\n @property\n def opposite_cable_end(self):\n if not self.cable_end:\n return None\n return CableEndChoices.SIDE_A if self.cable_end == CableEndChoices.SIDE_B else CableEndChoices.SIDE_B\n\n\nclass PathEndpoint(models.Model):\n \"\"\"\n An abstract model inherited by any CabledObjectModel subclass which represents the end of a CablePath; specifically,\n these include ConsolePort, ConsoleServerPort, PowerPort, PowerOutlet, Interface, and PowerFeed.\n\n `_path` references the CablePath originating from this instance, if any. It is set or cleared by the receivers in\n dcim.signals in response to changes in the cable path, and complements the `origin` GenericForeignKey field on the\n CablePath model. `_path` should not be accessed directly; rather, use the `path` property.\n\n `connected_endpoints()` is a convenience method for returning the destination of the associated CablePath, if any.\n \"\"\"\n _path = models.ForeignKey(\n to='dcim.CablePath',\n on_delete=models.SET_NULL,\n null=True,\n blank=True\n )\n\n class Meta:\n abstract = True\n\n def trace(self):\n origin = self\n path = []\n\n # Construct the complete path (including e.g. 
bridged interfaces)\n while origin is not None:\n\n if origin._path is None:\n break\n\n path.extend(origin._path.path_objects)\n\n # If the path ends at a non-connected pass-through port, pad out the link and far-end terminations\n if len(path) % 3 == 1:\n path.extend(([], []))\n # If the path ends at a site or provider network, inject a null \"link\" to render an attachment\n elif len(path) % 3 == 2:\n path.insert(-1, [])\n\n # Check for a bridged relationship to continue the trace\n destinations = origin._path.destinations\n if len(destinations) == 1:\n origin = getattr(destinations[0], 'bridge', None)\n else:\n origin = None\n\n # Return the path as a list of three-tuples (A termination(s), cable(s), B termination(s))\n return list(zip(*[iter(path)] * 3))\n\n @property\n def path(self):\n return self._path\n\n @cached_property\n def connected_endpoints(self):\n \"\"\"\n Caching accessor for the attached CablePath's destination (if any)\n \"\"\"\n return self._path.destinations if self._path else []\n\n\n#\n# Console components\n#\n\nclass ConsolePort(ModularComponentModel, CabledObjectModel, PathEndpoint, TrackingModelMixin):\n \"\"\"\n A physical console port within a Device. ConsolePorts connect to ConsoleServerPorts.\n \"\"\"\n type = models.CharField(\n verbose_name=_('type'),\n max_length=50,\n choices=ConsolePortTypeChoices,\n blank=True,\n help_text=_('Physical port type')\n )\n speed = models.PositiveIntegerField(\n verbose_name=_('speed'),\n choices=ConsolePortSpeedChoices,\n blank=True,\n null=True,\n help_text=_('Port speed in bits per second')\n )\n\n clone_fields = ('device', 'module', 'type', 'speed')\n\n class Meta(ModularComponentModel.Meta):\n verbose_name = _('console port')\n verbose_name_plural = _('console ports')\n\n def get_absolute_url(self):\n return reverse('dcim:consoleport', kwargs={'pk': self.pk})\n\n\nclass ConsoleServerPort(ModularComponentModel, CabledObjectModel, PathEndpoint, TrackingModelMixin):\n \"\"\"\n A physical port within a Device (typically a designated console server) which provides access to ConsolePorts.\n \"\"\"\n type = models.CharField(\n verbose_name=_('type'),\n max_length=50,\n choices=ConsolePortTypeChoices,\n blank=True,\n help_text=_('Physical port type')\n )\n speed = models.PositiveIntegerField(\n verbose_name=_('speed'),\n choices=ConsolePortSpeedChoices,\n blank=True,\n null=True,\n help_text=_('Port speed in bits per second')\n )\n\n clone_fields = ('device', 'module', 'type', 'speed')\n\n class Meta(ModularComponentModel.Meta):\n verbose_name = _('console server port')\n verbose_name_plural = _('console server ports')\n\n def get_absolute_url(self):\n return reverse('dcim:consoleserverport', kwargs={'pk': self.pk})\n\n\n#\n# Power components\n#\n\nclass PowerPort(ModularComponentModel, CabledObjectModel, PathEndpoint, TrackingModelMixin):\n \"\"\"\n A physical power supply (intake) port within a Device. 
PowerPorts connect to PowerOutlets.\n \"\"\"\n type = models.CharField(\n verbose_name=_('type'),\n max_length=50,\n choices=PowerPortTypeChoices,\n blank=True,\n help_text=_('Physical port type')\n )\n maximum_draw = models.PositiveIntegerField(\n verbose_name=_('maximum draw'),\n blank=True,\n null=True,\n validators=[MinValueValidator(1)],\n help_text=_(\"Maximum power draw (watts)\")\n )\n allocated_draw = models.PositiveIntegerField(\n verbose_name=_('allocated draw'),\n blank=True,\n null=True,\n validators=[MinValueValidator(1)],\n help_text=_('Allocated power draw (watts)')\n )\n\n clone_fields = ('device', 'module', 'maximum_draw', 'allocated_draw')\n\n class Meta(ModularComponentModel.Meta):\n verbose_name = _('power port')\n verbose_name_plural = _('power ports')\n\n def get_absolute_url(self):\n return reverse('dcim:powerport', kwargs={'pk': self.pk})\n\n def clean(self):\n super().clean()\n\n if self.maximum_draw is not None and self.allocated_draw is not None:\n if self.allocated_draw > self.maximum_draw:\n raise ValidationError({\n 'allocated_draw': _(\n \"Allocated draw cannot exceed the maximum draw ({maximum_draw}W).\"\n ).format(maximum_draw=self.maximum_draw)\n })\n\n def get_downstream_powerports(self, leg=None):\n \"\"\"\n Return a queryset of all PowerPorts connected via cable to a child PowerOutlet. For example, in the topology\n below, PP1.get_downstream_powerports() would return PP2-4.\n\n ---- PO1 <---> PP2\n /\n PP1 ------- PO2 <---> PP3\n \\\n ---- PO3 <---> PP4\n\n \"\"\"\n poweroutlets = self.poweroutlets.filter(cable__isnull=False)\n if leg:\n poweroutlets = poweroutlets.filter(feed_leg=leg)\n if not poweroutlets:\n return PowerPort.objects.none()\n\n q = Q()\n for poweroutlet in poweroutlets:\n q |= Q(\n cable=poweroutlet.cable,\n cable_end=poweroutlet.opposite_cable_end\n )\n\n return PowerPort.objects.filter(q)\n\n def get_power_draw(self):\n \"\"\"\n Return the allocated and maximum power draw (in VA) and child PowerOutlet count for this PowerPort.\n \"\"\"\n from dcim.models import PowerFeed\n\n # Calculate aggregate draw of all child power outlets if no numbers have been defined manually\n if self.allocated_draw is None and self.maximum_draw is None:\n utilization = self.get_downstream_powerports().aggregate(\n maximum_draw_total=Sum('maximum_draw'),\n allocated_draw_total=Sum('allocated_draw'),\n )\n ret = {\n 'allocated': utilization['allocated_draw_total'] or 0,\n 'maximum': utilization['maximum_draw_total'] or 0,\n 'outlet_count': self.poweroutlets.count(),\n 'legs': [],\n }\n\n # Calculate per-leg aggregates for three-phase power feeds\n if len(self.link_peers) == 1 and isinstance(self.link_peers[0], PowerFeed) and \\\n self.link_peers[0].phase == PowerFeedPhaseChoices.PHASE_3PHASE:\n for leg, leg_name in PowerOutletFeedLegChoices:\n utilization = self.get_downstream_powerports(leg=leg).aggregate(\n maximum_draw_total=Sum('maximum_draw'),\n allocated_draw_total=Sum('allocated_draw'),\n )\n ret['legs'].append({\n 'name': leg_name,\n 'allocated': utilization['allocated_draw_total'] or 0,\n 'maximum': utilization['maximum_draw_total'] or 0,\n 'outlet_count': self.poweroutlets.filter(feed_leg=leg).count(),\n })\n\n return ret\n\n # Default to administratively defined values\n return {\n 'allocated': self.allocated_draw or 0,\n 'maximum': self.maximum_draw or 0,\n 'outlet_count': self.poweroutlets.count(),\n 'legs': [],\n }\n\n\nclass PowerOutlet(ModularComponentModel, CabledObjectModel, PathEndpoint, TrackingModelMixin):\n \"\"\"\n A physical power 
outlet (output) within a Device which provides power to a PowerPort.\n \"\"\"\n type = models.CharField(\n verbose_name=_('type'),\n max_length=50,\n choices=PowerOutletTypeChoices,\n blank=True,\n help_text=_('Physical port type')\n )\n power_port = models.ForeignKey(\n to='dcim.PowerPort',\n on_delete=models.SET_NULL,\n blank=True,\n null=True,\n related_name='poweroutlets'\n )\n feed_leg = models.CharField(\n verbose_name=_('feed leg'),\n max_length=50,\n choices=PowerOutletFeedLegChoices,\n blank=True,\n help_text=_('Phase (for three-phase feeds)')\n )\n\n clone_fields = ('device', 'module', 'type', 'power_port', 'feed_leg')\n\n class Meta(ModularComponentModel.Meta):\n verbose_name = _('power outlet')\n verbose_name_plural = _('power outlets')\n\n def get_absolute_url(self):\n return reverse('dcim:poweroutlet', kwargs={'pk': self.pk})\n\n def clean(self):\n super().clean()\n\n # Validate power port assignment\n if self.power_port and self.power_port.device != self.device:\n raise ValidationError(\n _(\"Parent power port ({power_port}) must belong to the same device\").format(power_port=self.power_port)\n )\n\n\n#\n# Interfaces\n#\n\nclass BaseInterface(models.Model):\n \"\"\"\n Abstract base class for fields shared by dcim.Interface and virtualization.VMInterface.\n \"\"\"\n enabled = models.BooleanField(\n verbose_name=_('enabled'),\n default=True\n )\n mac_address = MACAddressField(\n null=True,\n blank=True,\n verbose_name=_('MAC address')\n )\n mtu = models.PositiveIntegerField(\n blank=True,\n null=True,\n validators=[\n MinValueValidator(INTERFACE_MTU_MIN),\n MaxValueValidator(INTERFACE_MTU_MAX)\n ],\n verbose_name=_('MTU')\n )\n mode = models.CharField(\n verbose_name=_('mode'),\n max_length=50,\n choices=InterfaceModeChoices,\n blank=True,\n help_text=_('IEEE 802.1Q tagging strategy')\n )\n parent = models.ForeignKey(\n to='self',\n on_delete=models.RESTRICT,\n related_name='child_interfaces',\n null=True,\n blank=True,\n verbose_name=_('parent interface')\n )\n bridge = models.ForeignKey(\n to='self',\n on_delete=models.SET_NULL,\n related_name='bridge_interfaces',\n null=True,\n blank=True,\n verbose_name=_('bridge interface')\n )\n\n class Meta:\n abstract = True\n\n def save(self, *args, **kwargs):\n\n # Remove untagged VLAN assignment for non-802.1Q interfaces\n if not self.mode:\n self.untagged_vlan = None\n\n # Only \"tagged\" interfaces may have tagged VLANs assigned. (\"tagged all\" implies all VLANs are assigned.)\n if self.pk and self.mode != InterfaceModeChoices.MODE_TAGGED:\n self.tagged_vlans.clear()\n\n return super().save(*args, **kwargs)\n\n @property\n def tunnel_termination(self):\n return self.tunnel_terminations.first()\n\n @property\n def count_ipaddresses(self):\n return self.ip_addresses.count()\n\n @property\n def count_fhrp_groups(self):\n return self.fhrp_group_assignments.count()\n\n\nclass Interface(ModularComponentModel, BaseInterface, CabledObjectModel, PathEndpoint, TrackingModelMixin):\n \"\"\"\n A network interface within a Device. 
A physical Interface can connect to exactly one other Interface.\n \"\"\"\n # Override ComponentModel._name to specify naturalize_interface function\n _name = NaturalOrderingField(\n target_field='name',\n naturalize_function=naturalize_interface,\n max_length=100,\n blank=True\n )\n vdcs = models.ManyToManyField(\n to='dcim.VirtualDeviceContext',\n related_name='interfaces'\n )\n lag = models.ForeignKey(\n to='self',\n on_delete=models.SET_NULL,\n related_name='member_interfaces',\n null=True,\n blank=True,\n verbose_name=_('parent LAG')\n )\n type = models.CharField(\n verbose_name=_('type'),\n max_length=50,\n choices=InterfaceTypeChoices\n )\n mgmt_only = models.BooleanField(\n default=False,\n verbose_name=_('management only'),\n help_text=_('This interface is used only for out-of-band management')\n )\n speed = models.PositiveIntegerField(\n blank=True,\n null=True,\n verbose_name=_('speed (Kbps)')\n )\n duplex = models.CharField(\n verbose_name=_('duplex'),\n max_length=50,\n blank=True,\n null=True,\n choices=InterfaceDuplexChoices\n )\n wwn = WWNField(\n null=True,\n blank=True,\n verbose_name=_('WWN'),\n help_text=_('64-bit World Wide Name')\n )\n rf_role = models.CharField(\n max_length=30,\n choices=WirelessRoleChoices,\n blank=True,\n verbose_name=_('wireless role')\n )\n rf_channel = models.CharField(\n max_length=50,\n choices=WirelessChannelChoices,\n blank=True,\n verbose_name=_('wireless channel')\n )\n rf_channel_frequency = models.DecimalField(\n max_digits=7,\n decimal_places=2,\n blank=True,\n null=True,\n verbose_name=_('channel frequency (MHz)'),\n help_text=_(\"Populated by selected channel (if set)\")\n )\n rf_channel_width = models.DecimalField(\n max_digits=7,\n decimal_places=3,\n blank=True,\n null=True,\n verbose_name=('channel width (MHz)'),\n help_text=_(\"Populated by selected channel (if set)\")\n )\n tx_power = models.PositiveSmallIntegerField(\n blank=True,\n null=True,\n validators=(MaxValueValidator(127),),\n verbose_name=_('transmit power (dBm)')\n )\n poe_mode = models.CharField(\n max_length=50,\n choices=InterfacePoEModeChoices,\n blank=True,\n verbose_name=_('PoE mode')\n )\n poe_type = models.CharField(\n max_length=50,\n choices=InterfacePoETypeChoices,\n blank=True,\n verbose_name=_('PoE type')\n )\n wireless_link = models.ForeignKey(\n to='wireless.WirelessLink',\n on_delete=models.SET_NULL,\n related_name='+',\n blank=True,\n null=True\n )\n wireless_lans = models.ManyToManyField(\n to='wireless.WirelessLAN',\n related_name='interfaces',\n blank=True,\n verbose_name=_('wireless LANs')\n )\n untagged_vlan = models.ForeignKey(\n to='ipam.VLAN',\n on_delete=models.SET_NULL,\n related_name='interfaces_as_untagged',\n null=True,\n blank=True,\n verbose_name=_('untagged VLAN')\n )\n tagged_vlans = models.ManyToManyField(\n to='ipam.VLAN',\n related_name='interfaces_as_tagged',\n blank=True,\n verbose_name=_('tagged VLANs')\n )\n vrf = models.ForeignKey(\n to='ipam.VRF',\n on_delete=models.SET_NULL,\n related_name='interfaces',\n null=True,\n blank=True,\n verbose_name=_('VRF')\n )\n ip_addresses = GenericRelation(\n to='ipam.IPAddress',\n content_type_field='assigned_object_type',\n object_id_field='assigned_object_id',\n related_query_name='interface'\n )\n fhrp_group_assignments = GenericRelation(\n to='ipam.FHRPGroupAssignment',\n content_type_field='interface_type',\n object_id_field='interface_id',\n related_query_name='+'\n )\n tunnel_terminations = GenericRelation(\n to='vpn.TunnelTermination',\n content_type_field='termination_type',\n 
object_id_field='termination_id',\n related_query_name='interface'\n )\n l2vpn_terminations = GenericRelation(\n to='vpn.L2VPNTermination',\n content_type_field='assigned_object_type',\n object_id_field='assigned_object_id',\n related_query_name='interface',\n )\n\n clone_fields = (\n 'device', 'module', 'parent', 'bridge', 'lag', 'type', 'mgmt_only', 'mtu', 'mode', 'speed', 'duplex', 'rf_role',\n 'rf_channel', 'rf_channel_frequency', 'rf_channel_width', 'tx_power', 'poe_mode', 'poe_type', 'vrf',\n )\n\n class Meta(ModularComponentModel.Meta):\n ordering = ('device', CollateAsChar('_name'))\n verbose_name = _('interface')\n verbose_name_plural = _('interfaces')\n\n def get_absolute_url(self):\n return reverse('dcim:interface', kwargs={'pk': self.pk})\n\n def clean(self):\n super().clean()\n\n # Virtual Interfaces cannot have a Cable attached\n if self.is_virtual and self.cable:\n raise ValidationError({\n 'type': _(\"{display_type} interfaces cannot have a cable attached.\").format(\n display_type=self.get_type_display()\n )\n })\n\n # Virtual Interfaces cannot be marked as connected\n if self.is_virtual and self.mark_connected:\n raise ValidationError({\n 'mark_connected': _(\"{display_type} interfaces cannot be marked as connected.\".format(\n display_type=self.get_type_display())\n )\n })\n\n # Parent validation\n\n # An interface cannot be its own parent\n if self.pk and self.parent_id == self.pk:\n raise ValidationError({'parent': _(\"An interface cannot be its own parent.\")})\n\n # A physical interface cannot have a parent interface\n if self.type != InterfaceTypeChoices.TYPE_VIRTUAL and self.parent is not None:\n raise ValidationError({'parent': _(\"Only virtual interfaces may be assigned to a parent interface.\")})\n\n # An interface's parent must belong to the same device or virtual chassis\n if self.parent and self.parent.device != self.device:\n if self.device.virtual_chassis is None:\n raise ValidationError({\n 'parent': _(\n \"The selected parent interface ({interface}) belongs to a different device ({device})\"\n ).format(interface=self.parent, device=self.parent.device)\n })\n elif self.parent.device.virtual_chassis != self.parent.virtual_chassis:\n raise ValidationError({\n 'parent': _(\n \"The selected parent interface ({interface}) belongs to {device}, which is not part of \"\n \"virtual chassis {virtual_chassis}.\"\n ).format(\n interface=self.parent,\n device=self.parent_device,\n virtual_chassis=self.device.virtual_chassis\n )\n })\n\n # Bridge validation\n\n # An interface cannot be bridged to itself\n if self.pk and self.bridge_id == self.pk:\n raise ValidationError({'bridge': _(\"An interface cannot be bridged to itself.\")})\n\n # A bridged interface belong to the same device or virtual chassis\n if self.bridge and self.bridge.device != self.device:\n if self.device.virtual_chassis is None:\n raise ValidationError({\n 'bridge': _(\n \"The selected bridge interface ({bridge}) belongs to a different device ({device}).\"\n ).format(bridge=self.bridge, device=self.bridge.device)\n })\n elif self.bridge.device.virtual_chassis != self.device.virtual_chassis:\n raise ValidationError({\n 'bridge': _(\n \"The selected bridge interface ({interface}) belongs to {device}, which is not part of virtual \"\n \"chassis {virtual_chassis}.\"\n ).format(\n interface=self.bridge, device=self.bridge.device, virtual_chassis=self.device.virtual_chassis\n )\n })\n\n # LAG validation\n\n # A virtual interface cannot have a parent LAG\n if self.type == InterfaceTypeChoices.TYPE_VIRTUAL and 
self.lag is not None:\n raise ValidationError({'lag': _(\"Virtual interfaces cannot have a parent LAG interface.\")})\n\n # A LAG interface cannot be its own parent\n if self.pk and self.lag_id == self.pk:\n raise ValidationError({'lag': _(\"A LAG interface cannot be its own parent.\")})\n\n # An interface's LAG must belong to the same device or virtual chassis\n if self.lag and self.lag.device != self.device:\n if self.device.virtual_chassis is None:\n raise ValidationError({\n 'lag': _(\n \"The selected LAG interface ({lag}) belongs to a different device ({device}).\"\n ).format(lag=self.lag, device=self.lag.device)\n })\n elif self.lag.device.virtual_chassis != self.device.virtual_chassis:\n raise ValidationError({\n 'lag': _(\n \"The selected LAG interface ({lag}) belongs to {device}, which is not part of virtual chassis \"\n \"{virtual_chassis}.\".format(\n lag=self.lag, device=self.lag.device, virtual_chassis=self.device.virtual_chassis)\n )\n })\n\n # PoE validation\n\n # Only physical interfaces may have a PoE mode/type assigned\n if self.poe_mode and self.is_virtual:\n raise ValidationError({\n 'poe_mode': _(\"Virtual interfaces cannot have a PoE mode.\")\n })\n if self.poe_type and self.is_virtual:\n raise ValidationError({\n 'poe_type': _(\"Virtual interfaces cannot have a PoE type.\")\n })\n\n # An interface with a PoE type set must also specify a mode\n if self.poe_type and not self.poe_mode:\n raise ValidationError({\n 'poe_type': _(\"Must specify PoE mode when designating a PoE type.\")\n })\n\n # Wireless validation\n\n # RF role & channel may only be set for wireless interfaces\n if self.rf_role and not self.is_wireless:\n raise ValidationError({'rf_role': _(\"Wireless role may be set only on wireless interfaces.\")})\n if self.rf_channel and not self.is_wireless:\n raise ValidationError({'rf_channel': _(\"Channel may be set only on wireless interfaces.\")})\n\n # Validate channel frequency against interface type and selected channel (if any)\n if self.rf_channel_frequency:\n if not self.is_wireless:\n raise ValidationError({\n 'rf_channel_frequency': _(\"Channel frequency may be set only on wireless interfaces.\"),\n })\n if self.rf_channel and self.rf_channel_frequency != get_channel_attr(self.rf_channel, 'frequency'):\n raise ValidationError({\n 'rf_channel_frequency': _(\"Cannot specify custom frequency with channel selected.\"),\n })\n\n # Validate channel width against interface type and selected channel (if any)\n if self.rf_channel_width:\n if not self.is_wireless:\n raise ValidationError({'rf_channel_width': _(\"Channel width may be set only on wireless interfaces.\")})\n if self.rf_channel and self.rf_channel_width != get_channel_attr(self.rf_channel, 'width'):\n raise ValidationError({'rf_channel_width': _(\"Cannot specify custom width with channel selected.\")})\n\n # VLAN validation\n\n # Validate untagged VLAN\n if self.untagged_vlan and self.untagged_vlan.site not in [self.device.site, None]:\n raise ValidationError({\n 'untagged_vlan': _(\n \"The untagged VLAN ({untagged_vlan}) must belong to the same site as the interface's parent \"\n \"device, or it must be global.\"\n ).format(untagged_vlan=self.untagged_vlan)\n })\n\n def save(self, *args, **kwargs):\n\n # Set absolute channel attributes from selected options\n if self.rf_channel and not self.rf_channel_frequency:\n self.rf_channel_frequency = get_channel_attr(self.rf_channel, 'frequency')\n if self.rf_channel and not self.rf_channel_width:\n self.rf_channel_width = get_channel_attr(self.rf_channel, 
'width')\n\n super().save(*args, **kwargs)\n\n @property\n def _occupied(self):\n return super()._occupied or bool(self.wireless_link_id)\n\n @property\n def is_wired(self):\n return not self.is_virtual and not self.is_wireless\n\n @property\n def is_virtual(self):\n return self.type in VIRTUAL_IFACE_TYPES\n\n @property\n def is_wireless(self):\n return self.type in WIRELESS_IFACE_TYPES\n\n @property\n def is_lag(self):\n return self.type == InterfaceTypeChoices.TYPE_LAG\n\n @property\n def is_bridge(self):\n return self.type == InterfaceTypeChoices.TYPE_BRIDGE\n\n @property\n def link(self):\n return self.cable or self.wireless_link\n\n @cached_property\n def link_peers(self):\n if self.cable:\n return super().link_peers\n if self.wireless_link:\n # Return the opposite side of the attached wireless link\n if self.wireless_link.interface_a == self:\n return [self.wireless_link.interface_b]\n else:\n return [self.wireless_link.interface_a]\n return []\n\n @property\n def l2vpn_termination(self):\n return self.l2vpn_terminations.first()\n\n\n#\n# Pass-through ports\n#\n\nclass FrontPort(ModularComponentModel, CabledObjectModel, TrackingModelMixin):\n \"\"\"\n A pass-through port on the front of a Device.\n \"\"\"\n type = models.CharField(\n verbose_name=_('type'),\n max_length=50,\n choices=PortTypeChoices\n )\n color = ColorField(\n verbose_name=_('color'),\n blank=True\n )\n rear_port = models.ForeignKey(\n to='dcim.RearPort',\n on_delete=models.CASCADE,\n related_name='frontports'\n )\n rear_port_position = models.PositiveSmallIntegerField(\n verbose_name=_('rear port position'),\n default=1,\n validators=[\n MinValueValidator(REARPORT_POSITIONS_MIN),\n MaxValueValidator(REARPORT_POSITIONS_MAX)\n ],\n help_text=_('Mapped position on corresponding rear port')\n )\n\n clone_fields = ('device', 'type', 'color')\n\n class Meta(ModularComponentModel.Meta):\n constraints = (\n models.UniqueConstraint(\n fields=('device', 'name'),\n name='%(app_label)s_%(class)s_unique_device_name'\n ),\n models.UniqueConstraint(\n fields=('rear_port', 'rear_port_position'),\n name='%(app_label)s_%(class)s_unique_rear_port_position'\n ),\n )\n verbose_name = _('front port')\n verbose_name_plural = _('front ports')\n\n def get_absolute_url(self):\n return reverse('dcim:frontport', kwargs={'pk': self.pk})\n\n def clean(self):\n super().clean()\n\n if hasattr(self, 'rear_port'):\n\n # Validate rear port assignment\n if self.rear_port.device != self.device:\n raise ValidationError({\n \"rear_port\": _(\n \"Rear port ({rear_port}) must belong to the same device\"\n ).format(rear_port=self.rear_port)\n })\n\n # Validate rear port position assignment\n if self.rear_port_position > self.rear_port.positions:\n raise ValidationError({\n \"rear_port_position\": _(\n \"Invalid rear port position ({rear_port_position}): Rear port {name} has only {positions} \"\n \"positions.\"\n ).format(\n rear_port_position=self.rear_port_position,\n name=self.rear_port.name,\n positions=self.rear_port.positions\n )\n })\n\n\nclass RearPort(ModularComponentModel, CabledObjectModel, TrackingModelMixin):\n \"\"\"\n A pass-through port on the rear of a Device.\n \"\"\"\n type = models.CharField(\n verbose_name=_('type'),\n max_length=50,\n choices=PortTypeChoices\n )\n color = ColorField(\n verbose_name=_('color'),\n blank=True\n )\n positions = models.PositiveSmallIntegerField(\n verbose_name=_('positions'),\n default=1,\n validators=[\n MinValueValidator(REARPORT_POSITIONS_MIN),\n MaxValueValidator(REARPORT_POSITIONS_MAX)\n ],\n 
help_text=_('Number of front ports which may be mapped')\n )\n clone_fields = ('device', 'type', 'color', 'positions')\n\n class Meta(ModularComponentModel.Meta):\n verbose_name = _('rear port')\n verbose_name_plural = _('rear ports')\n\n def get_absolute_url(self):\n return reverse('dcim:rearport', kwargs={'pk': self.pk})\n\n def clean(self):\n super().clean()\n\n # Check that positions count is greater than or equal to the number of associated FrontPorts\n if self.pk:\n frontport_count = self.frontports.count()\n if self.positions < frontport_count:\n raise ValidationError({\n \"positions\": _(\n \"The number of positions cannot be less than the number of mapped front ports \"\n \"({frontport_count})\"\n ).format(frontport_count=frontport_count)\n })\n\n\n#\n# Bays\n#\n\nclass ModuleBay(ComponentModel, TrackingModelMixin):\n \"\"\"\n An empty space within a Device which can house a child device\n \"\"\"\n position = models.CharField(\n verbose_name=_('position'),\n max_length=30,\n blank=True,\n help_text=_('Identifier to reference when renaming installed components')\n )\n\n clone_fields = ('device',)\n\n class Meta(ComponentModel.Meta):\n verbose_name = _('module bay')\n verbose_name_plural = _('module bays')\n\n def get_absolute_url(self):\n return reverse('dcim:modulebay', kwargs={'pk': self.pk})\n\n\nclass DeviceBay(ComponentModel, TrackingModelMixin):\n \"\"\"\n An empty space within a Device which can house a child device\n \"\"\"\n installed_device = models.OneToOneField(\n to='dcim.Device',\n on_delete=models.SET_NULL,\n related_name=_('parent_bay'),\n blank=True,\n null=True\n )\n\n clone_fields = ('device',)\n\n class Meta(ComponentModel.Meta):\n verbose_name = _('device bay')\n verbose_name_plural = _('device bays')\n\n def get_absolute_url(self):\n return reverse('dcim:devicebay', kwargs={'pk': self.pk})\n\n def clean(self):\n super().clean()\n\n # Validate that the parent Device can have DeviceBays\n if not self.device.device_type.is_parent_device:\n raise ValidationError(_(\"This type of device ({device_type}) does not support device bays.\").format(\n device_type=self.device.device_type\n ))\n\n # Cannot install a device into itself, obviously\n if self.device == self.installed_device:\n raise ValidationError(_(\"Cannot install a device into itself.\"))\n\n # Check that the installed device is not already installed elsewhere\n if self.installed_device:\n current_bay = DeviceBay.objects.filter(installed_device=self.installed_device).first()\n if current_bay and current_bay != self:\n raise ValidationError({\n 'installed_device': _(\n \"Cannot install the specified device; device is already installed in {bay}.\"\n ).format(bay=current_bay)\n })\n\n\n#\n# Inventory items\n#\n\n\nclass InventoryItemRole(OrganizationalModel):\n \"\"\"\n Inventory items may optionally be assigned a functional role.\n \"\"\"\n color = ColorField(\n verbose_name=_('color'),\n default=ColorChoices.COLOR_GREY\n )\n\n class Meta:\n ordering = ('name',)\n verbose_name = _('inventory item role')\n verbose_name_plural = _('inventory item roles')\n\n def get_absolute_url(self):\n return reverse('dcim:inventoryitemrole', args=[self.pk])\n\n\nclass InventoryItem(MPTTModel, ComponentModel, TrackingModelMixin):\n \"\"\"\n An InventoryItem represents a serialized piece of hardware within a Device, such as a line card or power supply.\n InventoryItems are used only for inventory purposes.\n \"\"\"\n parent = TreeForeignKey(\n to='self',\n on_delete=models.CASCADE,\n related_name='child_items',\n blank=True,\n 
null=True,\n db_index=True\n )\n component_type = models.ForeignKey(\n to='contenttypes.ContentType',\n limit_choices_to=MODULAR_COMPONENT_MODELS,\n on_delete=models.PROTECT,\n related_name='+',\n blank=True,\n null=True\n )\n component_id = models.PositiveBigIntegerField(\n blank=True,\n null=True\n )\n component = GenericForeignKey(\n ct_field='component_type',\n fk_field='component_id'\n )\n role = models.ForeignKey(\n to='dcim.InventoryItemRole',\n on_delete=models.PROTECT,\n related_name='inventory_items',\n blank=True,\n null=True\n )\n manufacturer = models.ForeignKey(\n to='dcim.Manufacturer',\n on_delete=models.PROTECT,\n related_name='inventory_items',\n blank=True,\n null=True\n )\n part_id = models.CharField(\n max_length=50,\n verbose_name=_('part ID'),\n blank=True,\n help_text=_('Manufacturer-assigned part identifier')\n )\n serial = models.CharField(\n max_length=50,\n verbose_name=_('serial number'),\n blank=True\n )\n asset_tag = models.CharField(\n max_length=50,\n unique=True,\n blank=True,\n null=True,\n verbose_name=_('asset tag'),\n help_text=_('A unique tag used to identify this item')\n )\n discovered = models.BooleanField(\n verbose_name=_('discovered'),\n default=False,\n help_text=_('This item was automatically discovered')\n )\n\n objects = TreeManager()\n\n clone_fields = ('device', 'parent', 'role', 'manufacturer', 'part_id',)\n\n class Meta:\n ordering = ('device__id', 'parent__id', '_name')\n indexes = (\n models.Index(fields=('component_type', 'component_id')),\n )\n constraints = (\n models.UniqueConstraint(\n fields=('device', 'parent', 'name'),\n name='%(app_label)s_%(class)s_unique_device_parent_name'\n ),\n )\n verbose_name = _('inventory item')\n verbose_name_plural = _('inventory items')\n\n def get_absolute_url(self):\n return reverse('dcim:inventoryitem', kwargs={'pk': self.pk})\n\n def clean(self):\n super().clean()\n\n # An InventoryItem cannot be its own parent\n if self.pk and self.parent_id == self.pk:\n raise ValidationError({\n \"parent\": _(\"Cannot assign self as parent.\")\n })\n\n # Validation for moving InventoryItems\n if self.pk:\n # Cannot move an InventoryItem to another device if it has a parent\n if self.parent and self.parent.device != self.device:\n raise ValidationError({\n \"parent\": _(\"Parent inventory item does not belong to the same device.\")\n })\n\n # Prevent moving InventoryItems with children\n first_child = self.get_children().first()\n if first_child and first_child.device != self.device:\n raise ValidationError(_(\"Cannot move an inventory item with dependent children\"))\n\n # When moving an InventoryItem to another device, remove any associated component\n if self.component and self.component.device != self.device:\n self.component = None\n else:\n if self.component and self.component.device != self.device:\n raise ValidationError({\n \"device\": _(\"Cannot assign inventory item to component on another device\")\n })\n", "path": "netbox/dcim/models/device_components.py" } ]
[ { "content": "from functools import cached_property\n\nfrom django.contrib.contenttypes.fields import GenericForeignKey, GenericRelation\nfrom django.core.exceptions import ValidationError\nfrom django.core.validators import MaxValueValidator, MinValueValidator\nfrom django.db import models\nfrom django.db.models import Sum\nfrom django.urls import reverse\nfrom django.utils.translation import gettext_lazy as _\nfrom mptt.models import MPTTModel, TreeForeignKey\n\nfrom dcim.choices import *\nfrom dcim.constants import *\nfrom dcim.fields import MACAddressField, WWNField\nfrom netbox.models import OrganizationalModel, NetBoxModel\nfrom utilities.choices import ColorChoices\nfrom utilities.fields import ColorField, NaturalOrderingField\nfrom utilities.mptt import TreeManager\nfrom utilities.ordering import naturalize_interface\nfrom utilities.query_functions import CollateAsChar\nfrom utilities.tracking import TrackingModelMixin\nfrom wireless.choices import *\nfrom wireless.utils import get_channel_attr\n\n\n__all__ = (\n 'BaseInterface',\n 'CabledObjectModel',\n 'ConsolePort',\n 'ConsoleServerPort',\n 'DeviceBay',\n 'FrontPort',\n 'Interface',\n 'InventoryItem',\n 'InventoryItemRole',\n 'ModuleBay',\n 'PathEndpoint',\n 'PowerOutlet',\n 'PowerPort',\n 'RearPort',\n)\n\n\nclass ComponentModel(NetBoxModel):\n \"\"\"\n An abstract model inherited by any model which has a parent Device.\n \"\"\"\n device = models.ForeignKey(\n to='dcim.Device',\n on_delete=models.CASCADE,\n related_name='%(class)ss'\n )\n name = models.CharField(\n verbose_name=_('name'),\n max_length=64\n )\n _name = NaturalOrderingField(\n target_field='name',\n max_length=100,\n blank=True\n )\n label = models.CharField(\n verbose_name=_('label'),\n max_length=64,\n blank=True,\n help_text=_('Physical label')\n )\n description = models.CharField(\n verbose_name=_('description'),\n max_length=200,\n blank=True\n )\n\n class Meta:\n abstract = True\n ordering = ('device', '_name')\n constraints = (\n models.UniqueConstraint(\n fields=('device', 'name'),\n name='%(app_label)s_%(class)s_unique_device_name'\n ),\n )\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n # Cache the original Device ID for reference under clean()\n self._original_device = self.__dict__.get('device_id')\n\n def __str__(self):\n if self.label:\n return f\"{self.name} ({self.label})\"\n return self.name\n\n def to_objectchange(self, action):\n objectchange = super().to_objectchange(action)\n objectchange.related_object = self.device\n return objectchange\n\n def clean(self):\n super().clean()\n\n # Check list of Modules that allow device field to be changed\n if (type(self) not in [InventoryItem]) and (self.pk is not None) and (self._original_device != self.device_id):\n raise ValidationError({\n \"device\": _(\"Components cannot be moved to a different device.\")\n })\n\n @property\n def parent_object(self):\n return self.device\n\n\nclass ModularComponentModel(ComponentModel):\n module = models.ForeignKey(\n to='dcim.Module',\n on_delete=models.CASCADE,\n related_name='%(class)ss',\n blank=True,\n null=True\n )\n inventory_items = GenericRelation(\n to='dcim.InventoryItem',\n content_type_field='component_type',\n object_id_field='component_id'\n )\n\n class Meta(ComponentModel.Meta):\n abstract = True\n\n\nclass CabledObjectModel(models.Model):\n \"\"\"\n An abstract model inherited by all models to which a Cable can terminate. 
Provides the `cable` and `cable_end`\n fields for caching cable associations, as well as `mark_connected` to designate \"fake\" connections.\n \"\"\"\n cable = models.ForeignKey(\n to='dcim.Cable',\n on_delete=models.SET_NULL,\n related_name='+',\n blank=True,\n null=True\n )\n cable_end = models.CharField(\n verbose_name=_('cable end'),\n max_length=1,\n blank=True,\n choices=CableEndChoices\n )\n mark_connected = models.BooleanField(\n verbose_name=_('mark connected'),\n default=False,\n help_text=_('Treat as if a cable is connected')\n )\n\n cable_terminations = GenericRelation(\n to='dcim.CableTermination',\n content_type_field='termination_type',\n object_id_field='termination_id',\n related_query_name='%(class)s',\n )\n\n class Meta:\n abstract = True\n\n def clean(self):\n super().clean()\n\n if self.cable and not self.cable_end:\n raise ValidationError({\n \"cable_end\": _(\"Must specify cable end (A or B) when attaching a cable.\")\n })\n if self.cable_end and not self.cable:\n raise ValidationError({\n \"cable_end\": _(\"Cable end must not be set without a cable.\")\n })\n if self.mark_connected and self.cable:\n raise ValidationError({\n \"mark_connected\": _(\"Cannot mark as connected with a cable attached.\")\n })\n\n @property\n def link(self):\n \"\"\"\n Generic wrapper for a Cable, WirelessLink, or some other relation to a connected termination.\n \"\"\"\n return self.cable\n\n @cached_property\n def link_peers(self):\n if self.cable:\n peers = self.cable.terminations.exclude(cable_end=self.cable_end).prefetch_related('termination')\n return [peer.termination for peer in peers]\n return []\n\n @property\n def _occupied(self):\n return bool(self.mark_connected or self.cable_id)\n\n @property\n def parent_object(self):\n raise NotImplementedError(\n _(\"{class_name} models must declare a parent_object property\").format(class_name=self.__class__.__name__)\n )\n\n @property\n def opposite_cable_end(self):\n if not self.cable_end:\n return None\n return CableEndChoices.SIDE_A if self.cable_end == CableEndChoices.SIDE_B else CableEndChoices.SIDE_B\n\n\nclass PathEndpoint(models.Model):\n \"\"\"\n An abstract model inherited by any CabledObjectModel subclass which represents the end of a CablePath; specifically,\n these include ConsolePort, ConsoleServerPort, PowerPort, PowerOutlet, Interface, and PowerFeed.\n\n `_path` references the CablePath originating from this instance, if any. It is set or cleared by the receivers in\n dcim.signals in response to changes in the cable path, and complements the `origin` GenericForeignKey field on the\n CablePath model. `_path` should not be accessed directly; rather, use the `path` property.\n\n `connected_endpoints()` is a convenience method for returning the destination of the associated CablePath, if any.\n \"\"\"\n _path = models.ForeignKey(\n to='dcim.CablePath',\n on_delete=models.SET_NULL,\n null=True,\n blank=True\n )\n\n class Meta:\n abstract = True\n\n def trace(self):\n origin = self\n path = []\n\n # Construct the complete path (including e.g. 
bridged interfaces)\n while origin is not None:\n\n if origin._path is None:\n break\n\n path.extend(origin._path.path_objects)\n\n # If the path ends at a non-connected pass-through port, pad out the link and far-end terminations\n if len(path) % 3 == 1:\n path.extend(([], []))\n # If the path ends at a site or provider network, inject a null \"link\" to render an attachment\n elif len(path) % 3 == 2:\n path.insert(-1, [])\n\n # Check for a bridged relationship to continue the trace\n destinations = origin._path.destinations\n if len(destinations) == 1:\n origin = getattr(destinations[0], 'bridge', None)\n else:\n origin = None\n\n # Return the path as a list of three-tuples (A termination(s), cable(s), B termination(s))\n return list(zip(*[iter(path)] * 3))\n\n @property\n def path(self):\n return self._path\n\n @cached_property\n def connected_endpoints(self):\n \"\"\"\n Caching accessor for the attached CablePath's destination (if any)\n \"\"\"\n return self._path.destinations if self._path else []\n\n\n#\n# Console components\n#\n\nclass ConsolePort(ModularComponentModel, CabledObjectModel, PathEndpoint, TrackingModelMixin):\n \"\"\"\n A physical console port within a Device. ConsolePorts connect to ConsoleServerPorts.\n \"\"\"\n type = models.CharField(\n verbose_name=_('type'),\n max_length=50,\n choices=ConsolePortTypeChoices,\n blank=True,\n help_text=_('Physical port type')\n )\n speed = models.PositiveIntegerField(\n verbose_name=_('speed'),\n choices=ConsolePortSpeedChoices,\n blank=True,\n null=True,\n help_text=_('Port speed in bits per second')\n )\n\n clone_fields = ('device', 'module', 'type', 'speed')\n\n class Meta(ModularComponentModel.Meta):\n verbose_name = _('console port')\n verbose_name_plural = _('console ports')\n\n def get_absolute_url(self):\n return reverse('dcim:consoleport', kwargs={'pk': self.pk})\n\n\nclass ConsoleServerPort(ModularComponentModel, CabledObjectModel, PathEndpoint, TrackingModelMixin):\n \"\"\"\n A physical port within a Device (typically a designated console server) which provides access to ConsolePorts.\n \"\"\"\n type = models.CharField(\n verbose_name=_('type'),\n max_length=50,\n choices=ConsolePortTypeChoices,\n blank=True,\n help_text=_('Physical port type')\n )\n speed = models.PositiveIntegerField(\n verbose_name=_('speed'),\n choices=ConsolePortSpeedChoices,\n blank=True,\n null=True,\n help_text=_('Port speed in bits per second')\n )\n\n clone_fields = ('device', 'module', 'type', 'speed')\n\n class Meta(ModularComponentModel.Meta):\n verbose_name = _('console server port')\n verbose_name_plural = _('console server ports')\n\n def get_absolute_url(self):\n return reverse('dcim:consoleserverport', kwargs={'pk': self.pk})\n\n\n#\n# Power components\n#\n\nclass PowerPort(ModularComponentModel, CabledObjectModel, PathEndpoint, TrackingModelMixin):\n \"\"\"\n A physical power supply (intake) port within a Device. 
PowerPorts connect to PowerOutlets.\n \"\"\"\n type = models.CharField(\n verbose_name=_('type'),\n max_length=50,\n choices=PowerPortTypeChoices,\n blank=True,\n help_text=_('Physical port type')\n )\n maximum_draw = models.PositiveIntegerField(\n verbose_name=_('maximum draw'),\n blank=True,\n null=True,\n validators=[MinValueValidator(1)],\n help_text=_(\"Maximum power draw (watts)\")\n )\n allocated_draw = models.PositiveIntegerField(\n verbose_name=_('allocated draw'),\n blank=True,\n null=True,\n validators=[MinValueValidator(1)],\n help_text=_('Allocated power draw (watts)')\n )\n\n clone_fields = ('device', 'module', 'maximum_draw', 'allocated_draw')\n\n class Meta(ModularComponentModel.Meta):\n verbose_name = _('power port')\n verbose_name_plural = _('power ports')\n\n def get_absolute_url(self):\n return reverse('dcim:powerport', kwargs={'pk': self.pk})\n\n def clean(self):\n super().clean()\n\n if self.maximum_draw is not None and self.allocated_draw is not None:\n if self.allocated_draw > self.maximum_draw:\n raise ValidationError({\n 'allocated_draw': _(\n \"Allocated draw cannot exceed the maximum draw ({maximum_draw}W).\"\n ).format(maximum_draw=self.maximum_draw)\n })\n\n def get_downstream_powerports(self, leg=None):\n \"\"\"\n Return a queryset of all PowerPorts connected via cable to a child PowerOutlet. For example, in the topology\n below, PP1.get_downstream_powerports() would return PP2-4.\n\n ---- PO1 <---> PP2\n /\n PP1 ------- PO2 <---> PP3\n \\\n ---- PO3 <---> PP4\n\n \"\"\"\n poweroutlets = self.poweroutlets.filter(cable__isnull=False)\n if leg:\n poweroutlets = poweroutlets.filter(feed_leg=leg)\n if not poweroutlets:\n return PowerPort.objects.none()\n\n q = Q()\n for poweroutlet in poweroutlets:\n q |= Q(\n cable=poweroutlet.cable,\n cable_end=poweroutlet.opposite_cable_end\n )\n\n return PowerPort.objects.filter(q)\n\n def get_power_draw(self):\n \"\"\"\n Return the allocated and maximum power draw (in VA) and child PowerOutlet count for this PowerPort.\n \"\"\"\n from dcim.models import PowerFeed\n\n # Calculate aggregate draw of all child power outlets if no numbers have been defined manually\n if self.allocated_draw is None and self.maximum_draw is None:\n utilization = self.get_downstream_powerports().aggregate(\n maximum_draw_total=Sum('maximum_draw'),\n allocated_draw_total=Sum('allocated_draw'),\n )\n ret = {\n 'allocated': utilization['allocated_draw_total'] or 0,\n 'maximum': utilization['maximum_draw_total'] or 0,\n 'outlet_count': self.poweroutlets.count(),\n 'legs': [],\n }\n\n # Calculate per-leg aggregates for three-phase power feeds\n if len(self.link_peers) == 1 and isinstance(self.link_peers[0], PowerFeed) and \\\n self.link_peers[0].phase == PowerFeedPhaseChoices.PHASE_3PHASE:\n for leg, leg_name in PowerOutletFeedLegChoices:\n utilization = self.get_downstream_powerports(leg=leg).aggregate(\n maximum_draw_total=Sum('maximum_draw'),\n allocated_draw_total=Sum('allocated_draw'),\n )\n ret['legs'].append({\n 'name': leg_name,\n 'allocated': utilization['allocated_draw_total'] or 0,\n 'maximum': utilization['maximum_draw_total'] or 0,\n 'outlet_count': self.poweroutlets.filter(feed_leg=leg).count(),\n })\n\n return ret\n\n # Default to administratively defined values\n return {\n 'allocated': self.allocated_draw or 0,\n 'maximum': self.maximum_draw or 0,\n 'outlet_count': self.poweroutlets.count(),\n 'legs': [],\n }\n\n\nclass PowerOutlet(ModularComponentModel, CabledObjectModel, PathEndpoint, TrackingModelMixin):\n \"\"\"\n A physical power 
outlet (output) within a Device which provides power to a PowerPort.\n \"\"\"\n type = models.CharField(\n verbose_name=_('type'),\n max_length=50,\n choices=PowerOutletTypeChoices,\n blank=True,\n help_text=_('Physical port type')\n )\n power_port = models.ForeignKey(\n to='dcim.PowerPort',\n on_delete=models.SET_NULL,\n blank=True,\n null=True,\n related_name='poweroutlets'\n )\n feed_leg = models.CharField(\n verbose_name=_('feed leg'),\n max_length=50,\n choices=PowerOutletFeedLegChoices,\n blank=True,\n help_text=_('Phase (for three-phase feeds)')\n )\n\n clone_fields = ('device', 'module', 'type', 'power_port', 'feed_leg')\n\n class Meta(ModularComponentModel.Meta):\n verbose_name = _('power outlet')\n verbose_name_plural = _('power outlets')\n\n def get_absolute_url(self):\n return reverse('dcim:poweroutlet', kwargs={'pk': self.pk})\n\n def clean(self):\n super().clean()\n\n # Validate power port assignment\n if self.power_port and self.power_port.device != self.device:\n raise ValidationError(\n _(\"Parent power port ({power_port}) must belong to the same device\").format(power_port=self.power_port)\n )\n\n\n#\n# Interfaces\n#\n\nclass BaseInterface(models.Model):\n \"\"\"\n Abstract base class for fields shared by dcim.Interface and virtualization.VMInterface.\n \"\"\"\n enabled = models.BooleanField(\n verbose_name=_('enabled'),\n default=True\n )\n mac_address = MACAddressField(\n null=True,\n blank=True,\n verbose_name=_('MAC address')\n )\n mtu = models.PositiveIntegerField(\n blank=True,\n null=True,\n validators=[\n MinValueValidator(INTERFACE_MTU_MIN),\n MaxValueValidator(INTERFACE_MTU_MAX)\n ],\n verbose_name=_('MTU')\n )\n mode = models.CharField(\n verbose_name=_('mode'),\n max_length=50,\n choices=InterfaceModeChoices,\n blank=True,\n help_text=_('IEEE 802.1Q tagging strategy')\n )\n parent = models.ForeignKey(\n to='self',\n on_delete=models.RESTRICT,\n related_name='child_interfaces',\n null=True,\n blank=True,\n verbose_name=_('parent interface')\n )\n bridge = models.ForeignKey(\n to='self',\n on_delete=models.SET_NULL,\n related_name='bridge_interfaces',\n null=True,\n blank=True,\n verbose_name=_('bridge interface')\n )\n\n class Meta:\n abstract = True\n\n def save(self, *args, **kwargs):\n\n # Remove untagged VLAN assignment for non-802.1Q interfaces\n if not self.mode:\n self.untagged_vlan = None\n\n # Only \"tagged\" interfaces may have tagged VLANs assigned. (\"tagged all\" implies all VLANs are assigned.)\n if self.pk and self.mode != InterfaceModeChoices.MODE_TAGGED:\n self.tagged_vlans.clear()\n\n return super().save(*args, **kwargs)\n\n @property\n def tunnel_termination(self):\n return self.tunnel_terminations.first()\n\n @property\n def count_ipaddresses(self):\n return self.ip_addresses.count()\n\n @property\n def count_fhrp_groups(self):\n return self.fhrp_group_assignments.count()\n\n\nclass Interface(ModularComponentModel, BaseInterface, CabledObjectModel, PathEndpoint, TrackingModelMixin):\n \"\"\"\n A network interface within a Device. 
A physical Interface can connect to exactly one other Interface.\n \"\"\"\n # Override ComponentModel._name to specify naturalize_interface function\n _name = NaturalOrderingField(\n target_field='name',\n naturalize_function=naturalize_interface,\n max_length=100,\n blank=True\n )\n vdcs = models.ManyToManyField(\n to='dcim.VirtualDeviceContext',\n related_name='interfaces'\n )\n lag = models.ForeignKey(\n to='self',\n on_delete=models.SET_NULL,\n related_name='member_interfaces',\n null=True,\n blank=True,\n verbose_name=_('parent LAG')\n )\n type = models.CharField(\n verbose_name=_('type'),\n max_length=50,\n choices=InterfaceTypeChoices\n )\n mgmt_only = models.BooleanField(\n default=False,\n verbose_name=_('management only'),\n help_text=_('This interface is used only for out-of-band management')\n )\n speed = models.PositiveIntegerField(\n blank=True,\n null=True,\n verbose_name=_('speed (Kbps)')\n )\n duplex = models.CharField(\n verbose_name=_('duplex'),\n max_length=50,\n blank=True,\n null=True,\n choices=InterfaceDuplexChoices\n )\n wwn = WWNField(\n null=True,\n blank=True,\n verbose_name=_('WWN'),\n help_text=_('64-bit World Wide Name')\n )\n rf_role = models.CharField(\n max_length=30,\n choices=WirelessRoleChoices,\n blank=True,\n verbose_name=_('wireless role')\n )\n rf_channel = models.CharField(\n max_length=50,\n choices=WirelessChannelChoices,\n blank=True,\n verbose_name=_('wireless channel')\n )\n rf_channel_frequency = models.DecimalField(\n max_digits=7,\n decimal_places=2,\n blank=True,\n null=True,\n verbose_name=_('channel frequency (MHz)'),\n help_text=_(\"Populated by selected channel (if set)\")\n )\n rf_channel_width = models.DecimalField(\n max_digits=7,\n decimal_places=3,\n blank=True,\n null=True,\n verbose_name=('channel width (MHz)'),\n help_text=_(\"Populated by selected channel (if set)\")\n )\n tx_power = models.PositiveSmallIntegerField(\n blank=True,\n null=True,\n validators=(MaxValueValidator(127),),\n verbose_name=_('transmit power (dBm)')\n )\n poe_mode = models.CharField(\n max_length=50,\n choices=InterfacePoEModeChoices,\n blank=True,\n verbose_name=_('PoE mode')\n )\n poe_type = models.CharField(\n max_length=50,\n choices=InterfacePoETypeChoices,\n blank=True,\n verbose_name=_('PoE type')\n )\n wireless_link = models.ForeignKey(\n to='wireless.WirelessLink',\n on_delete=models.SET_NULL,\n related_name='+',\n blank=True,\n null=True\n )\n wireless_lans = models.ManyToManyField(\n to='wireless.WirelessLAN',\n related_name='interfaces',\n blank=True,\n verbose_name=_('wireless LANs')\n )\n untagged_vlan = models.ForeignKey(\n to='ipam.VLAN',\n on_delete=models.SET_NULL,\n related_name='interfaces_as_untagged',\n null=True,\n blank=True,\n verbose_name=_('untagged VLAN')\n )\n tagged_vlans = models.ManyToManyField(\n to='ipam.VLAN',\n related_name='interfaces_as_tagged',\n blank=True,\n verbose_name=_('tagged VLANs')\n )\n vrf = models.ForeignKey(\n to='ipam.VRF',\n on_delete=models.SET_NULL,\n related_name='interfaces',\n null=True,\n blank=True,\n verbose_name=_('VRF')\n )\n ip_addresses = GenericRelation(\n to='ipam.IPAddress',\n content_type_field='assigned_object_type',\n object_id_field='assigned_object_id',\n related_query_name='interface'\n )\n fhrp_group_assignments = GenericRelation(\n to='ipam.FHRPGroupAssignment',\n content_type_field='interface_type',\n object_id_field='interface_id',\n related_query_name='+'\n )\n tunnel_terminations = GenericRelation(\n to='vpn.TunnelTermination',\n content_type_field='termination_type',\n 
object_id_field='termination_id',\n related_query_name='interface'\n )\n l2vpn_terminations = GenericRelation(\n to='vpn.L2VPNTermination',\n content_type_field='assigned_object_type',\n object_id_field='assigned_object_id',\n related_query_name='interface',\n )\n\n clone_fields = (\n 'device', 'module', 'parent', 'bridge', 'lag', 'type', 'mgmt_only', 'mtu', 'mode', 'speed', 'duplex', 'rf_role',\n 'rf_channel', 'rf_channel_frequency', 'rf_channel_width', 'tx_power', 'poe_mode', 'poe_type', 'vrf',\n )\n\n class Meta(ModularComponentModel.Meta):\n ordering = ('device', CollateAsChar('_name'))\n verbose_name = _('interface')\n verbose_name_plural = _('interfaces')\n\n def get_absolute_url(self):\n return reverse('dcim:interface', kwargs={'pk': self.pk})\n\n def clean(self):\n super().clean()\n\n # Virtual Interfaces cannot have a Cable attached\n if self.is_virtual and self.cable:\n raise ValidationError({\n 'type': _(\"{display_type} interfaces cannot have a cable attached.\").format(\n display_type=self.get_type_display()\n )\n })\n\n # Virtual Interfaces cannot be marked as connected\n if self.is_virtual and self.mark_connected:\n raise ValidationError({\n 'mark_connected': _(\"{display_type} interfaces cannot be marked as connected.\".format(\n display_type=self.get_type_display())\n )\n })\n\n # Parent validation\n\n # An interface cannot be its own parent\n if self.pk and self.parent_id == self.pk:\n raise ValidationError({'parent': _(\"An interface cannot be its own parent.\")})\n\n # A physical interface cannot have a parent interface\n if self.type != InterfaceTypeChoices.TYPE_VIRTUAL and self.parent is not None:\n raise ValidationError({'parent': _(\"Only virtual interfaces may be assigned to a parent interface.\")})\n\n # An interface's parent must belong to the same device or virtual chassis\n if self.parent and self.parent.device != self.device:\n if self.device.virtual_chassis is None:\n raise ValidationError({\n 'parent': _(\n \"The selected parent interface ({interface}) belongs to a different device ({device})\"\n ).format(interface=self.parent, device=self.parent.device)\n })\n elif self.parent.device.virtual_chassis != self.parent.virtual_chassis:\n raise ValidationError({\n 'parent': _(\n \"The selected parent interface ({interface}) belongs to {device}, which is not part of \"\n \"virtual chassis {virtual_chassis}.\"\n ).format(\n interface=self.parent,\n device=self.parent_device,\n virtual_chassis=self.device.virtual_chassis\n )\n })\n\n # Bridge validation\n\n # An interface cannot be bridged to itself\n if self.pk and self.bridge_id == self.pk:\n raise ValidationError({'bridge': _(\"An interface cannot be bridged to itself.\")})\n\n # A bridged interface belong to the same device or virtual chassis\n if self.bridge and self.bridge.device != self.device:\n if self.device.virtual_chassis is None:\n raise ValidationError({\n 'bridge': _(\n \"The selected bridge interface ({bridge}) belongs to a different device ({device}).\"\n ).format(bridge=self.bridge, device=self.bridge.device)\n })\n elif self.bridge.device.virtual_chassis != self.device.virtual_chassis:\n raise ValidationError({\n 'bridge': _(\n \"The selected bridge interface ({interface}) belongs to {device}, which is not part of virtual \"\n \"chassis {virtual_chassis}.\"\n ).format(\n interface=self.bridge, device=self.bridge.device, virtual_chassis=self.device.virtual_chassis\n )\n })\n\n # LAG validation\n\n # A virtual interface cannot have a parent LAG\n if self.type == InterfaceTypeChoices.TYPE_VIRTUAL and 
self.lag is not None:\n raise ValidationError({'lag': _(\"Virtual interfaces cannot have a parent LAG interface.\")})\n\n # A LAG interface cannot be its own parent\n if self.pk and self.lag_id == self.pk:\n raise ValidationError({'lag': _(\"A LAG interface cannot be its own parent.\")})\n\n # An interface's LAG must belong to the same device or virtual chassis\n if self.lag and self.lag.device != self.device:\n if self.device.virtual_chassis is None:\n raise ValidationError({\n 'lag': _(\n \"The selected LAG interface ({lag}) belongs to a different device ({device}).\"\n ).format(lag=self.lag, device=self.lag.device)\n })\n elif self.lag.device.virtual_chassis != self.device.virtual_chassis:\n raise ValidationError({\n 'lag': _(\n \"The selected LAG interface ({lag}) belongs to {device}, which is not part of virtual chassis \"\n \"{virtual_chassis}.\".format(\n lag=self.lag, device=self.lag.device, virtual_chassis=self.device.virtual_chassis)\n )\n })\n\n # PoE validation\n\n # Only physical interfaces may have a PoE mode/type assigned\n if self.poe_mode and self.is_virtual:\n raise ValidationError({\n 'poe_mode': _(\"Virtual interfaces cannot have a PoE mode.\")\n })\n if self.poe_type and self.is_virtual:\n raise ValidationError({\n 'poe_type': _(\"Virtual interfaces cannot have a PoE type.\")\n })\n\n # An interface with a PoE type set must also specify a mode\n if self.poe_type and not self.poe_mode:\n raise ValidationError({\n 'poe_type': _(\"Must specify PoE mode when designating a PoE type.\")\n })\n\n # Wireless validation\n\n # RF role & channel may only be set for wireless interfaces\n if self.rf_role and not self.is_wireless:\n raise ValidationError({'rf_role': _(\"Wireless role may be set only on wireless interfaces.\")})\n if self.rf_channel and not self.is_wireless:\n raise ValidationError({'rf_channel': _(\"Channel may be set only on wireless interfaces.\")})\n\n # Validate channel frequency against interface type and selected channel (if any)\n if self.rf_channel_frequency:\n if not self.is_wireless:\n raise ValidationError({\n 'rf_channel_frequency': _(\"Channel frequency may be set only on wireless interfaces.\"),\n })\n if self.rf_channel and self.rf_channel_frequency != get_channel_attr(self.rf_channel, 'frequency'):\n raise ValidationError({\n 'rf_channel_frequency': _(\"Cannot specify custom frequency with channel selected.\"),\n })\n\n # Validate channel width against interface type and selected channel (if any)\n if self.rf_channel_width:\n if not self.is_wireless:\n raise ValidationError({'rf_channel_width': _(\"Channel width may be set only on wireless interfaces.\")})\n if self.rf_channel and self.rf_channel_width != get_channel_attr(self.rf_channel, 'width'):\n raise ValidationError({'rf_channel_width': _(\"Cannot specify custom width with channel selected.\")})\n\n # VLAN validation\n\n # Validate untagged VLAN\n if self.untagged_vlan and self.untagged_vlan.site not in [self.device.site, None]:\n raise ValidationError({\n 'untagged_vlan': _(\n \"The untagged VLAN ({untagged_vlan}) must belong to the same site as the interface's parent \"\n \"device, or it must be global.\"\n ).format(untagged_vlan=self.untagged_vlan)\n })\n\n def save(self, *args, **kwargs):\n\n # Set absolute channel attributes from selected options\n if self.rf_channel and not self.rf_channel_frequency:\n self.rf_channel_frequency = get_channel_attr(self.rf_channel, 'frequency')\n if self.rf_channel and not self.rf_channel_width:\n self.rf_channel_width = get_channel_attr(self.rf_channel, 
'width')\n\n super().save(*args, **kwargs)\n\n @property\n def _occupied(self):\n return super()._occupied or bool(self.wireless_link_id)\n\n @property\n def is_wired(self):\n return not self.is_virtual and not self.is_wireless\n\n @property\n def is_virtual(self):\n return self.type in VIRTUAL_IFACE_TYPES\n\n @property\n def is_wireless(self):\n return self.type in WIRELESS_IFACE_TYPES\n\n @property\n def is_lag(self):\n return self.type == InterfaceTypeChoices.TYPE_LAG\n\n @property\n def is_bridge(self):\n return self.type == InterfaceTypeChoices.TYPE_BRIDGE\n\n @property\n def link(self):\n return self.cable or self.wireless_link\n\n @cached_property\n def link_peers(self):\n if self.cable:\n return super().link_peers\n if self.wireless_link:\n # Return the opposite side of the attached wireless link\n if self.wireless_link.interface_a == self:\n return [self.wireless_link.interface_b]\n else:\n return [self.wireless_link.interface_a]\n return []\n\n @property\n def l2vpn_termination(self):\n return self.l2vpn_terminations.first()\n\n\n#\n# Pass-through ports\n#\n\nclass FrontPort(ModularComponentModel, CabledObjectModel, TrackingModelMixin):\n \"\"\"\n A pass-through port on the front of a Device.\n \"\"\"\n type = models.CharField(\n verbose_name=_('type'),\n max_length=50,\n choices=PortTypeChoices\n )\n color = ColorField(\n verbose_name=_('color'),\n blank=True\n )\n rear_port = models.ForeignKey(\n to='dcim.RearPort',\n on_delete=models.CASCADE,\n related_name='frontports'\n )\n rear_port_position = models.PositiveSmallIntegerField(\n verbose_name=_('rear port position'),\n default=1,\n validators=[\n MinValueValidator(REARPORT_POSITIONS_MIN),\n MaxValueValidator(REARPORT_POSITIONS_MAX)\n ],\n help_text=_('Mapped position on corresponding rear port')\n )\n\n clone_fields = ('device', 'type', 'color')\n\n class Meta(ModularComponentModel.Meta):\n constraints = (\n models.UniqueConstraint(\n fields=('device', 'name'),\n name='%(app_label)s_%(class)s_unique_device_name'\n ),\n models.UniqueConstraint(\n fields=('rear_port', 'rear_port_position'),\n name='%(app_label)s_%(class)s_unique_rear_port_position'\n ),\n )\n verbose_name = _('front port')\n verbose_name_plural = _('front ports')\n\n def get_absolute_url(self):\n return reverse('dcim:frontport', kwargs={'pk': self.pk})\n\n def clean(self):\n super().clean()\n\n if hasattr(self, 'rear_port'):\n\n # Validate rear port assignment\n if self.rear_port.device != self.device:\n raise ValidationError({\n \"rear_port\": _(\n \"Rear port ({rear_port}) must belong to the same device\"\n ).format(rear_port=self.rear_port)\n })\n\n # Validate rear port position assignment\n if self.rear_port_position > self.rear_port.positions:\n raise ValidationError({\n \"rear_port_position\": _(\n \"Invalid rear port position ({rear_port_position}): Rear port {name} has only {positions} \"\n \"positions.\"\n ).format(\n rear_port_position=self.rear_port_position,\n name=self.rear_port.name,\n positions=self.rear_port.positions\n )\n })\n\n\nclass RearPort(ModularComponentModel, CabledObjectModel, TrackingModelMixin):\n \"\"\"\n A pass-through port on the rear of a Device.\n \"\"\"\n type = models.CharField(\n verbose_name=_('type'),\n max_length=50,\n choices=PortTypeChoices\n )\n color = ColorField(\n verbose_name=_('color'),\n blank=True\n )\n positions = models.PositiveSmallIntegerField(\n verbose_name=_('positions'),\n default=1,\n validators=[\n MinValueValidator(REARPORT_POSITIONS_MIN),\n MaxValueValidator(REARPORT_POSITIONS_MAX)\n ],\n 
help_text=_('Number of front ports which may be mapped')\n )\n clone_fields = ('device', 'type', 'color', 'positions')\n\n class Meta(ModularComponentModel.Meta):\n verbose_name = _('rear port')\n verbose_name_plural = _('rear ports')\n\n def get_absolute_url(self):\n return reverse('dcim:rearport', kwargs={'pk': self.pk})\n\n def clean(self):\n super().clean()\n\n # Check that positions count is greater than or equal to the number of associated FrontPorts\n if self.pk:\n frontport_count = self.frontports.count()\n if self.positions < frontport_count:\n raise ValidationError({\n \"positions\": _(\n \"The number of positions cannot be less than the number of mapped front ports \"\n \"({frontport_count})\"\n ).format(frontport_count=frontport_count)\n })\n\n\n#\n# Bays\n#\n\nclass ModuleBay(ComponentModel, TrackingModelMixin):\n \"\"\"\n An empty space within a Device which can house a child device\n \"\"\"\n position = models.CharField(\n verbose_name=_('position'),\n max_length=30,\n blank=True,\n help_text=_('Identifier to reference when renaming installed components')\n )\n\n clone_fields = ('device',)\n\n class Meta(ComponentModel.Meta):\n verbose_name = _('module bay')\n verbose_name_plural = _('module bays')\n\n def get_absolute_url(self):\n return reverse('dcim:modulebay', kwargs={'pk': self.pk})\n\n\nclass DeviceBay(ComponentModel, TrackingModelMixin):\n \"\"\"\n An empty space within a Device which can house a child device\n \"\"\"\n installed_device = models.OneToOneField(\n to='dcim.Device',\n on_delete=models.SET_NULL,\n related_name='parent_bay',\n blank=True,\n null=True\n )\n\n clone_fields = ('device',)\n\n class Meta(ComponentModel.Meta):\n verbose_name = _('device bay')\n verbose_name_plural = _('device bays')\n\n def get_absolute_url(self):\n return reverse('dcim:devicebay', kwargs={'pk': self.pk})\n\n def clean(self):\n super().clean()\n\n # Validate that the parent Device can have DeviceBays\n if not self.device.device_type.is_parent_device:\n raise ValidationError(_(\"This type of device ({device_type}) does not support device bays.\").format(\n device_type=self.device.device_type\n ))\n\n # Cannot install a device into itself, obviously\n if self.device == self.installed_device:\n raise ValidationError(_(\"Cannot install a device into itself.\"))\n\n # Check that the installed device is not already installed elsewhere\n if self.installed_device:\n current_bay = DeviceBay.objects.filter(installed_device=self.installed_device).first()\n if current_bay and current_bay != self:\n raise ValidationError({\n 'installed_device': _(\n \"Cannot install the specified device; device is already installed in {bay}.\"\n ).format(bay=current_bay)\n })\n\n\n#\n# Inventory items\n#\n\n\nclass InventoryItemRole(OrganizationalModel):\n \"\"\"\n Inventory items may optionally be assigned a functional role.\n \"\"\"\n color = ColorField(\n verbose_name=_('color'),\n default=ColorChoices.COLOR_GREY\n )\n\n class Meta:\n ordering = ('name',)\n verbose_name = _('inventory item role')\n verbose_name_plural = _('inventory item roles')\n\n def get_absolute_url(self):\n return reverse('dcim:inventoryitemrole', args=[self.pk])\n\n\nclass InventoryItem(MPTTModel, ComponentModel, TrackingModelMixin):\n \"\"\"\n An InventoryItem represents a serialized piece of hardware within a Device, such as a line card or power supply.\n InventoryItems are used only for inventory purposes.\n \"\"\"\n parent = TreeForeignKey(\n to='self',\n on_delete=models.CASCADE,\n related_name='child_items',\n blank=True,\n 
null=True,\n db_index=True\n )\n component_type = models.ForeignKey(\n to='contenttypes.ContentType',\n limit_choices_to=MODULAR_COMPONENT_MODELS,\n on_delete=models.PROTECT,\n related_name='+',\n blank=True,\n null=True\n )\n component_id = models.PositiveBigIntegerField(\n blank=True,\n null=True\n )\n component = GenericForeignKey(\n ct_field='component_type',\n fk_field='component_id'\n )\n role = models.ForeignKey(\n to='dcim.InventoryItemRole',\n on_delete=models.PROTECT,\n related_name='inventory_items',\n blank=True,\n null=True\n )\n manufacturer = models.ForeignKey(\n to='dcim.Manufacturer',\n on_delete=models.PROTECT,\n related_name='inventory_items',\n blank=True,\n null=True\n )\n part_id = models.CharField(\n max_length=50,\n verbose_name=_('part ID'),\n blank=True,\n help_text=_('Manufacturer-assigned part identifier')\n )\n serial = models.CharField(\n max_length=50,\n verbose_name=_('serial number'),\n blank=True\n )\n asset_tag = models.CharField(\n max_length=50,\n unique=True,\n blank=True,\n null=True,\n verbose_name=_('asset tag'),\n help_text=_('A unique tag used to identify this item')\n )\n discovered = models.BooleanField(\n verbose_name=_('discovered'),\n default=False,\n help_text=_('This item was automatically discovered')\n )\n\n objects = TreeManager()\n\n clone_fields = ('device', 'parent', 'role', 'manufacturer', 'part_id',)\n\n class Meta:\n ordering = ('device__id', 'parent__id', '_name')\n indexes = (\n models.Index(fields=('component_type', 'component_id')),\n )\n constraints = (\n models.UniqueConstraint(\n fields=('device', 'parent', 'name'),\n name='%(app_label)s_%(class)s_unique_device_parent_name'\n ),\n )\n verbose_name = _('inventory item')\n verbose_name_plural = _('inventory items')\n\n def get_absolute_url(self):\n return reverse('dcim:inventoryitem', kwargs={'pk': self.pk})\n\n def clean(self):\n super().clean()\n\n # An InventoryItem cannot be its own parent\n if self.pk and self.parent_id == self.pk:\n raise ValidationError({\n \"parent\": _(\"Cannot assign self as parent.\")\n })\n\n # Validation for moving InventoryItems\n if self.pk:\n # Cannot move an InventoryItem to another device if it has a parent\n if self.parent and self.parent.device != self.device:\n raise ValidationError({\n \"parent\": _(\"Parent inventory item does not belong to the same device.\")\n })\n\n # Prevent moving InventoryItems with children\n first_child = self.get_children().first()\n if first_child and first_child.device != self.device:\n raise ValidationError(_(\"Cannot move an inventory item with dependent children\"))\n\n # When moving an InventoryItem to another device, remove any associated component\n if self.component and self.component.device != self.device:\n self.component = None\n else:\n if self.component and self.component.device != self.device:\n raise ValidationError({\n \"device\": _(\"Cannot assign inventory item to component on another device\")\n })\n", "path": "netbox/dcim/models/device_components.py" } ]
diff --git a/netbox/dcim/models/device_components.py b/netbox/dcim/models/device_components.py index ef235078fa3..88dddb31267 100644 --- a/netbox/dcim/models/device_components.py +++ b/netbox/dcim/models/device_components.py @@ -1115,7 +1115,7 @@ class DeviceBay(ComponentModel, TrackingModelMixin): installed_device = models.OneToOneField( to='dcim.Device', on_delete=models.SET_NULL, - related_name=_('parent_bay'), + related_name='parent_bay', blank=True, null=True )
projectmesa__mesa-891
Cookiecutter doesn't work on 0.8.7 release **Describe the bug** `mesa startproject` fails after `pipenv install mesa` ``` A valid repository for "/home/neil/.local/share/virtualenvs/baseline-economy-6fg_iky1/lib/python3.8/site-packages/mesa/cookiecutter-mesa" could not be found in the following locations: ... ``` **Expected behavior** Generate the project layout. **To Reproduce** - pipenv install mesa - mesa startproject **Additional context** The cookiecutter directory from the repo is missing from the installation. Additionally, there is no help message for `startproject` when you run `mesa --help`.
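As a quick way to confirm the report, here is a minimal sketch (not part of the original issue) that checks whether the cookiecutter template directory was shipped with the installed package; it only assumes that `mesa` is importable and that the CLI resolves the template next to the package, as the error path quoted above suggests.

```python
# Minimal sketch: check whether the cookiecutter-mesa template directory
# shipped with the installed mesa package (assumes mesa is importable).
import os
import mesa

template_dir = os.path.join(os.path.dirname(mesa.__file__), "cookiecutter-mesa")
print(template_dir, "->", "present" if os.path.isdir(template_dir) else "missing")
# If the directory is missing, cookiecutter cannot find a valid repository
# and `mesa startproject` fails with the error quoted above.
```

The fix in the PR diff below adds `include mesa/cookiecutter-mesa/*` to `MANIFEST.in` so the template directory is packaged with the source distribution.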
[ { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport re\n\nfrom setuptools import setup, find_packages\nfrom codecs import open\n\nrequires = [\"click\", \"cookiecutter\", \"networkx\", \"numpy\", \"pandas\", \"tornado\", \"tqdm\"]\n\nextras_require = {\n \"dev\": [\"coverage\", \"flake8\", \"pytest >= 3.6\", \"pytest-cov\", \"sphinx\"],\n \"docs\": [\"sphinx\"],\n}\n\nversion = \"\"\nwith open(\"mesa/__init__.py\", \"r\") as fd:\n version = re.search(\n r'^__version__\\s*=\\s*[\\'\"]([^\\'\"]*)[\\'\"]', fd.read(), re.MULTILINE\n ).group(1)\n\nwith open(\"README.rst\", \"rb\", encoding=\"utf-8\") as f:\n readme = f.read()\n\nsetup(\n name=\"Mesa\",\n version=version,\n description=\"Agent-based modeling (ABM) in Python 3+\",\n long_description=readme,\n author=\"Project Mesa Team\",\n author_email=\"[email protected]\",\n url=\"https://github.com/projectmesa/mesa\",\n packages=find_packages(),\n package_data={\n \"mesa\": [\n \"visualization/templates/*.html\",\n \"visualization/templates/css/*\",\n \"visualization/templates/fonts/*\",\n \"visualization/templates/js/*\",\n ],\n \"cookiecutter-mesa\": [\"cookiecutter-mesa/*\"],\n },\n include_package_data=True,\n install_requires=requires,\n extras_require=extras_require,\n keywords=\"agent based modeling model ABM simulation multi-agent\",\n license=\"Apache 2.0\",\n zip_safe=False,\n classifiers=[\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Scientific/Engineering :: Artificial Life\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Intended Audience :: Science/Research\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n \"Development Status :: 3 - Alpha\",\n \"Natural Language :: English\",\n ],\n entry_points=\"\"\"\n [console_scripts]\n mesa=mesa.main:cli\n \"\"\",\n)\n", "path": "setup.py" } ]
[ { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport re\n\nfrom setuptools import setup, find_packages\nfrom codecs import open\n\nrequires = [\"click\", \"cookiecutter\", \"networkx\", \"numpy\", \"pandas\", \"tornado\", \"tqdm\"]\n\nextras_require = {\n \"dev\": [\"coverage\", \"flake8\", \"pytest >= 4.6\", \"pytest-cov\", \"sphinx\"],\n \"docs\": [\"sphinx\"],\n}\n\nversion = \"\"\nwith open(\"mesa/__init__.py\", \"r\") as fd:\n version = re.search(\n r'^__version__\\s*=\\s*[\\'\"]([^\\'\"]*)[\\'\"]', fd.read(), re.MULTILINE\n ).group(1)\n\nwith open(\"README.rst\", \"rb\", encoding=\"utf-8\") as f:\n readme = f.read()\n\nsetup(\n name=\"Mesa\",\n version=version,\n description=\"Agent-based modeling (ABM) in Python 3+\",\n long_description=readme,\n author=\"Project Mesa Team\",\n author_email=\"[email protected]\",\n url=\"https://github.com/projectmesa/mesa\",\n packages=find_packages(),\n package_data={\n \"mesa\": [\n \"visualization/templates/*.html\",\n \"visualization/templates/css/*\",\n \"visualization/templates/fonts/*\",\n \"visualization/templates/js/*\",\n ],\n \"cookiecutter-mesa\": [\"cookiecutter-mesa/*\"],\n },\n include_package_data=True,\n install_requires=requires,\n extras_require=extras_require,\n keywords=\"agent based modeling model ABM simulation multi-agent\",\n license=\"Apache 2.0\",\n zip_safe=False,\n classifiers=[\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Scientific/Engineering :: Artificial Life\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Intended Audience :: Science/Research\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n \"Development Status :: 3 - Alpha\",\n \"Natural Language :: English\",\n ],\n entry_points=\"\"\"\n [console_scripts]\n mesa=mesa.main:cli\n \"\"\",\n)\n", "path": "setup.py" } ]
diff --git a/MANIFEST.in b/MANIFEST.in index d51d8d3685a..697897ce67e 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -3,6 +3,7 @@ include LICENSE include HISTORY.rst include README.rst include setup.py +include mesa/cookiecutter-mesa/* include mesa/visualization/templates/*.html include mesa/visualization/templates/css/* include mesa/visualization/templates/fonts/* diff --git a/setup.py b/setup.py index f0fcfb29e85..df7a9898fc9 100644 --- a/setup.py +++ b/setup.py @@ -8,7 +8,7 @@ requires = ["click", "cookiecutter", "networkx", "numpy", "pandas", "tornado", "tqdm"] extras_require = { - "dev": ["coverage", "flake8", "pytest >= 3.6", "pytest-cov", "sphinx"], + "dev": ["coverage", "flake8", "pytest >= 4.6", "pytest-cov", "sphinx"], "docs": ["sphinx"], }
flairNLP__flair-2070
Keep newline in Sentence for sequence tagging prediction **Describe the bug** I'd like to keep the newline character `\n` when doing sequence tagging prediction (keeping it during training seems easier because of the different code paths). The line at https://github.com/flairNLP/flair/blob/master/flair/data.py#L605 removes the newline character after the text has been tokenised. Given that `flair` is very flexible in terms of tokenisation, I think this last-minute modification of tokens is unwelcome. If I had wanted to run `.strip()` on my tokens, I should have done it during tokenisation. I understand I can replace `\n` with another unused character and then swap them back later, but it'd be cleaner if we could avoid that. **To Reproduce** `Sentence(["hello", "\n"])`, not using a tokeniser because the text is already tokenised. **Expected behavior** I expect two tokens but I get only one. **Environment (please complete the following information):** - OS: Linux - Version: flair 0.7
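A minimal reproduction sketch (assuming flair 0.7 is installed); it uses only the `Sentence` constructor with a pre-tokenised list of strings, matching the "To Reproduce" step above.

```python
# Sketch of the reported behaviour on flair 0.7: a whitespace-only token is
# silently dropped when a Sentence is built from an already tokenised list.
from flair.data import Sentence

sentence = Sentence(["hello", "\n"])  # list input, so no tokenizer runs
print(len(sentence))  # expected: 2, observed: 1 -- the "\n" token is gone
```

The token is discarded by the `if token.text.strip() == '': return` guard in `Sentence.add_token()`, visible in the `before_files` content below, which appears to be the check the issue refers to.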
[ { "content": "import torch, flair\nimport logging\nimport re\n\nfrom abc import abstractmethod, ABC\n\nfrom collections import Counter\nfrom collections import defaultdict\n\nfrom deprecated import deprecated\nfrom flair.file_utils import Tqdm\nfrom operator import itemgetter\n\nfrom torch.utils.data import Dataset\nfrom torch.utils.data.dataset import ConcatDataset, Subset\n\nfrom typing import List, Dict, Union, Callable, Optional\n\nlog = logging.getLogger(\"flair\")\n\n\nclass Dictionary:\n \"\"\"\n This class holds a dictionary that maps strings to IDs, used to generate one-hot encodings of strings.\n \"\"\"\n\n def __init__(self, add_unk=True):\n # init dictionaries\n self.item2idx: Dict[str, int] = {}\n self.idx2item: List[str] = []\n self.multi_label: bool = False\n\n # in order to deal with unknown tokens, add <unk>\n if add_unk:\n self.add_item(\"<unk>\")\n\n def add_item(self, item: str) -> int:\n \"\"\"\n add string - if already in dictionary returns its ID. if not in dictionary, it will get a new ID.\n :param item: a string for which to assign an id.\n :return: ID of string\n \"\"\"\n item = item.encode(\"utf-8\")\n if item not in self.item2idx:\n self.idx2item.append(item)\n self.item2idx[item] = len(self.idx2item) - 1\n return self.item2idx[item]\n\n def get_idx_for_item(self, item: str) -> int:\n \"\"\"\n returns the ID of the string, otherwise 0\n :param item: string for which ID is requested\n :return: ID of string, otherwise 0\n \"\"\"\n item = item.encode(\"utf-8\")\n if item in self.item2idx.keys():\n return self.item2idx[item]\n else:\n return 0\n\n def get_idx_for_items(self, items: List[str]) -> List[int]:\n \"\"\"\n returns the IDs for each item of the list of string, otherwise 0 if not found\n :param items: List of string for which IDs are requested\n :return: List of ID of strings\n \"\"\"\n if not hasattr(self, \"item2idx_not_encoded\"):\n d = dict(\n [(key.decode(\"UTF-8\"), value) for key, value in self.item2idx.items()]\n )\n self.item2idx_not_encoded = defaultdict(int, d)\n\n if not items:\n return []\n results = itemgetter(*items)(self.item2idx_not_encoded)\n if isinstance(results, int):\n return [results]\n return list(results)\n\n def get_items(self) -> List[str]:\n items = []\n for item in self.idx2item:\n items.append(item.decode(\"UTF-8\"))\n return items\n\n def __len__(self) -> int:\n return len(self.idx2item)\n\n def get_item_for_index(self, idx):\n return self.idx2item[idx].decode(\"UTF-8\")\n\n def save(self, savefile):\n import pickle\n\n with open(savefile, \"wb\") as f:\n mappings = {\"idx2item\": self.idx2item, \"item2idx\": self.item2idx}\n pickle.dump(mappings, f)\n\n @classmethod\n def load_from_file(cls, filename: str):\n import pickle\n\n dictionary: Dictionary = Dictionary()\n with open(filename, \"rb\") as f:\n mappings = pickle.load(f, encoding=\"latin1\")\n idx2item = mappings[\"idx2item\"]\n item2idx = mappings[\"item2idx\"]\n dictionary.item2idx = item2idx\n dictionary.idx2item = idx2item\n return dictionary\n\n @classmethod\n def load(cls, name: str):\n from flair.file_utils import cached_path\n hu_path: str = \"https://flair.informatik.hu-berlin.de/resources/characters\"\n if name == \"chars\" or name == \"common-chars\":\n char_dict = cached_path(f\"{hu_path}/common_characters\", cache_dir=\"datasets\")\n return Dictionary.load_from_file(char_dict)\n\n if name == \"chars-large\" or name == \"common-chars-large\":\n char_dict = cached_path(f\"{hu_path}/common_characters_large\", cache_dir=\"datasets\")\n return 
Dictionary.load_from_file(char_dict)\n\n if name == \"chars-xl\" or name == \"common-chars-xl\":\n char_dict = cached_path(f\"{hu_path}/common_characters_xl\", cache_dir=\"datasets\")\n return Dictionary.load_from_file(char_dict)\n\n return Dictionary.load_from_file(name)\n\n def __str__(self):\n tags = ', '.join(self.get_item_for_index(i) for i in range(min(len(self), 30)))\n return f\"Dictionary with {len(self)} tags: {tags}\"\n\n\nclass Label:\n \"\"\"\n This class represents a label. Each label has a value and optionally a confidence score. The\n score needs to be between 0.0 and 1.0. Default value for the score is 1.0.\n \"\"\"\n\n def __init__(self, value: str, score: float = 1.0):\n self.value = value\n self.score = score\n super().__init__()\n\n @property\n def value(self):\n return self._value\n\n @value.setter\n def value(self, value):\n if not value and value != \"\":\n raise ValueError(\n \"Incorrect label value provided. Label value needs to be set.\"\n )\n else:\n self._value = value\n\n @property\n def score(self):\n return self._score\n\n @score.setter\n def score(self, score):\n if 0.0 <= score <= 1.0:\n self._score = score\n else:\n self._score = 1.0\n\n def to_dict(self):\n return {\"value\": self.value, \"confidence\": self.score}\n\n def __str__(self):\n return f\"{self._value} ({round(self._score, 4)})\"\n\n def __repr__(self):\n return f\"{self._value} ({round(self._score, 4)})\"\n\n\nclass DataPoint:\n \"\"\"\n This is the parent class of all data points in Flair (including Token, Sentence, Image, etc.). Each DataPoint\n must be embeddable (hence the abstract property embedding() and methods to() and clear_embeddings()). Also,\n each DataPoint may have Labels in several layers of annotation (hence the functions add_label(), get_labels()\n and the property 'label')\n \"\"\"\n\n def __init__(self):\n self.annotation_layers = {}\n\n @property\n @abstractmethod\n def embedding(self):\n pass\n\n @abstractmethod\n def to(self, device: str, pin_memory: bool = False):\n pass\n\n @abstractmethod\n def clear_embeddings(self, embedding_names: List[str] = None):\n pass\n\n def add_label(self, label_type: str, value: str, score: float = 1.):\n\n if label_type not in self.annotation_layers:\n self.annotation_layers[label_type] = [Label(value, score)]\n else:\n self.annotation_layers[label_type].append(Label(value, score))\n\n return self\n\n def set_label(self, label_type: str, value: str, score: float = 1.):\n self.annotation_layers[label_type] = [Label(value, score)]\n\n return self\n\n def remove_labels(self, label_type: str):\n if label_type in self.annotation_layers.keys():\n del self.annotation_layers[label_type]\n\n def get_labels(self, label_type: str = None):\n if label_type is None:\n return self.labels\n\n return self.annotation_layers[label_type] if label_type in self.annotation_layers else []\n\n @property\n def labels(self) -> List[Label]:\n all_labels = []\n for key in self.annotation_layers.keys():\n all_labels.extend(self.annotation_layers[key])\n return all_labels\n\n\nclass DataPair(DataPoint):\n def __init__(self, first: DataPoint, second: DataPoint):\n super().__init__()\n self.first = first\n self.second = second\n\n def to(self, device: str, pin_memory: bool = False):\n self.first.to(device, pin_memory)\n self.second.to(device, pin_memory)\n\n def clear_embeddings(self, embedding_names: List[str] = None):\n self.first.clear_embeddings(embedding_names)\n self.second.clear_embeddings(embedding_names)\n\n @property\n def embedding(self):\n return 
torch.cat([self.first.embedding, self.second.embedding])\n\n def __str__(self):\n return f\"DataPair:\\n − First {self.first}\\n − Second {self.second}\\n − Labels: {self.labels}\"\n\n def to_plain_string(self):\n return f\"DataPair: First {self.first} || Second {self.second}\"\n\n def __len__(self):\n return len(self.first) + len(self.second)\n\n\nclass Token(DataPoint):\n \"\"\"\n This class represents one word in a tokenized sentence. Each token may have any number of tags. It may also point\n to its head in a dependency tree.\n \"\"\"\n\n def __init__(\n self,\n text: str,\n idx: int = None,\n head_id: int = None,\n whitespace_after: bool = True,\n start_position: int = None,\n ):\n super().__init__()\n\n self.text: str = text\n self.idx: int = idx\n self.head_id: int = head_id\n self.whitespace_after: bool = whitespace_after\n\n self.start_pos = start_position\n self.end_pos = (\n start_position + len(text) if start_position is not None else None\n )\n\n self.sentence: Sentence = None\n self._embeddings: Dict = {}\n self.tags_proba_dist: Dict[str, List[Label]] = {}\n\n def add_tag_label(self, tag_type: str, tag: Label):\n self.set_label(tag_type, tag.value, tag.score)\n\n def add_tags_proba_dist(self, tag_type: str, tags: List[Label]):\n self.tags_proba_dist[tag_type] = tags\n\n def add_tag(self, tag_type: str, tag_value: str, confidence=1.0):\n self.set_label(tag_type, tag_value, confidence)\n\n def get_tag(self, label_type):\n if len(self.get_labels(label_type)) == 0: return Label('')\n return self.get_labels(label_type)[0]\n\n def get_tags_proba_dist(self, tag_type: str) -> List[Label]:\n if tag_type in self.tags_proba_dist:\n return self.tags_proba_dist[tag_type]\n return []\n\n def get_head(self):\n return self.sentence.get_token(self.head_id)\n\n def set_embedding(self, name: str, vector: torch.tensor):\n device = flair.device\n if (flair.embedding_storage_mode == \"cpu\") and len(self._embeddings.keys()) > 0:\n device = next(iter(self._embeddings.values())).device\n if device != vector.device:\n vector = vector.to(device)\n self._embeddings[name] = vector\n\n def to(self, device: str, pin_memory: bool = False):\n for name, vector in self._embeddings.items():\n if str(vector.device) != str(device):\n if pin_memory:\n self._embeddings[name] = vector.to(\n device, non_blocking=True\n ).pin_memory()\n else:\n self._embeddings[name] = vector.to(device, non_blocking=True)\n\n def clear_embeddings(self, embedding_names: List[str] = None):\n if embedding_names is None:\n self._embeddings: Dict = {}\n else:\n for name in embedding_names:\n if name in self._embeddings.keys():\n del self._embeddings[name]\n\n def get_each_embedding(self, embedding_names: Optional[List[str]] = None) -> torch.tensor:\n embeddings = []\n for embed in sorted(self._embeddings.keys()):\n if embedding_names and embed not in embedding_names: continue\n embed = self._embeddings[embed].to(flair.device)\n if (flair.embedding_storage_mode == \"cpu\") and embed.device != flair.device:\n embed = embed.to(flair.device)\n embeddings.append(embed)\n return embeddings\n\n def get_embedding(self, names: Optional[List[str]] = None) -> torch.tensor:\n embeddings = self.get_each_embedding(names)\n\n if embeddings:\n return torch.cat(embeddings, dim=0)\n\n return torch.tensor([], device=flair.device)\n\n @property\n def start_position(self) -> int:\n return self.start_pos\n\n @property\n def end_position(self) -> int:\n return self.end_pos\n\n @property\n def embedding(self):\n return self.get_embedding()\n\n def __str__(self) 
-> str:\n return (\n \"Token: {} {}\".format(self.idx, self.text)\n if self.idx is not None\n else \"Token: {}\".format(self.text)\n )\n\n def __repr__(self) -> str:\n return (\n \"Token: {} {}\".format(self.idx, self.text)\n if self.idx is not None\n else \"Token: {}\".format(self.text)\n )\n\n\nclass Span(DataPoint):\n \"\"\"\n This class represents one textual span consisting of Tokens.\n \"\"\"\n\n def __init__(self, tokens: List[Token]):\n\n super().__init__()\n\n self.tokens = tokens\n self.start_pos = None\n self.end_pos = None\n\n if tokens:\n self.start_pos = tokens[0].start_position\n self.end_pos = tokens[len(tokens) - 1].end_position\n\n @property\n def text(self) -> str:\n return \" \".join([t.text for t in self.tokens])\n\n def to_original_text(self) -> str:\n pos = self.tokens[0].start_pos\n if pos is None:\n return \" \".join([t.text for t in self.tokens])\n str = \"\"\n for t in self.tokens:\n while t.start_pos != pos:\n str += \" \"\n pos += 1\n\n str += t.text\n pos += len(t.text)\n\n return str\n\n def to_dict(self):\n return {\n \"text\": self.to_original_text(),\n \"start_pos\": self.start_pos,\n \"end_pos\": self.end_pos,\n \"labels\": self.labels,\n }\n\n def __str__(self) -> str:\n ids = \",\".join([str(t.idx) for t in self.tokens])\n label_string = \" \".join([str(label) for label in self.labels])\n labels = f' [− Labels: {label_string}]' if self.labels is not None else \"\"\n return (\n 'Span [{}]: \"{}\"{}'.format(ids, self.text, labels)\n )\n\n def __repr__(self) -> str:\n ids = \",\".join([str(t.idx) for t in self.tokens])\n return (\n '<{}-span ({}): \"{}\">'.format(self.tag, ids, self.text)\n if self.tag is not None\n else '<span ({}): \"{}\">'.format(ids, self.text)\n )\n\n def __getitem__(self, idx: int) -> Token:\n return self.tokens[idx]\n\n def __iter__(self):\n return iter(self.tokens)\n\n def __len__(self) -> int:\n return len(self.tokens)\n\n @property\n def tag(self):\n return self.labels[0].value\n\n @property\n def score(self):\n return self.labels[0].score\n\n\nclass Tokenizer(ABC):\n r\"\"\"An abstract class representing a :class:`Tokenizer`.\n\n Tokenizers are used to represent algorithms and models to split plain text into\n individual tokens / words. All subclasses should overwrite :meth:`tokenize`, which\n splits the given plain text into tokens. 
Moreover, subclasses may overwrite\n :meth:`name`, returning a unique identifier representing the tokenizer's\n configuration.\n \"\"\"\n\n @abstractmethod\n def tokenize(self, text: str) -> List[Token]:\n raise NotImplementedError()\n\n @property\n def name(self) -> str:\n return self.__class__.__name__\n\n\n@deprecated(version=\"0.5\", reason=\"Use 'flair.tokenization.SpaceTokenizer' instead.\")\ndef space_tokenizer(text: str) -> List[Token]:\n # We don't want to create a SpaceTokenizer object each time this function is called,\n # so delegate the call directly to the static run_tokenize method\n from flair.tokenization import SpaceTokenizer\n return SpaceTokenizer.run_tokenize(text)\n\n\n@deprecated(version=\"0.5\", reason=\"Use 'flair.tokenization.SegtokTokenizer' instead.\")\ndef segtok_tokenizer(text: str) -> List[Token]:\n # We don't want to create a SegtokTokenizer object each time this function is called,\n # so delegate the call directly to the static run_tokenize method\n from flair.tokenization import SegtokTokenizer\n return SegtokTokenizer.run_tokenize(text)\n\n\n@deprecated(version=\"0.5\", reason=\"Use 'flair.tokenization.SpacyTokenizer' instead.\")\ndef build_spacy_tokenizer(model) -> Callable[[str], List[Token]]:\n from flair.tokenization import SpacyTokenizer\n spacy_tokenizer = SpacyTokenizer(model)\n\n def tokenizer(text: str) -> List[Token]:\n return spacy_tokenizer.tokenize(text)\n\n return tokenizer\n\n\n@deprecated(version=\"0.5\", reason=\"Use 'flair.tokenization.JapaneseTokenizer' instead.\")\ndef build_japanese_tokenizer(tokenizer: str = \"MeCab\"):\n from flair.tokenization import JapaneseTokenizer\n japanese_tokenizer = JapaneseTokenizer(tokenizer)\n\n def tokenizer(text: str) -> List[Token]:\n return japanese_tokenizer.tokenize(text)\n\n return tokenizer\n\n\nclass Sentence(DataPoint):\n \"\"\"\n A Sentence is a list of tokens and is used to represent a sentence or text fragment.\n \"\"\"\n\n def __init__(\n self,\n text: Union[str, List[str]] = None,\n use_tokenizer: Union[bool, Tokenizer] = True,\n language_code: str = None,\n start_position: int = None\n ):\n \"\"\"\n Class to hold all meta related to a text (tokens, predictions, language code, ...)\n :param text: original string (sentence), or a list of string tokens (words)\n :param use_tokenizer: a custom tokenizer (default is :class:`SpaceTokenizer`)\n more advanced options are :class:`SegTokTokenizer` to use segtok or :class:`SpacyTokenizer`\n to use Spacy library if available). Check the implementations of abstract class Tokenizer or\n implement your own subclass (if you need it). 
If instead of providing a Tokenizer, this parameter\n is just set to True (deprecated), :class:`SegtokTokenizer` will be used.\n :param language_code: Language of the sentence\n :param start_position: Start char offset of the sentence in the superordinate document\n \"\"\"\n super().__init__()\n\n self.tokens: List[Token] = []\n\n self._embeddings: Dict = {}\n\n self.language_code: str = language_code\n\n self.start_pos = start_position\n self.end_pos = (\n start_position + len(text) if start_position is not None else None\n )\n\n if isinstance(use_tokenizer, Tokenizer):\n tokenizer = use_tokenizer\n elif hasattr(use_tokenizer, \"__call__\"):\n from flair.tokenization import TokenizerWrapper\n tokenizer = TokenizerWrapper(use_tokenizer)\n elif type(use_tokenizer) == bool:\n from flair.tokenization import SegtokTokenizer, SpaceTokenizer\n tokenizer = SegtokTokenizer() if use_tokenizer else SpaceTokenizer()\n else:\n raise AssertionError(\"Unexpected type of parameter 'use_tokenizer'. \" +\n \"Parameter should be bool, Callable[[str], List[Token]] (deprecated), Tokenizer\")\n\n # if text is passed, instantiate sentence with tokens (words)\n if text is not None:\n if isinstance(text, (list, tuple)):\n [self.add_token(self._restore_windows_1252_characters(token))\n for token in text]\n else:\n text = self._restore_windows_1252_characters(text)\n [self.add_token(token) for token in tokenizer.tokenize(text)]\n\n # log a warning if the dataset is empty\n if text == \"\":\n log.warning(\n \"Warning: An empty Sentence was created! Are there empty strings in your dataset?\"\n )\n\n self.tokenized = None\n\n # some sentences represent a document boundary (but most do not)\n self.is_document_boundary: bool = False\n\n def get_token(self, token_id: int) -> Token:\n for token in self.tokens:\n if token.idx == token_id:\n return token\n\n def add_token(self, token: Union[Token, str]):\n\n if type(token) is str:\n token = Token(token)\n\n token.text = token.text.replace('\\u200c', '')\n token.text = token.text.replace('\\u200b', '')\n token.text = token.text.replace('\\ufe0f', '')\n token.text = token.text.replace('\\ufeff', '')\n\n # data with zero-width characters cannot be handled\n if token.text.strip() == '':\n return\n\n self.tokens.append(token)\n\n # set token idx if not set\n token.sentence = self\n if token.idx is None:\n token.idx = len(self.tokens)\n\n def get_label_names(self):\n label_names = []\n for label in self.labels:\n label_names.append(label.value)\n return label_names\n\n def _add_spans_internal(self, spans: List[Span], label_type: str, min_score):\n\n current_span = []\n\n tags = defaultdict(lambda: 0.0)\n\n previous_tag_value: str = \"O\"\n for token in self:\n\n tag: Label = token.get_tag(label_type)\n tag_value = tag.value\n\n # non-set tags are OUT tags\n if tag_value == \"\" or tag_value == \"O\" or tag_value == \"_\":\n tag_value = \"O-\"\n\n # anything that is not a BIOES tag is a SINGLE tag\n if tag_value[0:2] not in [\"B-\", \"I-\", \"O-\", \"E-\", \"S-\"]:\n tag_value = \"S-\" + tag_value\n\n # anything that is not OUT is IN\n in_span = False\n if tag_value[0:2] not in [\"O-\"]:\n in_span = True\n\n # single and begin tags start a new span\n starts_new_span = False\n if tag_value[0:2] in [\"B-\", \"S-\"]:\n starts_new_span = True\n\n if (\n previous_tag_value[0:2] in [\"S-\"]\n and previous_tag_value[2:] != tag_value[2:]\n and in_span\n ):\n starts_new_span = True\n\n if (starts_new_span or not in_span) and len(current_span) > 0:\n scores = 
[t.get_labels(label_type)[0].score for t in current_span]\n span_score = sum(scores) / len(scores)\n if span_score > min_score:\n span = Span(current_span)\n span.add_label(\n label_type=label_type,\n value=sorted(tags.items(), key=lambda k_v: k_v[1], reverse=True)[0][0],\n score=span_score)\n spans.append(span)\n\n current_span = []\n tags = defaultdict(lambda: 0.0)\n\n if in_span:\n current_span.append(token)\n weight = 1.1 if starts_new_span else 1.0\n tags[tag_value[2:]] += weight\n\n # remember previous tag\n previous_tag_value = tag_value\n\n if len(current_span) > 0:\n scores = [t.get_labels(label_type)[0].score for t in current_span]\n span_score = sum(scores) / len(scores)\n if span_score > min_score:\n span = Span(current_span)\n span.add_label(\n label_type=label_type,\n value=sorted(tags.items(), key=lambda k_v: k_v[1], reverse=True)[0][0],\n score=span_score)\n spans.append(span)\n\n return spans\n\n def get_spans(self, label_type: Optional[str] = None, min_score=-1) -> List[Span]:\n\n spans: List[Span] = []\n\n # if label type is explicitly specified, get spans for this label type\n if label_type:\n return self._add_spans_internal(spans, label_type, min_score)\n\n # else determine all label types in sentence and get all spans\n label_types = []\n for token in self:\n for annotation in token.annotation_layers.keys():\n if annotation not in label_types: label_types.append(annotation)\n\n for label_type in label_types:\n self._add_spans_internal(spans, label_type, min_score)\n return spans\n\n @property\n def embedding(self):\n return self.get_embedding()\n\n def set_embedding(self, name: str, vector: torch.tensor):\n device = flair.device\n if (flair.embedding_storage_mode == \"cpu\") and len(self._embeddings.keys()) > 0:\n device = next(iter(self._embeddings.values())).device\n if device != vector.device:\n vector = vector.to(device)\n self._embeddings[name] = vector\n\n def get_embedding(self, names: Optional[List[str]] = None) -> torch.tensor:\n embeddings = []\n for embed in sorted(self._embeddings.keys()):\n if names and embed not in names: continue\n embedding = self._embeddings[embed]\n embeddings.append(embedding)\n\n if embeddings:\n return torch.cat(embeddings, dim=0)\n\n return torch.Tensor()\n\n def to(self, device: str, pin_memory: bool = False):\n\n # move sentence embeddings to device\n for name, vector in self._embeddings.items():\n if str(vector.device) != str(device):\n if pin_memory:\n self._embeddings[name] = vector.to(\n device, non_blocking=True\n ).pin_memory()\n else:\n self._embeddings[name] = vector.to(device, non_blocking=True)\n\n # move token embeddings to device\n for token in self:\n token.to(device, pin_memory)\n\n def clear_embeddings(self, embedding_names: List[str] = None):\n\n # clear sentence embeddings\n if embedding_names is None:\n self._embeddings: Dict = {}\n else:\n for name in embedding_names:\n if name in self._embeddings.keys():\n del self._embeddings[name]\n\n # clear token embeddings\n for token in self:\n token.clear_embeddings(embedding_names)\n\n def to_tagged_string(self, main_tag=None) -> str:\n list = []\n for token in self.tokens:\n list.append(token.text)\n\n tags: List[str] = []\n for label_type in token.annotation_layers.keys():\n\n if main_tag is not None and main_tag != label_type:\n continue\n\n if token.get_labels(label_type)[0].value == \"O\":\n continue\n if token.get_labels(label_type)[0].value == \"_\":\n continue\n\n tags.append(token.get_labels(label_type)[0].value)\n all_tags = \"<\" + \"/\".join(tags) + 
\">\"\n if all_tags != \"<>\":\n list.append(all_tags)\n return \" \".join(list)\n\n def to_tokenized_string(self) -> str:\n\n if self.tokenized is None:\n self.tokenized = \" \".join([t.text for t in self.tokens])\n\n return self.tokenized\n\n def to_plain_string(self):\n plain = \"\"\n for token in self.tokens:\n plain += token.text\n if token.whitespace_after:\n plain += \" \"\n return plain.rstrip()\n\n def convert_tag_scheme(self, tag_type: str = \"ner\", target_scheme: str = \"iob\"):\n\n tags: List[Label] = []\n for token in self.tokens:\n tags.append(token.get_tag(tag_type))\n\n if target_scheme == \"iob\":\n iob2(tags)\n\n if target_scheme == \"iobes\":\n iob2(tags)\n tags = iob_iobes(tags)\n\n for index, tag in enumerate(tags):\n self.tokens[index].set_label(tag_type, tag)\n\n def infer_space_after(self):\n \"\"\"\n Heuristics in case you wish to infer whitespace_after values for tokenized text. This is useful for some old NLP\n tasks (such as CoNLL-03 and CoNLL-2000) that provide only tokenized data with no info of original whitespacing.\n :return:\n \"\"\"\n last_token = None\n quote_count: int = 0\n # infer whitespace after field\n\n for token in self.tokens:\n if token.text == '\"':\n quote_count += 1\n if quote_count % 2 != 0:\n token.whitespace_after = False\n elif last_token is not None:\n last_token.whitespace_after = False\n\n if last_token is not None:\n\n if token.text in [\".\", \":\", \",\", \";\", \")\", \"n't\", \"!\", \"?\"]:\n last_token.whitespace_after = False\n\n if token.text.startswith(\"'\"):\n last_token.whitespace_after = False\n\n if token.text in [\"(\"]:\n token.whitespace_after = False\n\n last_token = token\n return self\n\n def to_original_text(self) -> str:\n if len(self.tokens) > 0 and (self.tokens[0].start_pos is None):\n return \" \".join([t.text for t in self.tokens])\n str = \"\"\n pos = 0\n for t in self.tokens:\n while t.start_pos > pos:\n str += \" \"\n pos += 1\n\n str += t.text\n pos += len(t.text)\n\n return str\n\n def to_dict(self, tag_type: str = None):\n labels = []\n entities = []\n\n if tag_type:\n entities = [span.to_dict() for span in self.get_spans(tag_type)]\n if self.labels:\n labels = [l.to_dict() for l in self.labels]\n\n return {\"text\": self.to_original_text(), \"labels\": labels, \"entities\": entities}\n\n def __getitem__(self, idx: int) -> Token:\n return self.tokens[idx]\n\n def __iter__(self):\n return iter(self.tokens)\n\n def __len__(self) -> int:\n return len(self.tokens)\n\n def __repr__(self):\n tagged_string = self.to_tagged_string()\n tokenized_string = self.to_tokenized_string()\n\n # add Sentence labels to output if they exist\n sentence_labels = f\" − Sentence-Labels: {self.annotation_layers}\" if self.annotation_layers != {} else \"\"\n\n # add Token labels to output if they exist\n token_labels = f' − Token-Labels: \"{tagged_string}\"' if tokenized_string != tagged_string else \"\"\n\n return f'Sentence: \"{tokenized_string}\" [− Tokens: {len(self)}{token_labels}{sentence_labels}]'\n\n def __copy__(self):\n s = Sentence()\n for token in self.tokens:\n nt = Token(token.text)\n for tag_type in token.tags:\n nt.add_label(\n tag_type,\n token.get_tag(tag_type).value,\n token.get_tag(tag_type).score,\n )\n\n s.add_token(nt)\n return s\n\n def __str__(self) -> str:\n\n tagged_string = self.to_tagged_string()\n tokenized_string = self.to_tokenized_string()\n\n # add Sentence labels to output if they exist\n sentence_labels = f\" − Sentence-Labels: {self.annotation_layers}\" if self.annotation_layers != {} else 
\"\"\n\n # add Token labels to output if they exist\n token_labels = f' − Token-Labels: \"{tagged_string}\"' if tokenized_string != tagged_string else \"\"\n\n return f'Sentence: \"{tokenized_string}\" [− Tokens: {len(self)}{token_labels}{sentence_labels}]'\n\n def get_language_code(self) -> str:\n if self.language_code is None:\n import langdetect\n\n try:\n self.language_code = langdetect.detect(self.to_plain_string())\n except:\n self.language_code = \"en\"\n\n return self.language_code\n\n @staticmethod\n def _restore_windows_1252_characters(text: str) -> str:\n def to_windows_1252(match):\n try:\n return bytes([ord(match.group(0))]).decode(\"windows-1252\")\n except UnicodeDecodeError:\n # No character at the corresponding code point: remove it\n return \"\"\n\n return re.sub(r\"[\\u0080-\\u0099]\", to_windows_1252, text)\n\n def next_sentence(self):\n \"\"\"\n Get the next sentence in the document (works only if context is set through dataloader or elsewhere)\n :return: next Sentence in document if set, otherwise None\n \"\"\"\n if '_next_sentence' in self.__dict__.keys():\n return self._next_sentence\n\n if '_position_in_dataset' in self.__dict__.keys():\n dataset = self._position_in_dataset[0]\n index = self._position_in_dataset[1] + 1\n if index < len(dataset):\n return dataset[index]\n\n return None\n\n def previous_sentence(self):\n \"\"\"\n Get the previous sentence in the document (works only if context is set through dataloader or elsewhere)\n :return: previous Sentence in document if set, otherwise None\n \"\"\"\n if '_previous_sentence' in self.__dict__.keys():\n return self._previous_sentence\n\n if '_position_in_dataset' in self.__dict__.keys():\n dataset = self._position_in_dataset[0]\n index = self._position_in_dataset[1] - 1\n if index >= 0:\n return dataset[index]\n\n return None\n\n def is_context_set(self) -> bool:\n \"\"\"\n Return True or False depending on whether context is set (for instance in dataloader or elsewhere)\n :return: True if context is set, else False\n \"\"\"\n return '_previous_sentence' in self.__dict__.keys() or '_position_in_dataset' in self.__dict__.keys()\n\n\nclass Image(DataPoint):\n\n def __init__(self, data=None, imageURL=None):\n super().__init__()\n\n self.data = data\n self._embeddings: Dict = {}\n self.imageURL = imageURL\n\n @property\n def embedding(self):\n return self.get_embedding()\n\n def __str__(self):\n\n image_repr = self.data.size() if self.data else \"\"\n image_url = self.imageURL if self.imageURL else \"\"\n\n return f\"Image: {image_repr} {image_url}\"\n\n def get_embedding(self) -> torch.tensor:\n embeddings = [\n self._embeddings[embed] for embed in sorted(self._embeddings.keys())\n ]\n\n if embeddings:\n return torch.cat(embeddings, dim=0)\n\n return torch.tensor([], device=flair.device)\n\n def set_embedding(self, name: str, vector: torch.tensor):\n device = flair.device\n if (flair.embedding_storage_mode == \"cpu\") and len(self._embeddings.keys()) > 0:\n device = next(iter(self._embeddings.values())).device\n if device != vector.device:\n vector = vector.to(device)\n self._embeddings[name] = vector\n\n def to(self, device: str, pin_memory: bool = False):\n for name, vector in self._embeddings.items():\n if str(vector.device) != str(device):\n if pin_memory:\n self._embeddings[name] = vector.to(\n device, non_blocking=True\n ).pin_memory()\n else:\n self._embeddings[name] = vector.to(device, non_blocking=True)\n\n def clear_embeddings(self, embedding_names: List[str] = None):\n if embedding_names is None:\n 
self._embeddings: Dict = {}\n else:\n for name in embedding_names:\n if name in self._embeddings.keys():\n del self._embeddings[name]\n\n\nclass FlairDataset(Dataset):\n @abstractmethod\n def is_in_memory(self) -> bool:\n pass\n\n\nclass Corpus:\n def __init__(\n self,\n train: FlairDataset,\n dev: FlairDataset = None,\n test: FlairDataset = None,\n name: str = \"corpus\",\n sample_missing_splits: bool = True,\n ):\n # set name\n self.name: str = name\n\n # sample test data if none is provided\n if test is None and sample_missing_splits:\n train_length = len(train)\n test_size: int = round(train_length / 10)\n splits = randomly_split_into_two_datasets(train, test_size)\n test = splits[0]\n train = splits[1]\n\n # sample dev data if none is provided\n if dev is None and sample_missing_splits:\n train_length = len(train)\n dev_size: int = round(train_length / 10)\n splits = randomly_split_into_two_datasets(train, dev_size)\n dev = splits[0]\n train = splits[1]\n\n # set train dev and test data\n self._train: FlairDataset = train\n self._test: FlairDataset = test\n self._dev: FlairDataset = dev\n\n @property\n def train(self) -> FlairDataset:\n return self._train\n\n @property\n def dev(self) -> FlairDataset:\n return self._dev\n\n @property\n def test(self) -> FlairDataset:\n return self._test\n\n def downsample(self, percentage: float = 0.1, downsample_train=True, downsample_dev=True, downsample_test=True):\n\n if downsample_train:\n self._train = self._downsample_to_proportion(self.train, percentage)\n\n if downsample_dev:\n self._dev = self._downsample_to_proportion(self.dev, percentage)\n\n if downsample_test:\n self._test = self._downsample_to_proportion(self.test, percentage)\n\n return self\n\n def filter_empty_sentences(self):\n log.info(\"Filtering empty sentences\")\n self._train = Corpus._filter_empty_sentences(self._train)\n self._test = Corpus._filter_empty_sentences(self._test)\n self._dev = Corpus._filter_empty_sentences(self._dev)\n log.info(self)\n\n def filter_long_sentences(self, max_charlength: int):\n log.info(\"Filtering long sentences\")\n self._train = Corpus._filter_long_sentences(self._train, max_charlength)\n self._test = Corpus._filter_long_sentences(self._test, max_charlength)\n self._dev = Corpus._filter_long_sentences(self._dev, max_charlength)\n log.info(self)\n\n @staticmethod\n def _filter_long_sentences(dataset, max_charlength: int) -> Dataset:\n\n # find out empty sentence indices\n empty_sentence_indices = []\n non_empty_sentence_indices = []\n index = 0\n\n from flair.datasets import DataLoader\n\n for batch in DataLoader(dataset):\n for sentence in batch:\n if len(sentence.to_plain_string()) > max_charlength:\n empty_sentence_indices.append(index)\n else:\n non_empty_sentence_indices.append(index)\n index += 1\n\n # create subset of non-empty sentence indices\n subset = Subset(dataset, non_empty_sentence_indices)\n\n return subset\n\n @staticmethod\n def _filter_empty_sentences(dataset) -> Dataset:\n\n # find out empty sentence indices\n empty_sentence_indices = []\n non_empty_sentence_indices = []\n index = 0\n\n from flair.datasets import DataLoader\n\n for batch in DataLoader(dataset):\n for sentence in batch:\n if len(sentence) == 0:\n empty_sentence_indices.append(index)\n else:\n non_empty_sentence_indices.append(index)\n index += 1\n\n # create subset of non-empty sentence indices\n subset = Subset(dataset, non_empty_sentence_indices)\n\n return subset\n\n def make_vocab_dictionary(self, max_tokens=-1, min_freq=1) -> Dictionary:\n \"\"\"\n 
Creates a dictionary of all tokens contained in the corpus.\n By defining `max_tokens` you can set the maximum number of tokens that should be contained in the dictionary.\n If there are more than `max_tokens` tokens in the corpus, the most frequent tokens are added first.\n If `min_freq` is set the a value greater than 1 only tokens occurring more than `min_freq` times are considered\n to be added to the dictionary.\n :param max_tokens: the maximum number of tokens that should be added to the dictionary (-1 = take all tokens)\n :param min_freq: a token needs to occur at least `min_freq` times to be added to the dictionary (-1 = there is no limitation)\n :return: dictionary of tokens\n \"\"\"\n tokens = self._get_most_common_tokens(max_tokens, min_freq)\n\n vocab_dictionary: Dictionary = Dictionary()\n for token in tokens:\n vocab_dictionary.add_item(token)\n\n return vocab_dictionary\n\n def _get_most_common_tokens(self, max_tokens, min_freq) -> List[str]:\n tokens_and_frequencies = Counter(self._get_all_tokens())\n tokens_and_frequencies = tokens_and_frequencies.most_common()\n\n tokens = []\n for token, freq in tokens_and_frequencies:\n if (min_freq != -1 and freq < min_freq) or (\n max_tokens != -1 and len(tokens) == max_tokens\n ):\n break\n tokens.append(token)\n return tokens\n\n def _get_all_tokens(self) -> List[str]:\n tokens = list(map((lambda s: s.tokens), self.train))\n tokens = [token for sublist in tokens for token in sublist]\n return list(map((lambda t: t.text), tokens))\n\n @staticmethod\n def _downsample_to_proportion(dataset: Dataset, proportion: float):\n\n sampled_size: int = round(len(dataset) * proportion)\n splits = randomly_split_into_two_datasets(dataset, sampled_size)\n return splits[0]\n\n def obtain_statistics(\n self, label_type: str = None, pretty_print: bool = True\n ) -> dict:\n \"\"\"\n Print statistics about the class distribution (only labels of sentences are taken into account) and sentence\n sizes.\n \"\"\"\n json_string = {\n \"TRAIN\": self._obtain_statistics_for(self.train, \"TRAIN\", label_type),\n \"TEST\": self._obtain_statistics_for(self.test, \"TEST\", label_type),\n \"DEV\": self._obtain_statistics_for(self.dev, \"DEV\", label_type),\n }\n if pretty_print:\n import json\n\n json_string = json.dumps(json_string, indent=4)\n return json_string\n\n @staticmethod\n def _obtain_statistics_for(sentences, name, tag_type) -> dict:\n if len(sentences) == 0:\n return {}\n\n classes_to_count = Corpus._count_sentence_labels(sentences)\n tags_to_count = Corpus._count_token_labels(sentences, tag_type)\n tokens_per_sentence = Corpus._get_tokens_per_sentence(sentences)\n\n label_size_dict = {}\n for l, c in classes_to_count.items():\n label_size_dict[l] = c\n\n tag_size_dict = {}\n for l, c in tags_to_count.items():\n tag_size_dict[l] = c\n\n return {\n \"dataset\": name,\n \"total_number_of_documents\": len(sentences),\n \"number_of_documents_per_class\": label_size_dict,\n \"number_of_tokens_per_tag\": tag_size_dict,\n \"number_of_tokens\": {\n \"total\": sum(tokens_per_sentence),\n \"min\": min(tokens_per_sentence),\n \"max\": max(tokens_per_sentence),\n \"avg\": sum(tokens_per_sentence) / len(sentences),\n },\n }\n\n @staticmethod\n def _get_tokens_per_sentence(sentences):\n return list(map(lambda x: len(x.tokens), sentences))\n\n @staticmethod\n def _count_sentence_labels(sentences):\n label_count = defaultdict(lambda: 0)\n for sent in sentences:\n for label in sent.labels:\n label_count[label.value] += 1\n return label_count\n\n @staticmethod\n def 
_count_token_labels(sentences, label_type):\n label_count = defaultdict(lambda: 0)\n for sent in sentences:\n for token in sent.tokens:\n if label_type in token.annotation_layers.keys():\n label = token.get_tag(label_type)\n label_count[label.value] += 1\n return label_count\n\n def __str__(self) -> str:\n return \"Corpus: %d train + %d dev + %d test sentences\" % (\n len(self.train) if self.train else 0,\n len(self.dev) if self.dev else 0,\n len(self.test) if self.test else 0,\n )\n\n def make_label_dictionary(self, label_type: str = None) -> Dictionary:\n \"\"\"\n Creates a dictionary of all labels assigned to the sentences in the corpus.\n :return: dictionary of labels\n \"\"\"\n label_dictionary: Dictionary = Dictionary(add_unk=False)\n label_dictionary.multi_label = False\n\n from flair.datasets import DataLoader\n\n data = ConcatDataset([self.train, self.test])\n loader = DataLoader(data, batch_size=1)\n\n log.info(\"Computing label dictionary. Progress:\")\n for batch in Tqdm.tqdm(iter(loader)):\n\n for sentence in batch:\n\n # check if sentence itself has labels\n labels = sentence.get_labels(label_type) if label_type is not None else sentence.labels\n\n for label in labels:\n label_dictionary.add_item(label.value)\n\n # check for labels of words\n if isinstance(sentence, Sentence):\n for token in sentence.tokens:\n for label in token.get_labels(label_type):\n label_dictionary.add_item(label.value)\n\n if not label_dictionary.multi_label:\n if len(labels) > 1:\n label_dictionary.multi_label = True\n\n log.info(label_dictionary.idx2item)\n\n return label_dictionary\n\n def get_label_distribution(self):\n class_to_count = defaultdict(lambda: 0)\n for sent in self.train:\n for label in sent.labels:\n class_to_count[label.value] += 1\n return class_to_count\n\n def get_all_sentences(self) -> Dataset:\n parts = []\n if self.train: parts.append(self.train)\n if self.dev: parts.append(self.dev)\n if self.test: parts.append(self.test)\n return ConcatDataset(parts)\n\n def make_tag_dictionary(self, tag_type: str) -> Dictionary:\n\n # Make the tag dictionary\n tag_dictionary: Dictionary = Dictionary()\n tag_dictionary.add_item(\"O\")\n for sentence in self.get_all_sentences():\n for token in sentence.tokens:\n tag_dictionary.add_item(token.get_tag(tag_type).value)\n tag_dictionary.add_item(\"<START>\")\n tag_dictionary.add_item(\"<STOP>\")\n return tag_dictionary\n\n\nclass MultiCorpus(Corpus):\n def __init__(self, corpora: List[Corpus], name: str = \"multicorpus\", **corpusargs):\n self.corpora: List[Corpus] = corpora\n\n train_parts = []\n dev_parts = []\n test_parts = []\n for corpus in self.corpora:\n if corpus.train: train_parts.append(corpus.train)\n if corpus.dev: dev_parts.append(corpus.dev)\n if corpus.test: test_parts.append(corpus.test)\n\n super(MultiCorpus, self).__init__(\n ConcatDataset(train_parts) if len(train_parts) > 0 else None,\n ConcatDataset(dev_parts) if len(dev_parts) > 0 else None,\n ConcatDataset(test_parts) if len(test_parts) > 0 else None,\n name=name,\n **corpusargs,\n )\n\n def __str__(self):\n output = f\"MultiCorpus: \" \\\n f\"{len(self.train) if self.train else 0} train + \" \\\n f\"{len(self.dev) if self.dev else 0} dev + \" \\\n f\"{len(self.test) if self.test else 0} test sentences\\n - \"\n output += \"\\n - \".join([f'{type(corpus).__name__} {str(corpus)}' for corpus in self.corpora])\n return output\n\n\ndef iob2(tags):\n \"\"\"\n Check that tags have a valid IOB format.\n Tags in IOB1 format are converted to IOB2.\n \"\"\"\n for i, tag in 
enumerate(tags):\n if tag.value == \"O\":\n continue\n split = tag.value.split(\"-\")\n if len(split) != 2 or split[0] not in [\"I\", \"B\"]:\n return False\n if split[0] == \"B\":\n continue\n elif i == 0 or tags[i - 1].value == \"O\": # conversion IOB1 to IOB2\n tags[i].value = \"B\" + tag.value[1:]\n elif tags[i - 1].value[1:] == tag.value[1:]:\n continue\n else: # conversion IOB1 to IOB2\n tags[i].value = \"B\" + tag.value[1:]\n return True\n\n\ndef iob_iobes(tags):\n \"\"\"\n IOB -> IOBES\n \"\"\"\n new_tags = []\n for i, tag in enumerate(tags):\n if tag.value == \"O\":\n new_tags.append(tag.value)\n elif tag.value.split(\"-\")[0] == \"B\":\n if i + 1 != len(tags) and tags[i + 1].value.split(\"-\")[0] == \"I\":\n new_tags.append(tag.value)\n else:\n new_tags.append(tag.value.replace(\"B-\", \"S-\"))\n elif tag.value.split(\"-\")[0] == \"I\":\n if i + 1 < len(tags) and tags[i + 1].value.split(\"-\")[0] == \"I\":\n new_tags.append(tag.value)\n else:\n new_tags.append(tag.value.replace(\"I-\", \"E-\"))\n else:\n raise Exception(\"Invalid IOB format!\")\n return new_tags\n\n\ndef randomly_split_into_two_datasets(dataset, length_of_first):\n import random\n indices = [i for i in range(len(dataset))]\n random.shuffle(indices)\n\n first_dataset = indices[:length_of_first]\n second_dataset = indices[length_of_first:]\n first_dataset.sort()\n second_dataset.sort()\n\n return [Subset(dataset, first_dataset), Subset(dataset, second_dataset)]\n", "path": "flair/data.py" } ]
[ { "content": "import torch, flair\nimport logging\nimport re\n\nfrom abc import abstractmethod, ABC\n\nfrom collections import Counter\nfrom collections import defaultdict\n\nfrom deprecated import deprecated\nfrom flair.file_utils import Tqdm\nfrom operator import itemgetter\n\nfrom torch.utils.data import Dataset\nfrom torch.utils.data.dataset import ConcatDataset, Subset\n\nfrom typing import List, Dict, Union, Callable, Optional\n\nlog = logging.getLogger(\"flair\")\n\n\nclass Dictionary:\n \"\"\"\n This class holds a dictionary that maps strings to IDs, used to generate one-hot encodings of strings.\n \"\"\"\n\n def __init__(self, add_unk=True):\n # init dictionaries\n self.item2idx: Dict[str, int] = {}\n self.idx2item: List[str] = []\n self.multi_label: bool = False\n\n # in order to deal with unknown tokens, add <unk>\n if add_unk:\n self.add_item(\"<unk>\")\n\n def add_item(self, item: str) -> int:\n \"\"\"\n add string - if already in dictionary returns its ID. if not in dictionary, it will get a new ID.\n :param item: a string for which to assign an id.\n :return: ID of string\n \"\"\"\n item = item.encode(\"utf-8\")\n if item not in self.item2idx:\n self.idx2item.append(item)\n self.item2idx[item] = len(self.idx2item) - 1\n return self.item2idx[item]\n\n def get_idx_for_item(self, item: str) -> int:\n \"\"\"\n returns the ID of the string, otherwise 0\n :param item: string for which ID is requested\n :return: ID of string, otherwise 0\n \"\"\"\n item = item.encode(\"utf-8\")\n if item in self.item2idx.keys():\n return self.item2idx[item]\n else:\n return 0\n\n def get_idx_for_items(self, items: List[str]) -> List[int]:\n \"\"\"\n returns the IDs for each item of the list of string, otherwise 0 if not found\n :param items: List of string for which IDs are requested\n :return: List of ID of strings\n \"\"\"\n if not hasattr(self, \"item2idx_not_encoded\"):\n d = dict(\n [(key.decode(\"UTF-8\"), value) for key, value in self.item2idx.items()]\n )\n self.item2idx_not_encoded = defaultdict(int, d)\n\n if not items:\n return []\n results = itemgetter(*items)(self.item2idx_not_encoded)\n if isinstance(results, int):\n return [results]\n return list(results)\n\n def get_items(self) -> List[str]:\n items = []\n for item in self.idx2item:\n items.append(item.decode(\"UTF-8\"))\n return items\n\n def __len__(self) -> int:\n return len(self.idx2item)\n\n def get_item_for_index(self, idx):\n return self.idx2item[idx].decode(\"UTF-8\")\n\n def save(self, savefile):\n import pickle\n\n with open(savefile, \"wb\") as f:\n mappings = {\"idx2item\": self.idx2item, \"item2idx\": self.item2idx}\n pickle.dump(mappings, f)\n\n @classmethod\n def load_from_file(cls, filename: str):\n import pickle\n\n dictionary: Dictionary = Dictionary()\n with open(filename, \"rb\") as f:\n mappings = pickle.load(f, encoding=\"latin1\")\n idx2item = mappings[\"idx2item\"]\n item2idx = mappings[\"item2idx\"]\n dictionary.item2idx = item2idx\n dictionary.idx2item = idx2item\n return dictionary\n\n @classmethod\n def load(cls, name: str):\n from flair.file_utils import cached_path\n hu_path: str = \"https://flair.informatik.hu-berlin.de/resources/characters\"\n if name == \"chars\" or name == \"common-chars\":\n char_dict = cached_path(f\"{hu_path}/common_characters\", cache_dir=\"datasets\")\n return Dictionary.load_from_file(char_dict)\n\n if name == \"chars-large\" or name == \"common-chars-large\":\n char_dict = cached_path(f\"{hu_path}/common_characters_large\", cache_dir=\"datasets\")\n return 
Dictionary.load_from_file(char_dict)\n\n if name == \"chars-xl\" or name == \"common-chars-xl\":\n char_dict = cached_path(f\"{hu_path}/common_characters_xl\", cache_dir=\"datasets\")\n return Dictionary.load_from_file(char_dict)\n\n return Dictionary.load_from_file(name)\n\n def __str__(self):\n tags = ', '.join(self.get_item_for_index(i) for i in range(min(len(self), 30)))\n return f\"Dictionary with {len(self)} tags: {tags}\"\n\n\nclass Label:\n \"\"\"\n This class represents a label. Each label has a value and optionally a confidence score. The\n score needs to be between 0.0 and 1.0. Default value for the score is 1.0.\n \"\"\"\n\n def __init__(self, value: str, score: float = 1.0):\n self.value = value\n self.score = score\n super().__init__()\n\n @property\n def value(self):\n return self._value\n\n @value.setter\n def value(self, value):\n if not value and value != \"\":\n raise ValueError(\n \"Incorrect label value provided. Label value needs to be set.\"\n )\n else:\n self._value = value\n\n @property\n def score(self):\n return self._score\n\n @score.setter\n def score(self, score):\n if 0.0 <= score <= 1.0:\n self._score = score\n else:\n self._score = 1.0\n\n def to_dict(self):\n return {\"value\": self.value, \"confidence\": self.score}\n\n def __str__(self):\n return f\"{self._value} ({round(self._score, 4)})\"\n\n def __repr__(self):\n return f\"{self._value} ({round(self._score, 4)})\"\n\n\nclass DataPoint:\n \"\"\"\n This is the parent class of all data points in Flair (including Token, Sentence, Image, etc.). Each DataPoint\n must be embeddable (hence the abstract property embedding() and methods to() and clear_embeddings()). Also,\n each DataPoint may have Labels in several layers of annotation (hence the functions add_label(), get_labels()\n and the property 'label')\n \"\"\"\n\n def __init__(self):\n self.annotation_layers = {}\n\n @property\n @abstractmethod\n def embedding(self):\n pass\n\n @abstractmethod\n def to(self, device: str, pin_memory: bool = False):\n pass\n\n @abstractmethod\n def clear_embeddings(self, embedding_names: List[str] = None):\n pass\n\n def add_label(self, label_type: str, value: str, score: float = 1.):\n\n if label_type not in self.annotation_layers:\n self.annotation_layers[label_type] = [Label(value, score)]\n else:\n self.annotation_layers[label_type].append(Label(value, score))\n\n return self\n\n def set_label(self, label_type: str, value: str, score: float = 1.):\n self.annotation_layers[label_type] = [Label(value, score)]\n\n return self\n\n def remove_labels(self, label_type: str):\n if label_type in self.annotation_layers.keys():\n del self.annotation_layers[label_type]\n\n def get_labels(self, label_type: str = None):\n if label_type is None:\n return self.labels\n\n return self.annotation_layers[label_type] if label_type in self.annotation_layers else []\n\n @property\n def labels(self) -> List[Label]:\n all_labels = []\n for key in self.annotation_layers.keys():\n all_labels.extend(self.annotation_layers[key])\n return all_labels\n\n\nclass DataPair(DataPoint):\n def __init__(self, first: DataPoint, second: DataPoint):\n super().__init__()\n self.first = first\n self.second = second\n\n def to(self, device: str, pin_memory: bool = False):\n self.first.to(device, pin_memory)\n self.second.to(device, pin_memory)\n\n def clear_embeddings(self, embedding_names: List[str] = None):\n self.first.clear_embeddings(embedding_names)\n self.second.clear_embeddings(embedding_names)\n\n @property\n def embedding(self):\n return 
torch.cat([self.first.embedding, self.second.embedding])\n\n def __str__(self):\n return f\"DataPair:\\n − First {self.first}\\n − Second {self.second}\\n − Labels: {self.labels}\"\n\n def to_plain_string(self):\n return f\"DataPair: First {self.first} || Second {self.second}\"\n\n def __len__(self):\n return len(self.first) + len(self.second)\n\n\nclass Token(DataPoint):\n \"\"\"\n This class represents one word in a tokenized sentence. Each token may have any number of tags. It may also point\n to its head in a dependency tree.\n \"\"\"\n\n def __init__(\n self,\n text: str,\n idx: int = None,\n head_id: int = None,\n whitespace_after: bool = True,\n start_position: int = None,\n ):\n super().__init__()\n\n self.text: str = text\n self.idx: int = idx\n self.head_id: int = head_id\n self.whitespace_after: bool = whitespace_after\n\n self.start_pos = start_position\n self.end_pos = (\n start_position + len(text) if start_position is not None else None\n )\n\n self.sentence: Sentence = None\n self._embeddings: Dict = {}\n self.tags_proba_dist: Dict[str, List[Label]] = {}\n\n def add_tag_label(self, tag_type: str, tag: Label):\n self.set_label(tag_type, tag.value, tag.score)\n\n def add_tags_proba_dist(self, tag_type: str, tags: List[Label]):\n self.tags_proba_dist[tag_type] = tags\n\n def add_tag(self, tag_type: str, tag_value: str, confidence=1.0):\n self.set_label(tag_type, tag_value, confidence)\n\n def get_tag(self, label_type):\n if len(self.get_labels(label_type)) == 0: return Label('')\n return self.get_labels(label_type)[0]\n\n def get_tags_proba_dist(self, tag_type: str) -> List[Label]:\n if tag_type in self.tags_proba_dist:\n return self.tags_proba_dist[tag_type]\n return []\n\n def get_head(self):\n return self.sentence.get_token(self.head_id)\n\n def set_embedding(self, name: str, vector: torch.tensor):\n device = flair.device\n if (flair.embedding_storage_mode == \"cpu\") and len(self._embeddings.keys()) > 0:\n device = next(iter(self._embeddings.values())).device\n if device != vector.device:\n vector = vector.to(device)\n self._embeddings[name] = vector\n\n def to(self, device: str, pin_memory: bool = False):\n for name, vector in self._embeddings.items():\n if str(vector.device) != str(device):\n if pin_memory:\n self._embeddings[name] = vector.to(\n device, non_blocking=True\n ).pin_memory()\n else:\n self._embeddings[name] = vector.to(device, non_blocking=True)\n\n def clear_embeddings(self, embedding_names: List[str] = None):\n if embedding_names is None:\n self._embeddings: Dict = {}\n else:\n for name in embedding_names:\n if name in self._embeddings.keys():\n del self._embeddings[name]\n\n def get_each_embedding(self, embedding_names: Optional[List[str]] = None) -> torch.tensor:\n embeddings = []\n for embed in sorted(self._embeddings.keys()):\n if embedding_names and embed not in embedding_names: continue\n embed = self._embeddings[embed].to(flair.device)\n if (flair.embedding_storage_mode == \"cpu\") and embed.device != flair.device:\n embed = embed.to(flair.device)\n embeddings.append(embed)\n return embeddings\n\n def get_embedding(self, names: Optional[List[str]] = None) -> torch.tensor:\n embeddings = self.get_each_embedding(names)\n\n if embeddings:\n return torch.cat(embeddings, dim=0)\n\n return torch.tensor([], device=flair.device)\n\n @property\n def start_position(self) -> int:\n return self.start_pos\n\n @property\n def end_position(self) -> int:\n return self.end_pos\n\n @property\n def embedding(self):\n return self.get_embedding()\n\n def __str__(self) 
-> str:\n return (\n \"Token: {} {}\".format(self.idx, self.text)\n if self.idx is not None\n else \"Token: {}\".format(self.text)\n )\n\n def __repr__(self) -> str:\n return (\n \"Token: {} {}\".format(self.idx, self.text)\n if self.idx is not None\n else \"Token: {}\".format(self.text)\n )\n\n\nclass Span(DataPoint):\n \"\"\"\n This class represents one textual span consisting of Tokens.\n \"\"\"\n\n def __init__(self, tokens: List[Token]):\n\n super().__init__()\n\n self.tokens = tokens\n self.start_pos = None\n self.end_pos = None\n\n if tokens:\n self.start_pos = tokens[0].start_position\n self.end_pos = tokens[len(tokens) - 1].end_position\n\n @property\n def text(self) -> str:\n return \" \".join([t.text for t in self.tokens])\n\n def to_original_text(self) -> str:\n pos = self.tokens[0].start_pos\n if pos is None:\n return \" \".join([t.text for t in self.tokens])\n str = \"\"\n for t in self.tokens:\n while t.start_pos != pos:\n str += \" \"\n pos += 1\n\n str += t.text\n pos += len(t.text)\n\n return str\n\n def to_dict(self):\n return {\n \"text\": self.to_original_text(),\n \"start_pos\": self.start_pos,\n \"end_pos\": self.end_pos,\n \"labels\": self.labels,\n }\n\n def __str__(self) -> str:\n ids = \",\".join([str(t.idx) for t in self.tokens])\n label_string = \" \".join([str(label) for label in self.labels])\n labels = f' [− Labels: {label_string}]' if self.labels is not None else \"\"\n return (\n 'Span [{}]: \"{}\"{}'.format(ids, self.text, labels)\n )\n\n def __repr__(self) -> str:\n ids = \",\".join([str(t.idx) for t in self.tokens])\n return (\n '<{}-span ({}): \"{}\">'.format(self.tag, ids, self.text)\n if self.tag is not None\n else '<span ({}): \"{}\">'.format(ids, self.text)\n )\n\n def __getitem__(self, idx: int) -> Token:\n return self.tokens[idx]\n\n def __iter__(self):\n return iter(self.tokens)\n\n def __len__(self) -> int:\n return len(self.tokens)\n\n @property\n def tag(self):\n return self.labels[0].value\n\n @property\n def score(self):\n return self.labels[0].score\n\n\nclass Tokenizer(ABC):\n r\"\"\"An abstract class representing a :class:`Tokenizer`.\n\n Tokenizers are used to represent algorithms and models to split plain text into\n individual tokens / words. All subclasses should overwrite :meth:`tokenize`, which\n splits the given plain text into tokens. 
Moreover, subclasses may overwrite\n :meth:`name`, returning a unique identifier representing the tokenizer's\n configuration.\n \"\"\"\n\n @abstractmethod\n def tokenize(self, text: str) -> List[Token]:\n raise NotImplementedError()\n\n @property\n def name(self) -> str:\n return self.__class__.__name__\n\n\n@deprecated(version=\"0.5\", reason=\"Use 'flair.tokenization.SpaceTokenizer' instead.\")\ndef space_tokenizer(text: str) -> List[Token]:\n # We don't want to create a SpaceTokenizer object each time this function is called,\n # so delegate the call directly to the static run_tokenize method\n from flair.tokenization import SpaceTokenizer\n return SpaceTokenizer.run_tokenize(text)\n\n\n@deprecated(version=\"0.5\", reason=\"Use 'flair.tokenization.SegtokTokenizer' instead.\")\ndef segtok_tokenizer(text: str) -> List[Token]:\n # We don't want to create a SegtokTokenizer object each time this function is called,\n # so delegate the call directly to the static run_tokenize method\n from flair.tokenization import SegtokTokenizer\n return SegtokTokenizer.run_tokenize(text)\n\n\n@deprecated(version=\"0.5\", reason=\"Use 'flair.tokenization.SpacyTokenizer' instead.\")\ndef build_spacy_tokenizer(model) -> Callable[[str], List[Token]]:\n from flair.tokenization import SpacyTokenizer\n spacy_tokenizer = SpacyTokenizer(model)\n\n def tokenizer(text: str) -> List[Token]:\n return spacy_tokenizer.tokenize(text)\n\n return tokenizer\n\n\n@deprecated(version=\"0.5\", reason=\"Use 'flair.tokenization.JapaneseTokenizer' instead.\")\ndef build_japanese_tokenizer(tokenizer: str = \"MeCab\"):\n from flair.tokenization import JapaneseTokenizer\n japanese_tokenizer = JapaneseTokenizer(tokenizer)\n\n def tokenizer(text: str) -> List[Token]:\n return japanese_tokenizer.tokenize(text)\n\n return tokenizer\n\n\nclass Sentence(DataPoint):\n \"\"\"\n A Sentence is a list of tokens and is used to represent a sentence or text fragment.\n \"\"\"\n\n def __init__(\n self,\n text: Union[str, List[str]] = None,\n use_tokenizer: Union[bool, Tokenizer] = True,\n language_code: str = None,\n start_position: int = None\n ):\n \"\"\"\n Class to hold all meta related to a text (tokens, predictions, language code, ...)\n :param text: original string (sentence), or a list of string tokens (words)\n :param use_tokenizer: a custom tokenizer (default is :class:`SpaceTokenizer`)\n more advanced options are :class:`SegTokTokenizer` to use segtok or :class:`SpacyTokenizer`\n to use Spacy library if available). Check the implementations of abstract class Tokenizer or\n implement your own subclass (if you need it). 
If instead of providing a Tokenizer, this parameter\n is just set to True (deprecated), :class:`SegtokTokenizer` will be used.\n :param language_code: Language of the sentence\n :param start_position: Start char offset of the sentence in the superordinate document\n \"\"\"\n super().__init__()\n\n self.tokens: List[Token] = []\n\n self._embeddings: Dict = {}\n\n self.language_code: str = language_code\n\n self.start_pos = start_position\n self.end_pos = (\n start_position + len(text) if start_position is not None else None\n )\n\n if isinstance(use_tokenizer, Tokenizer):\n tokenizer = use_tokenizer\n elif hasattr(use_tokenizer, \"__call__\"):\n from flair.tokenization import TokenizerWrapper\n tokenizer = TokenizerWrapper(use_tokenizer)\n elif type(use_tokenizer) == bool:\n from flair.tokenization import SegtokTokenizer, SpaceTokenizer\n tokenizer = SegtokTokenizer() if use_tokenizer else SpaceTokenizer()\n else:\n raise AssertionError(\"Unexpected type of parameter 'use_tokenizer'. \" +\n \"Parameter should be bool, Callable[[str], List[Token]] (deprecated), Tokenizer\")\n\n # if text is passed, instantiate sentence with tokens (words)\n if text is not None:\n if isinstance(text, (list, tuple)):\n [self.add_token(self._restore_windows_1252_characters(token))\n for token in text]\n else:\n text = self._restore_windows_1252_characters(text)\n [self.add_token(token) for token in tokenizer.tokenize(text)]\n\n # log a warning if the dataset is empty\n if text == \"\":\n log.warning(\n \"Warning: An empty Sentence was created! Are there empty strings in your dataset?\"\n )\n\n self.tokenized = None\n\n # some sentences represent a document boundary (but most do not)\n self.is_document_boundary: bool = False\n\n def get_token(self, token_id: int) -> Token:\n for token in self.tokens:\n if token.idx == token_id:\n return token\n\n def add_token(self, token: Union[Token, str]):\n\n if type(token) is str:\n token = Token(token)\n\n token.text = token.text.replace('\\u200c', '')\n token.text = token.text.replace('\\u200b', '')\n token.text = token.text.replace('\\ufe0f', '')\n token.text = token.text.replace('\\ufeff', '')\n\n # data with zero-width characters cannot be handled\n if token.text == '':\n return\n\n self.tokens.append(token)\n\n # set token idx if not set\n token.sentence = self\n if token.idx is None:\n token.idx = len(self.tokens)\n\n def get_label_names(self):\n label_names = []\n for label in self.labels:\n label_names.append(label.value)\n return label_names\n\n def _add_spans_internal(self, spans: List[Span], label_type: str, min_score):\n\n current_span = []\n\n tags = defaultdict(lambda: 0.0)\n\n previous_tag_value: str = \"O\"\n for token in self:\n\n tag: Label = token.get_tag(label_type)\n tag_value = tag.value\n\n # non-set tags are OUT tags\n if tag_value == \"\" or tag_value == \"O\" or tag_value == \"_\":\n tag_value = \"O-\"\n\n # anything that is not a BIOES tag is a SINGLE tag\n if tag_value[0:2] not in [\"B-\", \"I-\", \"O-\", \"E-\", \"S-\"]:\n tag_value = \"S-\" + tag_value\n\n # anything that is not OUT is IN\n in_span = False\n if tag_value[0:2] not in [\"O-\"]:\n in_span = True\n\n # single and begin tags start a new span\n starts_new_span = False\n if tag_value[0:2] in [\"B-\", \"S-\"]:\n starts_new_span = True\n\n if (\n previous_tag_value[0:2] in [\"S-\"]\n and previous_tag_value[2:] != tag_value[2:]\n and in_span\n ):\n starts_new_span = True\n\n if (starts_new_span or not in_span) and len(current_span) > 0:\n scores = [t.get_labels(label_type)[0].score for 
t in current_span]\n span_score = sum(scores) / len(scores)\n if span_score > min_score:\n span = Span(current_span)\n span.add_label(\n label_type=label_type,\n value=sorted(tags.items(), key=lambda k_v: k_v[1], reverse=True)[0][0],\n score=span_score)\n spans.append(span)\n\n current_span = []\n tags = defaultdict(lambda: 0.0)\n\n if in_span:\n current_span.append(token)\n weight = 1.1 if starts_new_span else 1.0\n tags[tag_value[2:]] += weight\n\n # remember previous tag\n previous_tag_value = tag_value\n\n if len(current_span) > 0:\n scores = [t.get_labels(label_type)[0].score for t in current_span]\n span_score = sum(scores) / len(scores)\n if span_score > min_score:\n span = Span(current_span)\n span.add_label(\n label_type=label_type,\n value=sorted(tags.items(), key=lambda k_v: k_v[1], reverse=True)[0][0],\n score=span_score)\n spans.append(span)\n\n return spans\n\n def get_spans(self, label_type: Optional[str] = None, min_score=-1) -> List[Span]:\n\n spans: List[Span] = []\n\n # if label type is explicitly specified, get spans for this label type\n if label_type:\n return self._add_spans_internal(spans, label_type, min_score)\n\n # else determine all label types in sentence and get all spans\n label_types = []\n for token in self:\n for annotation in token.annotation_layers.keys():\n if annotation not in label_types: label_types.append(annotation)\n\n for label_type in label_types:\n self._add_spans_internal(spans, label_type, min_score)\n return spans\n\n @property\n def embedding(self):\n return self.get_embedding()\n\n def set_embedding(self, name: str, vector: torch.tensor):\n device = flair.device\n if (flair.embedding_storage_mode == \"cpu\") and len(self._embeddings.keys()) > 0:\n device = next(iter(self._embeddings.values())).device\n if device != vector.device:\n vector = vector.to(device)\n self._embeddings[name] = vector\n\n def get_embedding(self, names: Optional[List[str]] = None) -> torch.tensor:\n embeddings = []\n for embed in sorted(self._embeddings.keys()):\n if names and embed not in names: continue\n embedding = self._embeddings[embed]\n embeddings.append(embedding)\n\n if embeddings:\n return torch.cat(embeddings, dim=0)\n\n return torch.Tensor()\n\n def to(self, device: str, pin_memory: bool = False):\n\n # move sentence embeddings to device\n for name, vector in self._embeddings.items():\n if str(vector.device) != str(device):\n if pin_memory:\n self._embeddings[name] = vector.to(\n device, non_blocking=True\n ).pin_memory()\n else:\n self._embeddings[name] = vector.to(device, non_blocking=True)\n\n # move token embeddings to device\n for token in self:\n token.to(device, pin_memory)\n\n def clear_embeddings(self, embedding_names: List[str] = None):\n\n # clear sentence embeddings\n if embedding_names is None:\n self._embeddings: Dict = {}\n else:\n for name in embedding_names:\n if name in self._embeddings.keys():\n del self._embeddings[name]\n\n # clear token embeddings\n for token in self:\n token.clear_embeddings(embedding_names)\n\n def to_tagged_string(self, main_tag=None) -> str:\n list = []\n for token in self.tokens:\n list.append(token.text)\n\n tags: List[str] = []\n for label_type in token.annotation_layers.keys():\n\n if main_tag is not None and main_tag != label_type:\n continue\n\n if token.get_labels(label_type)[0].value == \"O\":\n continue\n if token.get_labels(label_type)[0].value == \"_\":\n continue\n\n tags.append(token.get_labels(label_type)[0].value)\n all_tags = \"<\" + \"/\".join(tags) + \">\"\n if all_tags != \"<>\":\n 
list.append(all_tags)\n return \" \".join(list)\n\n def to_tokenized_string(self) -> str:\n\n if self.tokenized is None:\n self.tokenized = \" \".join([t.text for t in self.tokens])\n\n return self.tokenized\n\n def to_plain_string(self):\n plain = \"\"\n for token in self.tokens:\n plain += token.text\n if token.whitespace_after:\n plain += \" \"\n return plain.rstrip()\n\n def convert_tag_scheme(self, tag_type: str = \"ner\", target_scheme: str = \"iob\"):\n\n tags: List[Label] = []\n for token in self.tokens:\n tags.append(token.get_tag(tag_type))\n\n if target_scheme == \"iob\":\n iob2(tags)\n\n if target_scheme == \"iobes\":\n iob2(tags)\n tags = iob_iobes(tags)\n\n for index, tag in enumerate(tags):\n self.tokens[index].set_label(tag_type, tag)\n\n def infer_space_after(self):\n \"\"\"\n Heuristics in case you wish to infer whitespace_after values for tokenized text. This is useful for some old NLP\n tasks (such as CoNLL-03 and CoNLL-2000) that provide only tokenized data with no info of original whitespacing.\n :return:\n \"\"\"\n last_token = None\n quote_count: int = 0\n # infer whitespace after field\n\n for token in self.tokens:\n if token.text == '\"':\n quote_count += 1\n if quote_count % 2 != 0:\n token.whitespace_after = False\n elif last_token is not None:\n last_token.whitespace_after = False\n\n if last_token is not None:\n\n if token.text in [\".\", \":\", \",\", \";\", \")\", \"n't\", \"!\", \"?\"]:\n last_token.whitespace_after = False\n\n if token.text.startswith(\"'\"):\n last_token.whitespace_after = False\n\n if token.text in [\"(\"]:\n token.whitespace_after = False\n\n last_token = token\n return self\n\n def to_original_text(self) -> str:\n if len(self.tokens) > 0 and (self.tokens[0].start_pos is None):\n return \" \".join([t.text for t in self.tokens])\n str = \"\"\n pos = 0\n for t in self.tokens:\n while t.start_pos > pos:\n str += \" \"\n pos += 1\n\n str += t.text\n pos += len(t.text)\n\n return str\n\n def to_dict(self, tag_type: str = None):\n labels = []\n entities = []\n\n if tag_type:\n entities = [span.to_dict() for span in self.get_spans(tag_type)]\n if self.labels:\n labels = [l.to_dict() for l in self.labels]\n\n return {\"text\": self.to_original_text(), \"labels\": labels, \"entities\": entities}\n\n def __getitem__(self, idx: int) -> Token:\n return self.tokens[idx]\n\n def __iter__(self):\n return iter(self.tokens)\n\n def __len__(self) -> int:\n return len(self.tokens)\n\n def __repr__(self):\n tagged_string = self.to_tagged_string()\n tokenized_string = self.to_tokenized_string()\n\n # add Sentence labels to output if they exist\n sentence_labels = f\" − Sentence-Labels: {self.annotation_layers}\" if self.annotation_layers != {} else \"\"\n\n # add Token labels to output if they exist\n token_labels = f' − Token-Labels: \"{tagged_string}\"' if tokenized_string != tagged_string else \"\"\n\n return f'Sentence: \"{tokenized_string}\" [− Tokens: {len(self)}{token_labels}{sentence_labels}]'\n\n def __copy__(self):\n s = Sentence()\n for token in self.tokens:\n nt = Token(token.text)\n for tag_type in token.tags:\n nt.add_label(\n tag_type,\n token.get_tag(tag_type).value,\n token.get_tag(tag_type).score,\n )\n\n s.add_token(nt)\n return s\n\n def __str__(self) -> str:\n\n tagged_string = self.to_tagged_string()\n tokenized_string = self.to_tokenized_string()\n\n # add Sentence labels to output if they exist\n sentence_labels = f\" − Sentence-Labels: {self.annotation_layers}\" if self.annotation_layers != {} else \"\"\n\n # add Token labels to 
output if they exist\n token_labels = f' − Token-Labels: \"{tagged_string}\"' if tokenized_string != tagged_string else \"\"\n\n return f'Sentence: \"{tokenized_string}\" [− Tokens: {len(self)}{token_labels}{sentence_labels}]'\n\n def get_language_code(self) -> str:\n if self.language_code is None:\n import langdetect\n\n try:\n self.language_code = langdetect.detect(self.to_plain_string())\n except:\n self.language_code = \"en\"\n\n return self.language_code\n\n @staticmethod\n def _restore_windows_1252_characters(text: str) -> str:\n def to_windows_1252(match):\n try:\n return bytes([ord(match.group(0))]).decode(\"windows-1252\")\n except UnicodeDecodeError:\n # No character at the corresponding code point: remove it\n return \"\"\n\n return re.sub(r\"[\\u0080-\\u0099]\", to_windows_1252, text)\n\n def next_sentence(self):\n \"\"\"\n Get the next sentence in the document (works only if context is set through dataloader or elsewhere)\n :return: next Sentence in document if set, otherwise None\n \"\"\"\n if '_next_sentence' in self.__dict__.keys():\n return self._next_sentence\n\n if '_position_in_dataset' in self.__dict__.keys():\n dataset = self._position_in_dataset[0]\n index = self._position_in_dataset[1] + 1\n if index < len(dataset):\n return dataset[index]\n\n return None\n\n def previous_sentence(self):\n \"\"\"\n Get the previous sentence in the document (works only if context is set through dataloader or elsewhere)\n :return: previous Sentence in document if set, otherwise None\n \"\"\"\n if '_previous_sentence' in self.__dict__.keys():\n return self._previous_sentence\n\n if '_position_in_dataset' in self.__dict__.keys():\n dataset = self._position_in_dataset[0]\n index = self._position_in_dataset[1] - 1\n if index >= 0:\n return dataset[index]\n\n return None\n\n def is_context_set(self) -> bool:\n \"\"\"\n Return True or False depending on whether context is set (for instance in dataloader or elsewhere)\n :return: True if context is set, else False\n \"\"\"\n return '_previous_sentence' in self.__dict__.keys() or '_position_in_dataset' in self.__dict__.keys()\n\n\nclass Image(DataPoint):\n\n def __init__(self, data=None, imageURL=None):\n super().__init__()\n\n self.data = data\n self._embeddings: Dict = {}\n self.imageURL = imageURL\n\n @property\n def embedding(self):\n return self.get_embedding()\n\n def __str__(self):\n\n image_repr = self.data.size() if self.data else \"\"\n image_url = self.imageURL if self.imageURL else \"\"\n\n return f\"Image: {image_repr} {image_url}\"\n\n def get_embedding(self) -> torch.tensor:\n embeddings = [\n self._embeddings[embed] for embed in sorted(self._embeddings.keys())\n ]\n\n if embeddings:\n return torch.cat(embeddings, dim=0)\n\n return torch.tensor([], device=flair.device)\n\n def set_embedding(self, name: str, vector: torch.tensor):\n device = flair.device\n if (flair.embedding_storage_mode == \"cpu\") and len(self._embeddings.keys()) > 0:\n device = next(iter(self._embeddings.values())).device\n if device != vector.device:\n vector = vector.to(device)\n self._embeddings[name] = vector\n\n def to(self, device: str, pin_memory: bool = False):\n for name, vector in self._embeddings.items():\n if str(vector.device) != str(device):\n if pin_memory:\n self._embeddings[name] = vector.to(\n device, non_blocking=True\n ).pin_memory()\n else:\n self._embeddings[name] = vector.to(device, non_blocking=True)\n\n def clear_embeddings(self, embedding_names: List[str] = None):\n if embedding_names is None:\n self._embeddings: Dict = {}\n else:\n 
for name in embedding_names:\n if name in self._embeddings.keys():\n del self._embeddings[name]\n\n\nclass FlairDataset(Dataset):\n @abstractmethod\n def is_in_memory(self) -> bool:\n pass\n\n\nclass Corpus:\n def __init__(\n self,\n train: FlairDataset,\n dev: FlairDataset = None,\n test: FlairDataset = None,\n name: str = \"corpus\",\n sample_missing_splits: bool = True,\n ):\n # set name\n self.name: str = name\n\n # sample test data if none is provided\n if test is None and sample_missing_splits:\n train_length = len(train)\n test_size: int = round(train_length / 10)\n splits = randomly_split_into_two_datasets(train, test_size)\n test = splits[0]\n train = splits[1]\n\n # sample dev data if none is provided\n if dev is None and sample_missing_splits:\n train_length = len(train)\n dev_size: int = round(train_length / 10)\n splits = randomly_split_into_two_datasets(train, dev_size)\n dev = splits[0]\n train = splits[1]\n\n # set train dev and test data\n self._train: FlairDataset = train\n self._test: FlairDataset = test\n self._dev: FlairDataset = dev\n\n @property\n def train(self) -> FlairDataset:\n return self._train\n\n @property\n def dev(self) -> FlairDataset:\n return self._dev\n\n @property\n def test(self) -> FlairDataset:\n return self._test\n\n def downsample(self, percentage: float = 0.1, downsample_train=True, downsample_dev=True, downsample_test=True):\n\n if downsample_train:\n self._train = self._downsample_to_proportion(self.train, percentage)\n\n if downsample_dev:\n self._dev = self._downsample_to_proportion(self.dev, percentage)\n\n if downsample_test:\n self._test = self._downsample_to_proportion(self.test, percentage)\n\n return self\n\n def filter_empty_sentences(self):\n log.info(\"Filtering empty sentences\")\n self._train = Corpus._filter_empty_sentences(self._train)\n self._test = Corpus._filter_empty_sentences(self._test)\n self._dev = Corpus._filter_empty_sentences(self._dev)\n log.info(self)\n\n def filter_long_sentences(self, max_charlength: int):\n log.info(\"Filtering long sentences\")\n self._train = Corpus._filter_long_sentences(self._train, max_charlength)\n self._test = Corpus._filter_long_sentences(self._test, max_charlength)\n self._dev = Corpus._filter_long_sentences(self._dev, max_charlength)\n log.info(self)\n\n @staticmethod\n def _filter_long_sentences(dataset, max_charlength: int) -> Dataset:\n\n # find out empty sentence indices\n empty_sentence_indices = []\n non_empty_sentence_indices = []\n index = 0\n\n from flair.datasets import DataLoader\n\n for batch in DataLoader(dataset):\n for sentence in batch:\n if len(sentence.to_plain_string()) > max_charlength:\n empty_sentence_indices.append(index)\n else:\n non_empty_sentence_indices.append(index)\n index += 1\n\n # create subset of non-empty sentence indices\n subset = Subset(dataset, non_empty_sentence_indices)\n\n return subset\n\n @staticmethod\n def _filter_empty_sentences(dataset) -> Dataset:\n\n # find out empty sentence indices\n empty_sentence_indices = []\n non_empty_sentence_indices = []\n index = 0\n\n from flair.datasets import DataLoader\n\n for batch in DataLoader(dataset):\n for sentence in batch:\n if len(sentence) == 0:\n empty_sentence_indices.append(index)\n else:\n non_empty_sentence_indices.append(index)\n index += 1\n\n # create subset of non-empty sentence indices\n subset = Subset(dataset, non_empty_sentence_indices)\n\n return subset\n\n def make_vocab_dictionary(self, max_tokens=-1, min_freq=1) -> Dictionary:\n \"\"\"\n Creates a dictionary of all tokens contained 
in the corpus.\n By defining `max_tokens` you can set the maximum number of tokens that should be contained in the dictionary.\n If there are more than `max_tokens` tokens in the corpus, the most frequent tokens are added first.\n If `min_freq` is set the a value greater than 1 only tokens occurring more than `min_freq` times are considered\n to be added to the dictionary.\n :param max_tokens: the maximum number of tokens that should be added to the dictionary (-1 = take all tokens)\n :param min_freq: a token needs to occur at least `min_freq` times to be added to the dictionary (-1 = there is no limitation)\n :return: dictionary of tokens\n \"\"\"\n tokens = self._get_most_common_tokens(max_tokens, min_freq)\n\n vocab_dictionary: Dictionary = Dictionary()\n for token in tokens:\n vocab_dictionary.add_item(token)\n\n return vocab_dictionary\n\n def _get_most_common_tokens(self, max_tokens, min_freq) -> List[str]:\n tokens_and_frequencies = Counter(self._get_all_tokens())\n tokens_and_frequencies = tokens_and_frequencies.most_common()\n\n tokens = []\n for token, freq in tokens_and_frequencies:\n if (min_freq != -1 and freq < min_freq) or (\n max_tokens != -1 and len(tokens) == max_tokens\n ):\n break\n tokens.append(token)\n return tokens\n\n def _get_all_tokens(self) -> List[str]:\n tokens = list(map((lambda s: s.tokens), self.train))\n tokens = [token for sublist in tokens for token in sublist]\n return list(map((lambda t: t.text), tokens))\n\n @staticmethod\n def _downsample_to_proportion(dataset: Dataset, proportion: float):\n\n sampled_size: int = round(len(dataset) * proportion)\n splits = randomly_split_into_two_datasets(dataset, sampled_size)\n return splits[0]\n\n def obtain_statistics(\n self, label_type: str = None, pretty_print: bool = True\n ) -> dict:\n \"\"\"\n Print statistics about the class distribution (only labels of sentences are taken into account) and sentence\n sizes.\n \"\"\"\n json_string = {\n \"TRAIN\": self._obtain_statistics_for(self.train, \"TRAIN\", label_type),\n \"TEST\": self._obtain_statistics_for(self.test, \"TEST\", label_type),\n \"DEV\": self._obtain_statistics_for(self.dev, \"DEV\", label_type),\n }\n if pretty_print:\n import json\n\n json_string = json.dumps(json_string, indent=4)\n return json_string\n\n @staticmethod\n def _obtain_statistics_for(sentences, name, tag_type) -> dict:\n if len(sentences) == 0:\n return {}\n\n classes_to_count = Corpus._count_sentence_labels(sentences)\n tags_to_count = Corpus._count_token_labels(sentences, tag_type)\n tokens_per_sentence = Corpus._get_tokens_per_sentence(sentences)\n\n label_size_dict = {}\n for l, c in classes_to_count.items():\n label_size_dict[l] = c\n\n tag_size_dict = {}\n for l, c in tags_to_count.items():\n tag_size_dict[l] = c\n\n return {\n \"dataset\": name,\n \"total_number_of_documents\": len(sentences),\n \"number_of_documents_per_class\": label_size_dict,\n \"number_of_tokens_per_tag\": tag_size_dict,\n \"number_of_tokens\": {\n \"total\": sum(tokens_per_sentence),\n \"min\": min(tokens_per_sentence),\n \"max\": max(tokens_per_sentence),\n \"avg\": sum(tokens_per_sentence) / len(sentences),\n },\n }\n\n @staticmethod\n def _get_tokens_per_sentence(sentences):\n return list(map(lambda x: len(x.tokens), sentences))\n\n @staticmethod\n def _count_sentence_labels(sentences):\n label_count = defaultdict(lambda: 0)\n for sent in sentences:\n for label in sent.labels:\n label_count[label.value] += 1\n return label_count\n\n @staticmethod\n def _count_token_labels(sentences, label_type):\n 
label_count = defaultdict(lambda: 0)\n for sent in sentences:\n for token in sent.tokens:\n if label_type in token.annotation_layers.keys():\n label = token.get_tag(label_type)\n label_count[label.value] += 1\n return label_count\n\n def __str__(self) -> str:\n return \"Corpus: %d train + %d dev + %d test sentences\" % (\n len(self.train) if self.train else 0,\n len(self.dev) if self.dev else 0,\n len(self.test) if self.test else 0,\n )\n\n def make_label_dictionary(self, label_type: str = None) -> Dictionary:\n \"\"\"\n Creates a dictionary of all labels assigned to the sentences in the corpus.\n :return: dictionary of labels\n \"\"\"\n label_dictionary: Dictionary = Dictionary(add_unk=False)\n label_dictionary.multi_label = False\n\n from flair.datasets import DataLoader\n\n data = ConcatDataset([self.train, self.test])\n loader = DataLoader(data, batch_size=1)\n\n log.info(\"Computing label dictionary. Progress:\")\n for batch in Tqdm.tqdm(iter(loader)):\n\n for sentence in batch:\n\n # check if sentence itself has labels\n labels = sentence.get_labels(label_type) if label_type is not None else sentence.labels\n\n for label in labels:\n label_dictionary.add_item(label.value)\n\n # check for labels of words\n if isinstance(sentence, Sentence):\n for token in sentence.tokens:\n for label in token.get_labels(label_type):\n label_dictionary.add_item(label.value)\n\n if not label_dictionary.multi_label:\n if len(labels) > 1:\n label_dictionary.multi_label = True\n\n log.info(label_dictionary.idx2item)\n\n return label_dictionary\n\n def get_label_distribution(self):\n class_to_count = defaultdict(lambda: 0)\n for sent in self.train:\n for label in sent.labels:\n class_to_count[label.value] += 1\n return class_to_count\n\n def get_all_sentences(self) -> Dataset:\n parts = []\n if self.train: parts.append(self.train)\n if self.dev: parts.append(self.dev)\n if self.test: parts.append(self.test)\n return ConcatDataset(parts)\n\n def make_tag_dictionary(self, tag_type: str) -> Dictionary:\n\n # Make the tag dictionary\n tag_dictionary: Dictionary = Dictionary()\n tag_dictionary.add_item(\"O\")\n for sentence in self.get_all_sentences():\n for token in sentence.tokens:\n tag_dictionary.add_item(token.get_tag(tag_type).value)\n tag_dictionary.add_item(\"<START>\")\n tag_dictionary.add_item(\"<STOP>\")\n return tag_dictionary\n\n\nclass MultiCorpus(Corpus):\n def __init__(self, corpora: List[Corpus], name: str = \"multicorpus\", **corpusargs):\n self.corpora: List[Corpus] = corpora\n\n train_parts = []\n dev_parts = []\n test_parts = []\n for corpus in self.corpora:\n if corpus.train: train_parts.append(corpus.train)\n if corpus.dev: dev_parts.append(corpus.dev)\n if corpus.test: test_parts.append(corpus.test)\n\n super(MultiCorpus, self).__init__(\n ConcatDataset(train_parts) if len(train_parts) > 0 else None,\n ConcatDataset(dev_parts) if len(dev_parts) > 0 else None,\n ConcatDataset(test_parts) if len(test_parts) > 0 else None,\n name=name,\n **corpusargs,\n )\n\n def __str__(self):\n output = f\"MultiCorpus: \" \\\n f\"{len(self.train) if self.train else 0} train + \" \\\n f\"{len(self.dev) if self.dev else 0} dev + \" \\\n f\"{len(self.test) if self.test else 0} test sentences\\n - \"\n output += \"\\n - \".join([f'{type(corpus).__name__} {str(corpus)}' for corpus in self.corpora])\n return output\n\n\ndef iob2(tags):\n \"\"\"\n Check that tags have a valid IOB format.\n Tags in IOB1 format are converted to IOB2.\n \"\"\"\n for i, tag in enumerate(tags):\n if tag.value == \"O\":\n continue\n 
split = tag.value.split(\"-\")\n if len(split) != 2 or split[0] not in [\"I\", \"B\"]:\n return False\n if split[0] == \"B\":\n continue\n elif i == 0 or tags[i - 1].value == \"O\": # conversion IOB1 to IOB2\n tags[i].value = \"B\" + tag.value[1:]\n elif tags[i - 1].value[1:] == tag.value[1:]:\n continue\n else: # conversion IOB1 to IOB2\n tags[i].value = \"B\" + tag.value[1:]\n return True\n\n\ndef iob_iobes(tags):\n \"\"\"\n IOB -> IOBES\n \"\"\"\n new_tags = []\n for i, tag in enumerate(tags):\n if tag.value == \"O\":\n new_tags.append(tag.value)\n elif tag.value.split(\"-\")[0] == \"B\":\n if i + 1 != len(tags) and tags[i + 1].value.split(\"-\")[0] == \"I\":\n new_tags.append(tag.value)\n else:\n new_tags.append(tag.value.replace(\"B-\", \"S-\"))\n elif tag.value.split(\"-\")[0] == \"I\":\n if i + 1 < len(tags) and tags[i + 1].value.split(\"-\")[0] == \"I\":\n new_tags.append(tag.value)\n else:\n new_tags.append(tag.value.replace(\"I-\", \"E-\"))\n else:\n raise Exception(\"Invalid IOB format!\")\n return new_tags\n\n\ndef randomly_split_into_two_datasets(dataset, length_of_first):\n import random\n indices = [i for i in range(len(dataset))]\n random.shuffle(indices)\n\n first_dataset = indices[:length_of_first]\n second_dataset = indices[length_of_first:]\n first_dataset.sort()\n second_dataset.sort()\n\n return [Subset(dataset, first_dataset), Subset(dataset, second_dataset)]\n", "path": "flair/data.py" } ]
diff --git a/flair/data.py b/flair/data.py index ad561600ef..a82b9ac054 100644 --- a/flair/data.py +++ b/flair/data.py @@ -602,7 +602,7 @@ def add_token(self, token: Union[Token, str]): token.text = token.text.replace('\ufeff', '') # data with zero-width characters cannot be handled - if token.text.strip() == '': + if token.text == '': return self.tokens.append(token) diff --git a/tests/test_data.py b/tests/test_data.py index c5141c9fe0..6a2d162e08 100644 --- a/tests/test_data.py +++ b/tests/test_data.py @@ -47,7 +47,6 @@ def test_get_head(): def test_create_sentence_on_empty_string(): - sentence: Sentence = Sentence("") assert 0 == len(sentence.tokens) @@ -78,15 +77,26 @@ def test_create_sentence_with_tokenizer(): assert "." == sentence.tokens[3].text +def test_create_sentence_with_newline(): + sentence: Sentence = Sentence(["I", "\t", "ich", "\n", "you", "\t", "du", "\n"]) + assert 8 == len(sentence.tokens) + assert "\n" == sentence.tokens[3].text + + sentence: Sentence = Sentence("I \t ich \n you \t du \n", use_tokenizer=False) + assert 8 == len(sentence.tokens) + assert 0 == sentence.tokens[0].start_pos + assert "\n" == sentence.tokens[3].text + + def test_create_sentence_with_custom_tokenizer(): - sentence:Sentence = Sentence("I love Berlin.", use_tokenizer=TokenizerWrapper(no_op_tokenizer)) + sentence: Sentence = Sentence("I love Berlin.", use_tokenizer=TokenizerWrapper(no_op_tokenizer)) assert 1 == len(sentence.tokens) assert 0 == sentence.tokens[0].start_pos assert "I love Berlin." == sentence.tokens[0].text def test_create_sentence_with_callable(): - sentence:Sentence = Sentence("I love Berlin.", use_tokenizer=no_op_tokenizer) + sentence: Sentence = Sentence("I love Berlin.", use_tokenizer=no_op_tokenizer) assert 1 == len(sentence.tokens) assert 0 == sentence.tokens[0].start_pos assert "I love Berlin." == sentence.tokens[0].text @@ -94,7 +104,7 @@ def test_create_sentence_with_callable(): @pytest.mark.skip(reason="SpacyTokenizer needs optional requirements, so we skip the test by default") def test_create_sentence_with_spacy_tokenizer(): - sentence:Sentence = Sentence("I love Berlin.", use_tokenizer=SpacyTokenizer("en_core_sci_sm")) + sentence: Sentence = Sentence("I love Berlin.", use_tokenizer=SpacyTokenizer("en_core_sci_sm")) assert 4 == len(sentence.tokens) assert 0 == sentence.tokens[0].start_pos @@ -290,8 +300,8 @@ def test_problem_sentences(): sentence = Sentence(text) assert len(sentence) == 9 - text= "equivalently , accumulating the logs as :( 6 ) sl = 1N ∑ t = 1Nlogp ( Ll | xt ​ , θ ) where " \ - "p ( Ll | xt ​ , θ ) represents the class probability output" + text = "equivalently , accumulating the logs as :( 6 ) sl = 1N ∑ t = 1Nlogp ( Ll | xt ​ , θ ) where " \ + "p ( Ll | xt ​ , θ ) represents the class probability output" sentence = Sentence(text) assert len(sentence) == 37 @@ -305,7 +315,6 @@ def test_problem_sentences(): def test_token_indices(): - text = ": nation on" sentence = Sentence(text) assert text == sentence.to_original_text() @@ -352,23 +361,23 @@ def test_sentence_to_real_string(tasks_base_path): sentence = corpus.train[0] sentence.infer_space_after() assert ( - 'Schartau sagte dem " Tagesspiegel " vom Freitag , Fischer sei " in einer Weise aufgetreten , die alles andere als überzeugend war " .' - == sentence.to_tokenized_string() + 'Schartau sagte dem " Tagesspiegel " vom Freitag , Fischer sei " in einer Weise aufgetreten , die alles andere als überzeugend war " .' 
+ == sentence.to_tokenized_string() ) assert ( - 'Schartau sagte dem "Tagesspiegel" vom Freitag, Fischer sei "in einer Weise aufgetreten, die alles andere als überzeugend war".' - == sentence.to_plain_string() + 'Schartau sagte dem "Tagesspiegel" vom Freitag, Fischer sei "in einer Weise aufgetreten, die alles andere als überzeugend war".' + == sentence.to_plain_string() ) sentence = corpus.train[1] sentence.infer_space_after() assert ( - "Firmengründer Wolf Peter Bree arbeitete Anfang der siebziger Jahre als Möbelvertreter , als er einen fliegenden Händler aus dem Libanon traf ." - == sentence.to_tokenized_string() + "Firmengründer Wolf Peter Bree arbeitete Anfang der siebziger Jahre als Möbelvertreter , als er einen fliegenden Händler aus dem Libanon traf ." + == sentence.to_tokenized_string() ) assert ( - "Firmengründer Wolf Peter Bree arbeitete Anfang der siebziger Jahre als Möbelvertreter, als er einen fliegenden Händler aus dem Libanon traf." - == sentence.to_plain_string() + "Firmengründer Wolf Peter Bree arbeitete Anfang der siebziger Jahre als Möbelvertreter, als er einen fliegenden Händler aus dem Libanon traf." + == sentence.to_plain_string() ) @@ -587,7 +596,6 @@ def test_tagged_corpus_make_label_dictionary(): def test_tagged_corpus_statistics(): - train_sentence = Sentence("I love Berlin.", use_tokenizer=True).add_label('label', 'class_1') dev_sentence = Sentence("The sun is shining.", use_tokenizer=True).add_label('label', 'class_2') @@ -614,7 +622,6 @@ def test_tagged_corpus_statistics(): def test_tagged_corpus_statistics_multi_label(): - train_sentence = Sentence("I love Berlin.", use_tokenizer=True).add_label('label', 'class_1') dev_sentence = Sentence("The sun is shining.", use_tokenizer=True).add_label('label', 'class_2') @@ -671,7 +678,6 @@ def test_tagged_corpus_get_tag_statistic(): def test_tagged_corpus_downsample(): - sentence = Sentence("I love Berlin.", use_tokenizer=True).add_label('label', 'class_1') corpus: Corpus = Corpus( @@ -841,8 +847,8 @@ def test_sentence_to_dict(): dict = sentence.to_dict("ner") assert ( - "Zalando Research is located in Berlin, the capital of Germany." - == dict["text"] + "Zalando Research is located in Berlin, the capital of Germany." + == dict["text"] ) assert "Zalando Research" == dict["entities"][0]["text"] assert "Berlin" == dict["entities"][1]["text"]
ethereum__consensus-specs-2445
SSZ Union improvement proposal

The current [Union type](https://github.com/ethereum/eth2.0-specs/blob/dev/ssz/simple-serialize.md#composite-types) is not used in Phase0, and is largely not yet implemented in client implementations (exception: lighthouse). In more dynamic contexts, however, it could be very useful.

The direct motivation for this is the [Merge block format](https://github.com/ethereum/eth2.0-specs/pull/2257), in which we may see different transaction types that need to be encoded. To avoid the overhead of lots of unused zero fields (SSZ does not truncate zeroes like RLP does), you would need different transaction type definitions. And how do you mix these different transactions in the same list? With a Union type.

One problem is that it takes 4 bytes per `Union` selector index (the switch between the type options), and we only really see a few type options in the common use case.

This is a simple proposal to parametrize (at the type level, compile-time) the selector byte length. E.g. `Union[4, [uint256, Bitvector, uint8]]` would be **backwards compatible** with the already specified `Union[uint256, Bitvector, uint8]`. This would allow us to specify something like `Union[TRANSACTION_SELECTOR_LENGTH, [LegacyTransaction, BetterTransaction, SuperTransaction, AbstractedTransaction]]` (and yes, the selected index is part of the merkleization, so there is no confusion between types in the merkle proof).

For the merge specifically, an alternative is to use opaque transactions (`List[byte, max_size]`, a.k.a. `Bytes[max_size]`) to encode an RLP transaction, instead of structured data. The pro is that the beacon-spec does not have to care about transaction types; the con is that we miss out on SSZ merkleization of transactions into the block-root, one of the primary places where merkle proofs would actually be very useful (you can construct a merkle proof about any transaction detail).

As a proof of concept, I implemented this new `Union` behavior in `remerkleable`, the SSZ library used in the python specification. See [here](https://github.com/protolambda/remerkleable/pull/9). (*Note for code readers: it represents the data as a binary tree and overlays it with views; it is focused on merkle proofs and data sharing more so than structural efficiency.*)
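To make the proposed framing concrete, below is a minimal, self-contained Python sketch, not part of the spec and not remerkleable's API. The function names (`serialize_union`, `deserialize_union`) and the exact layout, an unsigned little-endian selector of the configured byte length followed by the serialization of the selected value, are illustrative assumptions; only the "4 bytes per selector" figure and the backwards-compatibility intent come from the proposal above.

```python
# Illustrative sketch only: a parametrized-union wire framing where the selector
# occupies `selector_byte_length` bytes (assumed little-endian, unsigned), followed
# by the SSZ serialization of the selected option.
from typing import Tuple


def serialize_union(selector_byte_length: int, selected_index: int, serialized_value: bytes) -> bytes:
    # The proposal parametrizes the selector length at the type level (compile-time);
    # here it is passed explicitly for demonstration.
    if selected_index >= 256 ** selector_byte_length:
        raise ValueError("selected index does not fit in the configured selector length")
    return selected_index.to_bytes(selector_byte_length, "little") + serialized_value


def deserialize_union(selector_byte_length: int, data: bytes) -> Tuple[int, bytes]:
    if len(data) < selector_byte_length:
        raise ValueError("input is shorter than the union selector")
    selected_index = int.from_bytes(data[:selector_byte_length], "little")
    return selected_index, data[selector_byte_length:]


# With selector_byte_length=4 the framing keeps the existing 4-byte selector, which is
# how (per the proposal) Union[4, [...]] stays backwards compatible with Union[...];
# a 1-byte selector would shave the overhead for unions with only a few options.
encoded = serialize_union(4, 2, (255).to_bytes(1, "little"))  # option index 2 (e.g. uint8), value 0xff
assert deserialize_union(4, encoded) == (2, b"\xff")
```

A 1-byte selector under this scheme costs 3 bytes less per element than the current encoding while still allowing up to 256 options, which covers the handful of transaction types envisioned here.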
[ { "content": "from setuptools import setup, find_packages, Command\nfrom setuptools.command.build_py import build_py\nfrom distutils import dir_util\nfrom distutils.util import convert_path\nfrom pathlib import Path\nimport os\nimport re\nimport string\nimport textwrap\nfrom typing import Dict, NamedTuple, List, Sequence, Optional, TypeVar\nfrom abc import ABC, abstractmethod\nimport ast\n\n\n# NOTE: have to programmatically include third-party dependencies in `setup.py`.\nRUAMEL_YAML_VERSION = \"ruamel.yaml==0.16.5\"\ntry:\n import ruamel.yaml\nexcept ImportError:\n import pip\n pip.main([\"install\", RUAMEL_YAML_VERSION])\n\nfrom ruamel.yaml import YAML\n\nMARKO_VERSION = \"marko==1.0.2\"\ntry:\n import marko\nexcept ImportError:\n import pip\n pip.main([\"install\", MARKO_VERSION])\n\nfrom marko.block import Heading, FencedCode, LinkRefDef, BlankLine\nfrom marko.inline import CodeSpan\nfrom marko.ext.gfm import gfm\nfrom marko.ext.gfm.elements import Table, Paragraph\n\n\n# Definitions in context.py\nPHASE0 = 'phase0'\nALTAIR = 'altair'\nMERGE = 'merge'\n\n# The helper functions that are used when defining constants\nCONSTANT_DEP_SUNDRY_CONSTANTS_FUNCTIONS = '''\ndef ceillog2(x: int) -> uint64:\n if x < 1:\n raise ValueError(f\"ceillog2 accepts only positive values, x={x}\")\n return uint64((x - 1).bit_length())\n\n\ndef floorlog2(x: int) -> uint64:\n if x < 1:\n raise ValueError(f\"floorlog2 accepts only positive values, x={x}\")\n return uint64(x.bit_length() - 1)\n'''\n\n\nclass ProtocolDefinition(NamedTuple):\n # just function definitions currently. May expand with configuration vars in future.\n functions: Dict[str, str]\n\n\nclass VariableDefinition(NamedTuple):\n type_name: Optional[str]\n value: str\n comment: Optional[str] # e.g. \"noqa: E501\"\n\n\nclass SpecObject(NamedTuple):\n functions: Dict[str, str]\n protocols: Dict[str, ProtocolDefinition]\n custom_types: Dict[str, str]\n constant_vars: Dict[str, VariableDefinition]\n preset_vars: Dict[str, VariableDefinition]\n config_vars: Dict[str, VariableDefinition]\n ssz_dep_constants: Dict[str, str] # the constants that depend on ssz_objects\n ssz_objects: Dict[str, str]\n dataclasses: Dict[str, str]\n\n\ndef _get_name_from_heading(heading: Heading) -> Optional[str]:\n last_child = heading.children[-1]\n if isinstance(last_child, CodeSpan):\n return last_child.children\n return None\n\n\ndef _get_source_from_code_block(block: FencedCode) -> str:\n return block.children[0].children.strip()\n\n\ndef _get_function_name_from_source(source: str) -> str:\n fn = ast.parse(source).body[0]\n return fn.name\n\n\ndef _get_self_type_from_source(source: str) -> Optional[str]:\n fn = ast.parse(source).body[0]\n args = fn.args.args\n if len(args) == 0:\n return None\n if args[0].arg != 'self':\n return None\n if args[0].annotation is None:\n return None\n return args[0].annotation.id\n\n\ndef _get_class_info_from_source(source: str) -> (str, Optional[str]):\n class_def = ast.parse(source).body[0]\n base = class_def.bases[0]\n if isinstance(base, ast.Name):\n parent_class = base.id\n else:\n # NOTE: SSZ definition derives from earlier phase...\n # e.g. 
`phase0.SignedBeaconBlock`\n # TODO: check for consistency with other phases\n parent_class = None\n return class_def.name, parent_class\n\n\ndef _is_constant_id(name: str) -> bool:\n if name[0] not in string.ascii_uppercase + '_':\n return False\n return all(map(lambda c: c in string.ascii_uppercase + '_' + string.digits, name[1:]))\n\n\nETH2_SPEC_COMMENT_PREFIX = \"eth2spec:\"\n\n\ndef _get_eth2_spec_comment(child: LinkRefDef) -> Optional[str]:\n _, _, title = child._parse_info\n if not (title[0] == \"(\" and title[len(title)-1] == \")\"):\n return None\n title = title[1:len(title)-1]\n if not title.startswith(ETH2_SPEC_COMMENT_PREFIX):\n return None\n return title[len(ETH2_SPEC_COMMENT_PREFIX):].strip()\n\n\ndef _parse_value(name: str, typed_value: str) -> VariableDefinition:\n comment = None\n if name == \"BLS12_381_Q\":\n comment = \"noqa: E501\"\n\n typed_value = typed_value.strip()\n if '(' not in typed_value:\n return VariableDefinition(type_name=None, value=typed_value, comment=comment)\n i = typed_value.index('(')\n type_name = typed_value[:i]\n\n return VariableDefinition(type_name=type_name, value=typed_value[i+1:-1], comment=comment)\n\n\ndef get_spec(file_name: Path, preset: Dict[str, str], config: Dict[str, str]) -> SpecObject:\n functions: Dict[str, str] = {}\n protocols: Dict[str, ProtocolDefinition] = {}\n constant_vars: Dict[str, VariableDefinition] = {}\n preset_vars: Dict[str, VariableDefinition] = {}\n config_vars: Dict[str, VariableDefinition] = {}\n ssz_dep_constants: Dict[str, str] = {}\n ssz_objects: Dict[str, str] = {}\n dataclasses: Dict[str, str] = {}\n custom_types: Dict[str, str] = {}\n\n with open(file_name) as source_file:\n document = gfm.parse(source_file.read())\n\n current_name = None\n should_skip = False\n for child in document.children:\n if isinstance(child, BlankLine):\n continue\n if should_skip:\n should_skip = False\n continue\n if isinstance(child, Heading):\n current_name = _get_name_from_heading(child)\n elif isinstance(child, FencedCode):\n if child.lang != \"python\":\n continue\n source = _get_source_from_code_block(child)\n if source.startswith(\"def\"):\n current_name = _get_function_name_from_source(source)\n self_type_name = _get_self_type_from_source(source)\n function_def = \"\\n\".join(line.rstrip() for line in source.splitlines())\n if self_type_name is None:\n functions[current_name] = function_def\n else:\n if self_type_name not in protocols:\n protocols[self_type_name] = ProtocolDefinition(functions={})\n protocols[self_type_name].functions[current_name] = function_def\n elif source.startswith(\"@dataclass\"):\n dataclasses[current_name] = \"\\n\".join(line.rstrip() for line in source.splitlines())\n elif source.startswith(\"class\"):\n class_name, parent_class = _get_class_info_from_source(source)\n # check consistency with spec\n assert class_name == current_name\n if parent_class:\n assert parent_class == \"Container\"\n # NOTE: trim whitespace from spec\n ssz_objects[current_name] = \"\\n\".join(line.rstrip() for line in source.splitlines())\n else:\n raise Exception(\"unrecognized python code element\")\n elif isinstance(child, Table):\n for row in child.children:\n cells = row.children\n if len(cells) >= 2:\n name_cell = cells[0]\n name = name_cell.children[0].children\n\n value_cell = cells[1]\n value = value_cell.children[0].children\n if isinstance(value, list):\n # marko parses `**X**` as a list containing a X\n value = value[0].children\n\n if not _is_constant_id(name):\n # Check for short type declarations\n if 
value.startswith(\"uint\") or value.startswith(\"Bytes\") or value.startswith(\"ByteList\"):\n custom_types[name] = value\n continue\n\n if value.startswith(\"get_generalized_index\"):\n ssz_dep_constants[name] = value\n continue\n\n value_def = _parse_value(name, value)\n if name in preset:\n preset_vars[name] = VariableDefinition(value_def.type_name, preset[name], value_def.comment)\n elif name in config:\n config_vars[name] = VariableDefinition(value_def.type_name, config[name], value_def.comment)\n else:\n constant_vars[name] = value_def\n\n elif isinstance(child, LinkRefDef):\n comment = _get_eth2_spec_comment(child)\n if comment == \"skip\":\n should_skip = True\n\n return SpecObject(\n functions=functions,\n protocols=protocols,\n custom_types=custom_types,\n constant_vars=constant_vars,\n preset_vars=preset_vars,\n config_vars=config_vars,\n ssz_dep_constants=ssz_dep_constants,\n ssz_objects=ssz_objects,\n dataclasses=dataclasses,\n )\n\n\nclass SpecBuilder(ABC):\n @property\n @abstractmethod\n def fork(self) -> str:\n raise NotImplementedError()\n\n @classmethod\n @abstractmethod\n def imports(cls, preset_name: str) -> str:\n \"\"\"\n Import objects from other libraries.\n \"\"\"\n raise NotImplementedError()\n\n @classmethod\n @abstractmethod\n def preparations(cls) -> str:\n \"\"\"\n Define special types/constants for building pyspec or call functions.\n \"\"\"\n raise NotImplementedError()\n\n @classmethod\n @abstractmethod\n def sundry_functions(cls) -> str:\n \"\"\"\n The functions that are (1) defined abstractly in specs or (2) adjusted for getting better performance.\n \"\"\"\n raise NotImplementedError()\n\n @classmethod\n @abstractmethod\n def hardcoded_ssz_dep_constants(cls) -> Dict[str, str]:\n \"\"\"\n The constants that are required for SSZ objects.\n \"\"\"\n raise NotImplementedError()\n\n @classmethod\n @abstractmethod\n def hardcoded_custom_type_dep_constants(cls) -> Dict[str, str]: # TODO\n \"\"\"\n The constants that are required for custom types.\n \"\"\"\n raise NotImplementedError()\n\n @classmethod\n @abstractmethod\n def invariant_checks(cls) -> str:\n \"\"\"\n The invariant checks\n \"\"\"\n raise NotImplementedError()\n\n @classmethod\n @abstractmethod\n def build_spec(cls, preset_name: str,\n source_files: List[Path], preset_files: Sequence[Path], config_file: Path) -> str:\n raise NotImplementedError()\n\n\n#\n# Phase0SpecBuilder\n#\nclass Phase0SpecBuilder(SpecBuilder):\n fork: str = PHASE0\n\n @classmethod\n def imports(cls, preset_name: str) -> str:\n return '''from lru import LRU\nfrom dataclasses import (\n dataclass,\n field,\n)\nfrom typing import (\n Any, Callable, Dict, Set, Sequence, Tuple, Optional, TypeVar, NamedTuple\n)\n\nfrom eth2spec.utils.ssz.ssz_impl import hash_tree_root, copy, uint_to_bytes\nfrom eth2spec.utils.ssz.ssz_typing import (\n View, boolean, Container, List, Vector, uint8, uint32, uint64,\n Bytes1, Bytes4, Bytes32, Bytes48, Bytes96, Bitlist)\nfrom eth2spec.utils.ssz.ssz_typing import Bitvector # noqa: F401\nfrom eth2spec.utils import bls\nfrom eth2spec.utils.hash_function import hash\n'''\n\n @classmethod\n def preparations(cls) -> str:\n return '''\nSSZObject = TypeVar('SSZObject', bound=View)\n'''\n\n @classmethod\n def sundry_functions(cls) -> str:\n return '''\ndef get_eth1_data(block: Eth1Block) -> Eth1Data:\n \"\"\"\n A stub function return mocking Eth1Data.\n \"\"\"\n return Eth1Data(\n deposit_root=block.deposit_root,\n deposit_count=block.deposit_count,\n block_hash=hash_tree_root(block))\n\n\ndef 
cache_this(key_fn, value_fn, lru_size): # type: ignore\n cache_dict = LRU(size=lru_size)\n\n def wrapper(*args, **kw): # type: ignore\n key = key_fn(*args, **kw)\n nonlocal cache_dict\n if key not in cache_dict:\n cache_dict[key] = value_fn(*args, **kw)\n return cache_dict[key]\n return wrapper\n\n\n_compute_shuffled_index = compute_shuffled_index\ncompute_shuffled_index = cache_this(\n lambda index, index_count, seed: (index, index_count, seed),\n _compute_shuffled_index, lru_size=SLOTS_PER_EPOCH * 3)\n\n_get_total_active_balance = get_total_active_balance\nget_total_active_balance = cache_this(\n lambda state: (state.validators.hash_tree_root(), compute_epoch_at_slot(state.slot)),\n _get_total_active_balance, lru_size=10)\n\n_get_base_reward = get_base_reward\nget_base_reward = cache_this(\n lambda state, index: (state.validators.hash_tree_root(), state.slot, index),\n _get_base_reward, lru_size=2048)\n\n_get_committee_count_per_slot = get_committee_count_per_slot\nget_committee_count_per_slot = cache_this(\n lambda state, epoch: (state.validators.hash_tree_root(), epoch),\n _get_committee_count_per_slot, lru_size=SLOTS_PER_EPOCH * 3)\n\n_get_active_validator_indices = get_active_validator_indices\nget_active_validator_indices = cache_this(\n lambda state, epoch: (state.validators.hash_tree_root(), epoch),\n _get_active_validator_indices, lru_size=3)\n\n_get_beacon_committee = get_beacon_committee\nget_beacon_committee = cache_this(\n lambda state, slot, index: (state.validators.hash_tree_root(), state.randao_mixes.hash_tree_root(), slot, index),\n _get_beacon_committee, lru_size=SLOTS_PER_EPOCH * MAX_COMMITTEES_PER_SLOT * 3)\n\n_get_matching_target_attestations = get_matching_target_attestations\nget_matching_target_attestations = cache_this(\n lambda state, epoch: (state.hash_tree_root(), epoch),\n _get_matching_target_attestations, lru_size=10)\n\n_get_matching_head_attestations = get_matching_head_attestations\nget_matching_head_attestations = cache_this(\n lambda state, epoch: (state.hash_tree_root(), epoch),\n _get_matching_head_attestations, lru_size=10)\n\n_get_attesting_indices = get_attesting_indices\nget_attesting_indices = cache_this(\n lambda state, data, bits: (\n state.randao_mixes.hash_tree_root(),\n state.validators.hash_tree_root(), data.hash_tree_root(), bits.hash_tree_root()\n ),\n _get_attesting_indices, lru_size=SLOTS_PER_EPOCH * MAX_COMMITTEES_PER_SLOT * 3)'''\n\n @classmethod\n def hardcoded_ssz_dep_constants(cls) -> Dict[str, str]:\n return {}\n\n @classmethod\n def hardcoded_custom_type_dep_constants(cls) -> Dict[str, str]:\n return {}\n\n @classmethod\n def invariant_checks(cls) -> str:\n return ''\n\n @classmethod\n def build_spec(cls, preset_name: str,\n source_files: Sequence[Path], preset_files: Sequence[Path], config_file: Path) -> str:\n return _build_spec(preset_name, cls.fork, source_files, preset_files, config_file)\n\n\n#\n# AltairSpecBuilder\n#\nclass AltairSpecBuilder(Phase0SpecBuilder):\n fork: str = ALTAIR\n\n @classmethod\n def imports(cls, preset_name: str) -> str:\n return super().imports(preset_name) + '\\n' + f'''\nfrom typing import NewType, Union\n\nfrom eth2spec.phase0 import {preset_name} as phase0\nfrom eth2spec.utils.ssz.ssz_typing import Path\n'''\n\n @classmethod\n def preparations(cls):\n return super().preparations() + '\\n' + '''\nSSZVariableName = str\nGeneralizedIndex = NewType('GeneralizedIndex', int)\n'''\n\n @classmethod\n def sundry_functions(cls) -> str:\n return super().sundry_functions() + '\\n\\n' + '''\ndef 
get_generalized_index(ssz_class: Any, *path: Sequence[Union[int, SSZVariableName]]) -> GeneralizedIndex:\n ssz_path = Path(ssz_class)\n for item in path:\n ssz_path = ssz_path / item\n return GeneralizedIndex(ssz_path.gindex())'''\n\n\n @classmethod\n def hardcoded_ssz_dep_constants(cls) -> Dict[str, str]:\n constants = {\n 'FINALIZED_ROOT_INDEX': 'GeneralizedIndex(105)',\n 'NEXT_SYNC_COMMITTEE_INDEX': 'GeneralizedIndex(55)',\n }\n return {**super().hardcoded_ssz_dep_constants(), **constants}\n\n @classmethod\n def invariant_checks(cls) -> str:\n return '''\nassert (\n TIMELY_HEAD_WEIGHT + TIMELY_SOURCE_WEIGHT + TIMELY_TARGET_WEIGHT + SYNC_REWARD_WEIGHT + PROPOSER_WEIGHT\n) == WEIGHT_DENOMINATOR'''\n\n\n#\n# MergeSpecBuilder\n#\nclass MergeSpecBuilder(Phase0SpecBuilder):\n fork: str = MERGE\n\n @classmethod\n def imports(cls, preset_name: str):\n return super().imports(preset_name) + f'''\nfrom typing import Protocol\nfrom eth2spec.phase0 import {preset_name} as phase0\nfrom eth2spec.utils.ssz.ssz_typing import Bytes20, ByteList, ByteVector, uint256\n'''\n\n @classmethod\n def preparations(cls):\n return super().preparations()\n\n @classmethod\n def sundry_functions(cls) -> str:\n return super().sundry_functions() + '\\n\\n' + \"\"\"\nExecutionState = Any\n\n\ndef get_pow_block(hash: Bytes32) -> PowBlock:\n return PowBlock(block_hash=hash, is_valid=True, is_processed=True,\n total_difficulty=config.TRANSITION_TOTAL_DIFFICULTY)\n\n\ndef get_execution_state(execution_state_root: Bytes32) -> ExecutionState:\n pass\n\n\ndef get_pow_chain_head() -> PowBlock:\n pass\n\n\nclass NoopExecutionEngine(ExecutionEngine):\n\n def new_block(self, execution_payload: ExecutionPayload) -> bool:\n return True\n\n def set_head(self, block_hash: Hash32) -> bool:\n return True\n\n def finalize_block(self, block_hash: Hash32) -> bool:\n return True\n\n def assemble_block(self, block_hash: Hash32, timestamp: uint64) -> ExecutionPayload:\n raise NotImplementedError(\"no default block production\")\n\n\nEXECUTION_ENGINE = NoopExecutionEngine()\"\"\"\n\n\n @classmethod\n def hardcoded_custom_type_dep_constants(cls) -> str:\n constants = {\n 'MAX_BYTES_PER_OPAQUE_TRANSACTION': 'uint64(2**20)',\n }\n return {**super().hardcoded_custom_type_dep_constants(), **constants}\n\n\nspec_builders = {\n builder.fork: builder\n for builder in (Phase0SpecBuilder, AltairSpecBuilder, MergeSpecBuilder)\n}\n\n\ndef objects_to_spec(preset_name: str,\n spec_object: SpecObject,\n builder: SpecBuilder,\n ordered_class_objects: Dict[str, str]) -> str:\n \"\"\"\n Given all the objects that constitute a spec, combine them into a single pyfile.\n \"\"\"\n new_type_definitions = (\n '\\n\\n'.join(\n [\n f\"class {key}({value}):\\n pass\\n\"\n for key, value in spec_object.custom_types.items()\n if not value.startswith('ByteList')\n ]\n )\n + ('\\n\\n' if len([key for key, value in spec_object.custom_types.items() if value.startswith('ByteList')]) > 0 else '')\n + '\\n\\n'.join(\n [\n f\"{key} = {value}\\n\"\n for key, value in spec_object.custom_types.items()\n if value.startswith('ByteList')\n ]\n )\n )\n\n def format_protocol(protocol_name: str, protocol_def: ProtocolDefinition) -> str:\n protocol = f\"class {protocol_name}(Protocol):\"\n for fn_source in protocol_def.functions.values():\n fn_source = fn_source.replace(\"self: \"+protocol_name, \"self\")\n protocol += \"\\n\\n\" + textwrap.indent(fn_source, \" \")\n return protocol\n\n protocols_spec = '\\n\\n\\n'.join(format_protocol(k, v) for k, v in spec_object.protocols.items())\n for k 
in list(spec_object.functions):\n if \"ceillog2\" in k or \"floorlog2\" in k:\n del spec_object.functions[k]\n functions_spec = '\\n\\n\\n'.join(spec_object.functions.values())\n\n # Access global dict of config vars for runtime configurables\n for name in spec_object.config_vars.keys():\n functions_spec = functions_spec.replace(name, 'config.' + name)\n\n def format_config_var(name: str, vardef: VariableDefinition) -> str:\n if vardef.type_name is None:\n out = f'{name}={vardef.value}'\n else:\n out = f'{name}={vardef.type_name}({vardef.value}),'\n if vardef.comment is not None:\n out += f' # {vardef.comment}'\n return out\n\n config_spec = 'class Configuration(NamedTuple):\\n'\n config_spec += ' PRESET_BASE: str\\n'\n config_spec += '\\n'.join(f' {k}: {v.type_name if v.type_name is not None else \"int\"}'\n for k, v in spec_object.config_vars.items())\n config_spec += '\\n\\n\\nconfig = Configuration(\\n'\n config_spec += f' PRESET_BASE=\"{preset_name}\",\\n'\n config_spec += '\\n'.join(' ' + format_config_var(k, v) for k, v in spec_object.config_vars.items())\n config_spec += '\\n)\\n'\n\n def format_constant(name: str, vardef: VariableDefinition) -> str:\n if vardef.type_name is None:\n out = f'{name} = {vardef.value}'\n else:\n out = f'{name} = {vardef.type_name}({vardef.value})'\n if vardef.comment is not None:\n out += f' # {vardef.comment}'\n return out\n\n constant_vars_spec = '# Constant vars\\n' + '\\n'.join(format_constant(k, v) for k, v in spec_object.constant_vars.items())\n preset_vars_spec = '# Preset vars\\n' + '\\n'.join(format_constant(k, v) for k, v in spec_object.preset_vars.items())\n ordered_class_objects_spec = '\\n\\n\\n'.join(ordered_class_objects.values())\n ssz_dep_constants = '\\n'.join(map(lambda x: '%s = %s' % (x, builder.hardcoded_ssz_dep_constants()[x]), builder.hardcoded_ssz_dep_constants()))\n ssz_dep_constants_verification = '\\n'.join(map(lambda x: 'assert %s == %s' % (x, spec_object.ssz_dep_constants[x]), builder.hardcoded_ssz_dep_constants()))\n custom_type_dep_constants = '\\n'.join(map(lambda x: '%s = %s' % (x, builder.hardcoded_custom_type_dep_constants()[x]), builder.hardcoded_custom_type_dep_constants()))\n spec = (\n builder.imports(preset_name)\n + builder.preparations()\n + '\\n\\n' + f\"fork = \\'{builder.fork}\\'\\n\"\n # The constants that some SSZ containers require. Need to be defined before `new_type_definitions`\n + ('\\n\\n' + custom_type_dep_constants + '\\n' if custom_type_dep_constants != '' else '')\n + '\\n\\n' + new_type_definitions\n + '\\n' + CONSTANT_DEP_SUNDRY_CONSTANTS_FUNCTIONS\n # The constants that some SSZ containers require. 
Need to be defined before `constants_spec`\n + ('\\n\\n' + ssz_dep_constants if ssz_dep_constants != '' else '')\n + '\\n\\n' + constant_vars_spec\n + '\\n\\n' + preset_vars_spec\n + '\\n\\n\\n' + config_spec\n + '\\n\\n' + ordered_class_objects_spec\n + ('\\n\\n\\n' + protocols_spec if protocols_spec != '' else '')\n + '\\n\\n\\n' + functions_spec\n + '\\n\\n' + builder.sundry_functions()\n # Since some constants are hardcoded in setup.py, the following assertions verify that the hardcoded constants are\n # as same as the spec definition.\n + ('\\n\\n\\n' + ssz_dep_constants_verification if ssz_dep_constants_verification != '' else '')\n + ('\\n' + builder.invariant_checks() if builder.invariant_checks() != '' else '')\n + '\\n'\n )\n return spec\n\n\ndef combine_protocols(old_protocols: Dict[str, ProtocolDefinition],\n new_protocols: Dict[str, ProtocolDefinition]) -> Dict[str, ProtocolDefinition]:\n for key, value in new_protocols.items():\n if key not in old_protocols:\n old_protocols[key] = value\n else:\n functions = combine_dicts(old_protocols[key].functions, value.functions)\n old_protocols[key] = ProtocolDefinition(functions=functions)\n return old_protocols\n\n\nT = TypeVar('T')\n\n\ndef combine_dicts(old_dict: Dict[str, T], new_dict: Dict[str, T]) -> Dict[str, T]:\n return {**old_dict, **new_dict}\n\n\nignored_dependencies = [\n 'bit', 'boolean', 'Vector', 'List', 'Container', 'BLSPubkey', 'BLSSignature',\n 'Bytes1', 'Bytes4', 'Bytes20', 'Bytes32', 'Bytes48', 'Bytes96', 'Bitlist', 'Bitvector',\n 'uint8', 'uint16', 'uint32', 'uint64', 'uint128', 'uint256',\n 'bytes', 'byte', 'ByteList', 'ByteVector',\n 'Dict', 'dict', 'field', 'ceillog2', 'floorlog2', 'Set',\n]\n\n\ndef dependency_order_class_objects(objects: Dict[str, str], custom_types: Dict[str, str]) -> None:\n \"\"\"\n Determines which SSZ Object is dependent on which other and orders them appropriately\n \"\"\"\n items = list(objects.items())\n for key, value in items:\n dependencies = []\n for line in value.split('\\n'):\n if not re.match(r'\\s+\\w+: .+', line):\n continue # skip whitespace etc.\n line = line[line.index(':') + 1:] # strip of field name\n if '#' in line:\n line = line[:line.index('#')] # strip of comment\n dependencies.extend(re.findall(r'(\\w+)', line)) # catch all legible words, potential dependencies\n dependencies = filter(lambda x: '_' not in x and x.upper() != x, dependencies) # filter out constants\n dependencies = filter(lambda x: x not in ignored_dependencies, dependencies)\n dependencies = filter(lambda x: x not in custom_types, dependencies)\n for dep in dependencies:\n key_list = list(objects.keys())\n for item in [dep, key] + key_list[key_list.index(dep)+1:]:\n objects[item] = objects.pop(item)\n\n\ndef combine_ssz_objects(old_objects: Dict[str, str], new_objects: Dict[str, str], custom_types) -> Dict[str, str]:\n \"\"\"\n Takes in old spec and new spec ssz objects, combines them,\n and returns the newer versions of the objects in dependency order.\n \"\"\"\n for key, value in new_objects.items():\n old_objects[key] = value\n return old_objects\n\n\ndef combine_spec_objects(spec0: SpecObject, spec1: SpecObject) -> SpecObject:\n \"\"\"\n Takes in two spec variants (as tuples of their objects) and combines them using the appropriate combiner function.\n \"\"\"\n protocols = combine_protocols(spec0.protocols, spec1.protocols)\n functions = combine_dicts(spec0.functions, spec1.functions)\n custom_types = combine_dicts(spec0.custom_types, spec1.custom_types)\n constant_vars = 
combine_dicts(spec0.constant_vars, spec1.constant_vars)\n preset_vars = combine_dicts(spec0.preset_vars, spec1.preset_vars)\n config_vars = combine_dicts(spec0.config_vars, spec1.config_vars)\n ssz_dep_constants = combine_dicts(spec0.ssz_dep_constants, spec1.ssz_dep_constants)\n ssz_objects = combine_ssz_objects(spec0.ssz_objects, spec1.ssz_objects, custom_types)\n dataclasses = combine_dicts(spec0.dataclasses, spec1.dataclasses)\n return SpecObject(\n functions=functions,\n protocols=protocols,\n custom_types=custom_types,\n constant_vars=constant_vars,\n preset_vars=preset_vars,\n config_vars=config_vars,\n ssz_dep_constants=ssz_dep_constants,\n ssz_objects=ssz_objects,\n dataclasses=dataclasses,\n )\n\n\ndef parse_config_vars(conf: Dict[str, str]) -> Dict[str, str]:\n \"\"\"\n Parses a dict of basic str/int/list types into a dict for insertion into the spec code.\n \"\"\"\n out: Dict[str, str] = dict()\n for k, v in conf.items():\n if isinstance(v, str) and (v.startswith(\"0x\") or k == 'PRESET_BASE'):\n # Represent byte data with string, to avoid misinterpretation as big-endian int.\n # Everything is either byte data or an integer, with PRESET_BASE as one exception.\n out[k] = f\"'{v}'\"\n else:\n out[k] = str(int(v))\n return out\n\n\ndef load_preset(preset_files: Sequence[Path]) -> Dict[str, str]:\n \"\"\"\n Loads the a directory of preset files, merges the result into one preset.\n \"\"\"\n preset = {}\n for fork_file in preset_files:\n yaml = YAML(typ='base')\n fork_preset: dict = yaml.load(fork_file)\n if fork_preset is None: # for empty YAML files\n continue\n if not set(fork_preset.keys()).isdisjoint(preset.keys()):\n duplicates = set(fork_preset.keys()).intersection(set(preset.keys()))\n raise Exception(f\"duplicate config var(s) in preset files: {', '.join(duplicates)}\")\n preset.update(fork_preset)\n assert preset != {}\n return parse_config_vars(preset)\n\n\ndef load_config(config_path: Path) -> Dict[str, str]:\n \"\"\"\n Loads the given configuration file.\n \"\"\"\n yaml = YAML(typ='base')\n config_data = yaml.load(config_path)\n return parse_config_vars(config_data)\n\n\ndef _build_spec(preset_name: str, fork: str,\n source_files: Sequence[Path], preset_files: Sequence[Path], config_file: Path) -> str:\n preset = load_preset(preset_files)\n config = load_config(config_file)\n all_specs = [get_spec(spec, preset, config) for spec in source_files]\n\n spec_object = all_specs[0]\n for value in all_specs[1:]:\n spec_object = combine_spec_objects(spec_object, value)\n\n class_objects = {**spec_object.ssz_objects, **spec_object.dataclasses}\n dependency_order_class_objects(class_objects, spec_object.custom_types)\n\n return objects_to_spec(preset_name, spec_object, spec_builders[fork], class_objects)\n\n\nclass BuildTarget(NamedTuple):\n name: str\n preset_paths: List[Path]\n config_path: Path\n\n\nclass PySpecCommand(Command):\n \"\"\"Convert spec markdown files to a spec python file\"\"\"\n\n description = \"Convert spec markdown files to a spec python file\"\n\n spec_fork: str\n md_doc_paths: str\n parsed_md_doc_paths: List[str]\n build_targets: str\n parsed_build_targets: List[BuildTarget]\n out_dir: str\n\n # The format is (long option, short option, description).\n user_options = [\n ('spec-fork=', None, \"Spec fork to tag build with. 
Used to select md-docs defaults.\"),\n ('md-doc-paths=', None, \"List of paths of markdown files to build spec with\"),\n ('build-targets=', None, \"Names, directory paths of compile-time presets, and default config paths.\"),\n ('out-dir=', None, \"Output directory to write spec package to\")\n ]\n\n def initialize_options(self):\n \"\"\"Set default values for options.\"\"\"\n # Each user option must be listed here with their default value.\n self.spec_fork = PHASE0\n self.md_doc_paths = ''\n self.out_dir = 'pyspec_output'\n self.build_targets = \"\"\"\n minimal:presets/minimal:configs/minimal.yaml\n mainnet:presets/mainnet:configs/mainnet.yaml \n \"\"\"\n\n def finalize_options(self):\n \"\"\"Post-process options.\"\"\"\n if len(self.md_doc_paths) == 0:\n print(\"no paths were specified, using default markdown file paths for pyspec\"\n \" build (spec fork: %s)\" % self.spec_fork)\n if self.spec_fork == PHASE0:\n self.md_doc_paths = \"\"\"\n specs/phase0/beacon-chain.md\n specs/phase0/fork-choice.md\n specs/phase0/validator.md\n specs/phase0/weak-subjectivity.md\n \"\"\"\n elif self.spec_fork == ALTAIR:\n self.md_doc_paths = \"\"\"\n specs/phase0/beacon-chain.md\n specs/phase0/fork-choice.md\n specs/phase0/validator.md\n specs/phase0/weak-subjectivity.md\n specs/altair/beacon-chain.md\n specs/altair/fork.md\n specs/altair/validator.md\n specs/altair/p2p-interface.md\n specs/altair/sync-protocol.md\n \"\"\"\n elif self.spec_fork == MERGE:\n self.md_doc_paths = \"\"\"\n specs/phase0/beacon-chain.md\n specs/phase0/fork-choice.md\n specs/phase0/validator.md\n specs/phase0/weak-subjectivity.md\n specs/merge/beacon-chain.md\n specs/merge/fork-choice.md\n specs/merge/validator.md\n \"\"\"\n else:\n raise Exception('no markdown files specified, and spec fork \"%s\" is unknown', self.spec_fork)\n\n self.parsed_md_doc_paths = self.md_doc_paths.split()\n\n for filename in self.parsed_md_doc_paths:\n if not os.path.exists(filename):\n raise Exception('Pyspec markdown input file \"%s\" does not exist.' 
% filename)\n\n self.parsed_build_targets = []\n for target in self.build_targets.split():\n target = target.strip()\n data = target.split(':')\n if len(data) != 3:\n raise Exception('invalid target, expected \"name:preset_dir:config_file\" format, but got: %s' % target)\n name, preset_dir_path, config_path = data\n if any((c not in string.digits + string.ascii_letters) for c in name):\n raise Exception('invalid target name: \"%s\"' % name)\n if not os.path.exists(preset_dir_path):\n raise Exception('Preset dir \"%s\" does not exist' % preset_dir_path)\n _, _, preset_file_names = next(os.walk(preset_dir_path))\n preset_paths = [(Path(preset_dir_path) / name) for name in preset_file_names]\n\n if not os.path.exists(config_path):\n raise Exception('Config file \"%s\" does not exist' % config_path)\n self.parsed_build_targets.append(BuildTarget(name, preset_paths, Path(config_path)))\n\n def run(self):\n if not self.dry_run:\n dir_util.mkpath(self.out_dir)\n\n for (name, preset_paths, config_path) in self.parsed_build_targets:\n spec_str = spec_builders[self.spec_fork].build_spec(\n name, self.parsed_md_doc_paths, preset_paths, config_path)\n if self.dry_run:\n self.announce('dry run successfully prepared contents for spec.'\n f' out dir: \"{self.out_dir}\", spec fork: \"{self.spec_fork}\", build target: \"{name}\"')\n self.debug_print(spec_str)\n else:\n with open(os.path.join(self.out_dir, name+'.py'), 'w') as out:\n out.write(spec_str)\n\n if not self.dry_run:\n with open(os.path.join(self.out_dir, '__init__.py'), 'w') as out:\n # `mainnet` is the default spec.\n out.write(\"from . import mainnet as spec # noqa:F401\\n\")\n\n\nclass BuildPyCommand(build_py):\n \"\"\"Customize the build command to run the spec-builder on setup.py build\"\"\"\n\n def initialize_options(self):\n super(BuildPyCommand, self).initialize_options()\n\n def run_pyspec_cmd(self, spec_fork: str, **opts):\n cmd_obj: PySpecCommand = self.distribution.reinitialize_command(\"pyspec\")\n cmd_obj.spec_fork = spec_fork\n cmd_obj.out_dir = os.path.join(self.build_lib, 'eth2spec', spec_fork)\n for k, v in opts.items():\n setattr(cmd_obj, k, v)\n self.run_command('pyspec')\n\n def run(self):\n for spec_fork in spec_builders:\n self.run_pyspec_cmd(spec_fork=spec_fork)\n\n super(BuildPyCommand, self).run()\n\n\nclass PyspecDevCommand(Command):\n \"\"\"Build the markdown files in-place to their source location for testing.\"\"\"\n description = \"Build the markdown files in-place to their source location for testing.\"\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run_pyspec_cmd(self, spec_fork: str, **opts):\n cmd_obj: PySpecCommand = self.distribution.reinitialize_command(\"pyspec\")\n cmd_obj.spec_fork = spec_fork\n eth2spec_dir = convert_path(self.distribution.package_dir['eth2spec'])\n cmd_obj.out_dir = os.path.join(eth2spec_dir, spec_fork)\n for k, v in opts.items():\n setattr(cmd_obj, k, v)\n self.run_command('pyspec')\n\n def run(self):\n print(\"running build_py command\")\n for spec_fork in spec_builders:\n self.run_pyspec_cmd(spec_fork=spec_fork)\n\ncommands = {\n 'pyspec': PySpecCommand,\n 'build_py': BuildPyCommand,\n 'pyspecdev': PyspecDevCommand,\n}\n\nwith open(\"README.md\", \"rt\", encoding=\"utf8\") as f:\n readme = f.read()\n\n# How to use \"VERSION.txt\" file:\n# - dev branch contains \"X.Y.Z.dev\", where \"X.Y.Z\" is the target version to release dev into.\n# -> Changed as part of 'master' backport to 'dev'\n# - master branch contains \"X.Y.Z\", 
where \"X.Y.Z\" is the current version.\n# -> Changed as part of 'dev' release (or other branch) into 'master'\n# -> In case of a commit on master without git tag, target the next version\n# with \".postN\" (release candidate, numbered) suffixed.\n# See https://www.python.org/dev/peps/pep-0440/#public-version-identifiers\nwith open(os.path.join('tests', 'core', 'pyspec', 'eth2spec', 'VERSION.txt')) as f:\n spec_version = f.read().strip()\n\nsetup(\n name='eth2spec',\n version=spec_version,\n description=\"Eth2 spec, provided as Python package for tooling and testing\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n author=\"ethereum\",\n url=\"https://github.com/ethereum/eth2.0-specs\",\n include_package_data=False,\n package_data={'configs': ['*.yaml'],\n 'presets': ['*.yaml'],\n 'specs': ['**/*.md'],\n 'eth2spec': ['VERSION.txt']},\n package_dir={\n \"eth2spec\": \"tests/core/pyspec/eth2spec\",\n \"configs\": \"configs\",\n \"presets\": \"presets\",\n \"specs\": \"specs\",\n },\n packages=find_packages(where='tests/core/pyspec') + ['configs', 'specs'],\n py_modules=[\"eth2spec\"],\n cmdclass=commands,\n python_requires=\">=3.8, <4\",\n extras_require={\n \"test\": [\"pytest>=4.4\", \"pytest-cov\", \"pytest-xdist\"],\n \"lint\": [\"flake8==3.7.7\", \"mypy==0.750\"],\n \"generator\": [\"python-snappy==0.5.4\"],\n },\n install_requires=[\n \"eth-utils>=1.3.0,<2\",\n \"eth-typing>=2.1.0,<3.0.0\",\n \"pycryptodome==3.9.4\",\n \"py_ecc==5.2.0\",\n \"milagro_bls_binding==1.6.3\",\n \"dataclasses==0.6\",\n \"remerkleable==0.1.19\",\n RUAMEL_YAML_VERSION,\n \"lru-dict==1.1.6\",\n MARKO_VERSION,\n ]\n)\n", "path": "setup.py" } ]
[ { "content": "from setuptools import setup, find_packages, Command\nfrom setuptools.command.build_py import build_py\nfrom distutils import dir_util\nfrom distutils.util import convert_path\nfrom pathlib import Path\nimport os\nimport re\nimport string\nimport textwrap\nfrom typing import Dict, NamedTuple, List, Sequence, Optional, TypeVar\nfrom abc import ABC, abstractmethod\nimport ast\n\n\n# NOTE: have to programmatically include third-party dependencies in `setup.py`.\nRUAMEL_YAML_VERSION = \"ruamel.yaml==0.16.5\"\ntry:\n import ruamel.yaml\nexcept ImportError:\n import pip\n pip.main([\"install\", RUAMEL_YAML_VERSION])\n\nfrom ruamel.yaml import YAML\n\nMARKO_VERSION = \"marko==1.0.2\"\ntry:\n import marko\nexcept ImportError:\n import pip\n pip.main([\"install\", MARKO_VERSION])\n\nfrom marko.block import Heading, FencedCode, LinkRefDef, BlankLine\nfrom marko.inline import CodeSpan\nfrom marko.ext.gfm import gfm\nfrom marko.ext.gfm.elements import Table, Paragraph\n\n\n# Definitions in context.py\nPHASE0 = 'phase0'\nALTAIR = 'altair'\nMERGE = 'merge'\n\n# The helper functions that are used when defining constants\nCONSTANT_DEP_SUNDRY_CONSTANTS_FUNCTIONS = '''\ndef ceillog2(x: int) -> uint64:\n if x < 1:\n raise ValueError(f\"ceillog2 accepts only positive values, x={x}\")\n return uint64((x - 1).bit_length())\n\n\ndef floorlog2(x: int) -> uint64:\n if x < 1:\n raise ValueError(f\"floorlog2 accepts only positive values, x={x}\")\n return uint64(x.bit_length() - 1)\n'''\n\n\nclass ProtocolDefinition(NamedTuple):\n # just function definitions currently. May expand with configuration vars in future.\n functions: Dict[str, str]\n\n\nclass VariableDefinition(NamedTuple):\n type_name: Optional[str]\n value: str\n comment: Optional[str] # e.g. \"noqa: E501\"\n\n\nclass SpecObject(NamedTuple):\n functions: Dict[str, str]\n protocols: Dict[str, ProtocolDefinition]\n custom_types: Dict[str, str]\n constant_vars: Dict[str, VariableDefinition]\n preset_vars: Dict[str, VariableDefinition]\n config_vars: Dict[str, VariableDefinition]\n ssz_dep_constants: Dict[str, str] # the constants that depend on ssz_objects\n ssz_objects: Dict[str, str]\n dataclasses: Dict[str, str]\n\n\ndef _get_name_from_heading(heading: Heading) -> Optional[str]:\n last_child = heading.children[-1]\n if isinstance(last_child, CodeSpan):\n return last_child.children\n return None\n\n\ndef _get_source_from_code_block(block: FencedCode) -> str:\n return block.children[0].children.strip()\n\n\ndef _get_function_name_from_source(source: str) -> str:\n fn = ast.parse(source).body[0]\n return fn.name\n\n\ndef _get_self_type_from_source(source: str) -> Optional[str]:\n fn = ast.parse(source).body[0]\n args = fn.args.args\n if len(args) == 0:\n return None\n if args[0].arg != 'self':\n return None\n if args[0].annotation is None:\n return None\n return args[0].annotation.id\n\n\ndef _get_class_info_from_source(source: str) -> (str, Optional[str]):\n class_def = ast.parse(source).body[0]\n base = class_def.bases[0]\n if isinstance(base, ast.Name):\n parent_class = base.id\n else:\n # NOTE: SSZ definition derives from earlier phase...\n # e.g. 
`phase0.SignedBeaconBlock`\n # TODO: check for consistency with other phases\n parent_class = None\n return class_def.name, parent_class\n\n\ndef _is_constant_id(name: str) -> bool:\n if name[0] not in string.ascii_uppercase + '_':\n return False\n return all(map(lambda c: c in string.ascii_uppercase + '_' + string.digits, name[1:]))\n\n\nETH2_SPEC_COMMENT_PREFIX = \"eth2spec:\"\n\n\ndef _get_eth2_spec_comment(child: LinkRefDef) -> Optional[str]:\n _, _, title = child._parse_info\n if not (title[0] == \"(\" and title[len(title)-1] == \")\"):\n return None\n title = title[1:len(title)-1]\n if not title.startswith(ETH2_SPEC_COMMENT_PREFIX):\n return None\n return title[len(ETH2_SPEC_COMMENT_PREFIX):].strip()\n\n\ndef _parse_value(name: str, typed_value: str) -> VariableDefinition:\n comment = None\n if name == \"BLS12_381_Q\":\n comment = \"noqa: E501\"\n\n typed_value = typed_value.strip()\n if '(' not in typed_value:\n return VariableDefinition(type_name=None, value=typed_value, comment=comment)\n i = typed_value.index('(')\n type_name = typed_value[:i]\n\n return VariableDefinition(type_name=type_name, value=typed_value[i+1:-1], comment=comment)\n\n\ndef get_spec(file_name: Path, preset: Dict[str, str], config: Dict[str, str]) -> SpecObject:\n functions: Dict[str, str] = {}\n protocols: Dict[str, ProtocolDefinition] = {}\n constant_vars: Dict[str, VariableDefinition] = {}\n preset_vars: Dict[str, VariableDefinition] = {}\n config_vars: Dict[str, VariableDefinition] = {}\n ssz_dep_constants: Dict[str, str] = {}\n ssz_objects: Dict[str, str] = {}\n dataclasses: Dict[str, str] = {}\n custom_types: Dict[str, str] = {}\n\n with open(file_name) as source_file:\n document = gfm.parse(source_file.read())\n\n current_name = None\n should_skip = False\n for child in document.children:\n if isinstance(child, BlankLine):\n continue\n if should_skip:\n should_skip = False\n continue\n if isinstance(child, Heading):\n current_name = _get_name_from_heading(child)\n elif isinstance(child, FencedCode):\n if child.lang != \"python\":\n continue\n source = _get_source_from_code_block(child)\n if source.startswith(\"def\"):\n current_name = _get_function_name_from_source(source)\n self_type_name = _get_self_type_from_source(source)\n function_def = \"\\n\".join(line.rstrip() for line in source.splitlines())\n if self_type_name is None:\n functions[current_name] = function_def\n else:\n if self_type_name not in protocols:\n protocols[self_type_name] = ProtocolDefinition(functions={})\n protocols[self_type_name].functions[current_name] = function_def\n elif source.startswith(\"@dataclass\"):\n dataclasses[current_name] = \"\\n\".join(line.rstrip() for line in source.splitlines())\n elif source.startswith(\"class\"):\n class_name, parent_class = _get_class_info_from_source(source)\n # check consistency with spec\n assert class_name == current_name\n if parent_class:\n assert parent_class == \"Container\"\n # NOTE: trim whitespace from spec\n ssz_objects[current_name] = \"\\n\".join(line.rstrip() for line in source.splitlines())\n else:\n raise Exception(\"unrecognized python code element\")\n elif isinstance(child, Table):\n for row in child.children:\n cells = row.children\n if len(cells) >= 2:\n name_cell = cells[0]\n name = name_cell.children[0].children\n\n value_cell = cells[1]\n value = value_cell.children[0].children\n if isinstance(value, list):\n # marko parses `**X**` as a list containing a X\n value = value[0].children\n\n if not _is_constant_id(name):\n # Check for short type declarations\n if 
value.startswith(\"uint\") or value.startswith(\"Bytes\") or value.startswith(\"ByteList\"):\n custom_types[name] = value\n continue\n\n if value.startswith(\"get_generalized_index\"):\n ssz_dep_constants[name] = value\n continue\n\n value_def = _parse_value(name, value)\n if name in preset:\n preset_vars[name] = VariableDefinition(value_def.type_name, preset[name], value_def.comment)\n elif name in config:\n config_vars[name] = VariableDefinition(value_def.type_name, config[name], value_def.comment)\n else:\n constant_vars[name] = value_def\n\n elif isinstance(child, LinkRefDef):\n comment = _get_eth2_spec_comment(child)\n if comment == \"skip\":\n should_skip = True\n\n return SpecObject(\n functions=functions,\n protocols=protocols,\n custom_types=custom_types,\n constant_vars=constant_vars,\n preset_vars=preset_vars,\n config_vars=config_vars,\n ssz_dep_constants=ssz_dep_constants,\n ssz_objects=ssz_objects,\n dataclasses=dataclasses,\n )\n\n\nclass SpecBuilder(ABC):\n @property\n @abstractmethod\n def fork(self) -> str:\n raise NotImplementedError()\n\n @classmethod\n @abstractmethod\n def imports(cls, preset_name: str) -> str:\n \"\"\"\n Import objects from other libraries.\n \"\"\"\n raise NotImplementedError()\n\n @classmethod\n @abstractmethod\n def preparations(cls) -> str:\n \"\"\"\n Define special types/constants for building pyspec or call functions.\n \"\"\"\n raise NotImplementedError()\n\n @classmethod\n @abstractmethod\n def sundry_functions(cls) -> str:\n \"\"\"\n The functions that are (1) defined abstractly in specs or (2) adjusted for getting better performance.\n \"\"\"\n raise NotImplementedError()\n\n @classmethod\n @abstractmethod\n def hardcoded_ssz_dep_constants(cls) -> Dict[str, str]:\n \"\"\"\n The constants that are required for SSZ objects.\n \"\"\"\n raise NotImplementedError()\n\n @classmethod\n @abstractmethod\n def hardcoded_custom_type_dep_constants(cls) -> Dict[str, str]: # TODO\n \"\"\"\n The constants that are required for custom types.\n \"\"\"\n raise NotImplementedError()\n\n @classmethod\n @abstractmethod\n def invariant_checks(cls) -> str:\n \"\"\"\n The invariant checks\n \"\"\"\n raise NotImplementedError()\n\n @classmethod\n @abstractmethod\n def build_spec(cls, preset_name: str,\n source_files: List[Path], preset_files: Sequence[Path], config_file: Path) -> str:\n raise NotImplementedError()\n\n\n#\n# Phase0SpecBuilder\n#\nclass Phase0SpecBuilder(SpecBuilder):\n fork: str = PHASE0\n\n @classmethod\n def imports(cls, preset_name: str) -> str:\n return '''from lru import LRU\nfrom dataclasses import (\n dataclass,\n field,\n)\nfrom typing import (\n Any, Callable, Dict, Set, Sequence, Tuple, Optional, TypeVar, NamedTuple\n)\n\nfrom eth2spec.utils.ssz.ssz_impl import hash_tree_root, copy, uint_to_bytes\nfrom eth2spec.utils.ssz.ssz_typing import (\n View, boolean, Container, List, Vector, uint8, uint32, uint64,\n Bytes1, Bytes4, Bytes32, Bytes48, Bytes96, Bitlist)\nfrom eth2spec.utils.ssz.ssz_typing import Bitvector # noqa: F401\nfrom eth2spec.utils import bls\nfrom eth2spec.utils.hash_function import hash\n'''\n\n @classmethod\n def preparations(cls) -> str:\n return '''\nSSZObject = TypeVar('SSZObject', bound=View)\n'''\n\n @classmethod\n def sundry_functions(cls) -> str:\n return '''\ndef get_eth1_data(block: Eth1Block) -> Eth1Data:\n \"\"\"\n A stub function return mocking Eth1Data.\n \"\"\"\n return Eth1Data(\n deposit_root=block.deposit_root,\n deposit_count=block.deposit_count,\n block_hash=hash_tree_root(block))\n\n\ndef 
cache_this(key_fn, value_fn, lru_size): # type: ignore\n cache_dict = LRU(size=lru_size)\n\n def wrapper(*args, **kw): # type: ignore\n key = key_fn(*args, **kw)\n nonlocal cache_dict\n if key not in cache_dict:\n cache_dict[key] = value_fn(*args, **kw)\n return cache_dict[key]\n return wrapper\n\n\n_compute_shuffled_index = compute_shuffled_index\ncompute_shuffled_index = cache_this(\n lambda index, index_count, seed: (index, index_count, seed),\n _compute_shuffled_index, lru_size=SLOTS_PER_EPOCH * 3)\n\n_get_total_active_balance = get_total_active_balance\nget_total_active_balance = cache_this(\n lambda state: (state.validators.hash_tree_root(), compute_epoch_at_slot(state.slot)),\n _get_total_active_balance, lru_size=10)\n\n_get_base_reward = get_base_reward\nget_base_reward = cache_this(\n lambda state, index: (state.validators.hash_tree_root(), state.slot, index),\n _get_base_reward, lru_size=2048)\n\n_get_committee_count_per_slot = get_committee_count_per_slot\nget_committee_count_per_slot = cache_this(\n lambda state, epoch: (state.validators.hash_tree_root(), epoch),\n _get_committee_count_per_slot, lru_size=SLOTS_PER_EPOCH * 3)\n\n_get_active_validator_indices = get_active_validator_indices\nget_active_validator_indices = cache_this(\n lambda state, epoch: (state.validators.hash_tree_root(), epoch),\n _get_active_validator_indices, lru_size=3)\n\n_get_beacon_committee = get_beacon_committee\nget_beacon_committee = cache_this(\n lambda state, slot, index: (state.validators.hash_tree_root(), state.randao_mixes.hash_tree_root(), slot, index),\n _get_beacon_committee, lru_size=SLOTS_PER_EPOCH * MAX_COMMITTEES_PER_SLOT * 3)\n\n_get_matching_target_attestations = get_matching_target_attestations\nget_matching_target_attestations = cache_this(\n lambda state, epoch: (state.hash_tree_root(), epoch),\n _get_matching_target_attestations, lru_size=10)\n\n_get_matching_head_attestations = get_matching_head_attestations\nget_matching_head_attestations = cache_this(\n lambda state, epoch: (state.hash_tree_root(), epoch),\n _get_matching_head_attestations, lru_size=10)\n\n_get_attesting_indices = get_attesting_indices\nget_attesting_indices = cache_this(\n lambda state, data, bits: (\n state.randao_mixes.hash_tree_root(),\n state.validators.hash_tree_root(), data.hash_tree_root(), bits.hash_tree_root()\n ),\n _get_attesting_indices, lru_size=SLOTS_PER_EPOCH * MAX_COMMITTEES_PER_SLOT * 3)'''\n\n @classmethod\n def hardcoded_ssz_dep_constants(cls) -> Dict[str, str]:\n return {}\n\n @classmethod\n def hardcoded_custom_type_dep_constants(cls) -> Dict[str, str]:\n return {}\n\n @classmethod\n def invariant_checks(cls) -> str:\n return ''\n\n @classmethod\n def build_spec(cls, preset_name: str,\n source_files: Sequence[Path], preset_files: Sequence[Path], config_file: Path) -> str:\n return _build_spec(preset_name, cls.fork, source_files, preset_files, config_file)\n\n\n#\n# AltairSpecBuilder\n#\nclass AltairSpecBuilder(Phase0SpecBuilder):\n fork: str = ALTAIR\n\n @classmethod\n def imports(cls, preset_name: str) -> str:\n return super().imports(preset_name) + '\\n' + f'''\nfrom typing import NewType, Union\n\nfrom eth2spec.phase0 import {preset_name} as phase0\nfrom eth2spec.utils.ssz.ssz_typing import Path\n'''\n\n @classmethod\n def preparations(cls):\n return super().preparations() + '\\n' + '''\nSSZVariableName = str\nGeneralizedIndex = NewType('GeneralizedIndex', int)\n'''\n\n @classmethod\n def sundry_functions(cls) -> str:\n return super().sundry_functions() + '\\n\\n' + '''\ndef 
get_generalized_index(ssz_class: Any, *path: Sequence[Union[int, SSZVariableName]]) -> GeneralizedIndex:\n ssz_path = Path(ssz_class)\n for item in path:\n ssz_path = ssz_path / item\n return GeneralizedIndex(ssz_path.gindex())'''\n\n\n @classmethod\n def hardcoded_ssz_dep_constants(cls) -> Dict[str, str]:\n constants = {\n 'FINALIZED_ROOT_INDEX': 'GeneralizedIndex(105)',\n 'NEXT_SYNC_COMMITTEE_INDEX': 'GeneralizedIndex(55)',\n }\n return {**super().hardcoded_ssz_dep_constants(), **constants}\n\n @classmethod\n def invariant_checks(cls) -> str:\n return '''\nassert (\n TIMELY_HEAD_WEIGHT + TIMELY_SOURCE_WEIGHT + TIMELY_TARGET_WEIGHT + SYNC_REWARD_WEIGHT + PROPOSER_WEIGHT\n) == WEIGHT_DENOMINATOR'''\n\n\n#\n# MergeSpecBuilder\n#\nclass MergeSpecBuilder(Phase0SpecBuilder):\n fork: str = MERGE\n\n @classmethod\n def imports(cls, preset_name: str):\n return super().imports(preset_name) + f'''\nfrom typing import Protocol\nfrom eth2spec.phase0 import {preset_name} as phase0\nfrom eth2spec.utils.ssz.ssz_typing import Bytes20, ByteList, ByteVector, uint256\n'''\n\n @classmethod\n def preparations(cls):\n return super().preparations()\n\n @classmethod\n def sundry_functions(cls) -> str:\n return super().sundry_functions() + '\\n\\n' + \"\"\"\nExecutionState = Any\n\n\ndef get_pow_block(hash: Bytes32) -> PowBlock:\n return PowBlock(block_hash=hash, is_valid=True, is_processed=True,\n total_difficulty=config.TRANSITION_TOTAL_DIFFICULTY)\n\n\ndef get_execution_state(execution_state_root: Bytes32) -> ExecutionState:\n pass\n\n\ndef get_pow_chain_head() -> PowBlock:\n pass\n\n\nclass NoopExecutionEngine(ExecutionEngine):\n\n def new_block(self, execution_payload: ExecutionPayload) -> bool:\n return True\n\n def set_head(self, block_hash: Hash32) -> bool:\n return True\n\n def finalize_block(self, block_hash: Hash32) -> bool:\n return True\n\n def assemble_block(self, block_hash: Hash32, timestamp: uint64) -> ExecutionPayload:\n raise NotImplementedError(\"no default block production\")\n\n\nEXECUTION_ENGINE = NoopExecutionEngine()\"\"\"\n\n\n @classmethod\n def hardcoded_custom_type_dep_constants(cls) -> str:\n constants = {\n 'MAX_BYTES_PER_OPAQUE_TRANSACTION': 'uint64(2**20)',\n }\n return {**super().hardcoded_custom_type_dep_constants(), **constants}\n\n\nspec_builders = {\n builder.fork: builder\n for builder in (Phase0SpecBuilder, AltairSpecBuilder, MergeSpecBuilder)\n}\n\n\ndef objects_to_spec(preset_name: str,\n spec_object: SpecObject,\n builder: SpecBuilder,\n ordered_class_objects: Dict[str, str]) -> str:\n \"\"\"\n Given all the objects that constitute a spec, combine them into a single pyfile.\n \"\"\"\n new_type_definitions = (\n '\\n\\n'.join(\n [\n f\"class {key}({value}):\\n pass\\n\"\n for key, value in spec_object.custom_types.items()\n if not value.startswith('ByteList')\n ]\n )\n + ('\\n\\n' if len([key for key, value in spec_object.custom_types.items() if value.startswith('ByteList')]) > 0 else '')\n + '\\n\\n'.join(\n [\n f\"{key} = {value}\\n\"\n for key, value in spec_object.custom_types.items()\n if value.startswith('ByteList')\n ]\n )\n )\n\n def format_protocol(protocol_name: str, protocol_def: ProtocolDefinition) -> str:\n protocol = f\"class {protocol_name}(Protocol):\"\n for fn_source in protocol_def.functions.values():\n fn_source = fn_source.replace(\"self: \"+protocol_name, \"self\")\n protocol += \"\\n\\n\" + textwrap.indent(fn_source, \" \")\n return protocol\n\n protocols_spec = '\\n\\n\\n'.join(format_protocol(k, v) for k, v in spec_object.protocols.items())\n for k 
in list(spec_object.functions):\n if \"ceillog2\" in k or \"floorlog2\" in k:\n del spec_object.functions[k]\n functions_spec = '\\n\\n\\n'.join(spec_object.functions.values())\n\n # Access global dict of config vars for runtime configurables\n for name in spec_object.config_vars.keys():\n functions_spec = functions_spec.replace(name, 'config.' + name)\n\n def format_config_var(name: str, vardef: VariableDefinition) -> str:\n if vardef.type_name is None:\n out = f'{name}={vardef.value}'\n else:\n out = f'{name}={vardef.type_name}({vardef.value}),'\n if vardef.comment is not None:\n out += f' # {vardef.comment}'\n return out\n\n config_spec = 'class Configuration(NamedTuple):\\n'\n config_spec += ' PRESET_BASE: str\\n'\n config_spec += '\\n'.join(f' {k}: {v.type_name if v.type_name is not None else \"int\"}'\n for k, v in spec_object.config_vars.items())\n config_spec += '\\n\\n\\nconfig = Configuration(\\n'\n config_spec += f' PRESET_BASE=\"{preset_name}\",\\n'\n config_spec += '\\n'.join(' ' + format_config_var(k, v) for k, v in spec_object.config_vars.items())\n config_spec += '\\n)\\n'\n\n def format_constant(name: str, vardef: VariableDefinition) -> str:\n if vardef.type_name is None:\n out = f'{name} = {vardef.value}'\n else:\n out = f'{name} = {vardef.type_name}({vardef.value})'\n if vardef.comment is not None:\n out += f' # {vardef.comment}'\n return out\n\n constant_vars_spec = '# Constant vars\\n' + '\\n'.join(format_constant(k, v) for k, v in spec_object.constant_vars.items())\n preset_vars_spec = '# Preset vars\\n' + '\\n'.join(format_constant(k, v) for k, v in spec_object.preset_vars.items())\n ordered_class_objects_spec = '\\n\\n\\n'.join(ordered_class_objects.values())\n ssz_dep_constants = '\\n'.join(map(lambda x: '%s = %s' % (x, builder.hardcoded_ssz_dep_constants()[x]), builder.hardcoded_ssz_dep_constants()))\n ssz_dep_constants_verification = '\\n'.join(map(lambda x: 'assert %s == %s' % (x, spec_object.ssz_dep_constants[x]), builder.hardcoded_ssz_dep_constants()))\n custom_type_dep_constants = '\\n'.join(map(lambda x: '%s = %s' % (x, builder.hardcoded_custom_type_dep_constants()[x]), builder.hardcoded_custom_type_dep_constants()))\n spec = (\n builder.imports(preset_name)\n + builder.preparations()\n + '\\n\\n' + f\"fork = \\'{builder.fork}\\'\\n\"\n # The constants that some SSZ containers require. Need to be defined before `new_type_definitions`\n + ('\\n\\n' + custom_type_dep_constants + '\\n' if custom_type_dep_constants != '' else '')\n + '\\n\\n' + new_type_definitions\n + '\\n' + CONSTANT_DEP_SUNDRY_CONSTANTS_FUNCTIONS\n # The constants that some SSZ containers require. 
Need to be defined before `constants_spec`\n + ('\\n\\n' + ssz_dep_constants if ssz_dep_constants != '' else '')\n + '\\n\\n' + constant_vars_spec\n + '\\n\\n' + preset_vars_spec\n + '\\n\\n\\n' + config_spec\n + '\\n\\n' + ordered_class_objects_spec\n + ('\\n\\n\\n' + protocols_spec if protocols_spec != '' else '')\n + '\\n\\n\\n' + functions_spec\n + '\\n\\n' + builder.sundry_functions()\n # Since some constants are hardcoded in setup.py, the following assertions verify that the hardcoded constants are\n # as same as the spec definition.\n + ('\\n\\n\\n' + ssz_dep_constants_verification if ssz_dep_constants_verification != '' else '')\n + ('\\n' + builder.invariant_checks() if builder.invariant_checks() != '' else '')\n + '\\n'\n )\n return spec\n\n\ndef combine_protocols(old_protocols: Dict[str, ProtocolDefinition],\n new_protocols: Dict[str, ProtocolDefinition]) -> Dict[str, ProtocolDefinition]:\n for key, value in new_protocols.items():\n if key not in old_protocols:\n old_protocols[key] = value\n else:\n functions = combine_dicts(old_protocols[key].functions, value.functions)\n old_protocols[key] = ProtocolDefinition(functions=functions)\n return old_protocols\n\n\nT = TypeVar('T')\n\n\ndef combine_dicts(old_dict: Dict[str, T], new_dict: Dict[str, T]) -> Dict[str, T]:\n return {**old_dict, **new_dict}\n\n\nignored_dependencies = [\n 'bit', 'boolean', 'Vector', 'List', 'Container', 'BLSPubkey', 'BLSSignature',\n 'Bytes1', 'Bytes4', 'Bytes20', 'Bytes32', 'Bytes48', 'Bytes96', 'Bitlist', 'Bitvector',\n 'uint8', 'uint16', 'uint32', 'uint64', 'uint128', 'uint256',\n 'bytes', 'byte', 'ByteList', 'ByteVector',\n 'Dict', 'dict', 'field', 'ceillog2', 'floorlog2', 'Set',\n]\n\n\ndef dependency_order_class_objects(objects: Dict[str, str], custom_types: Dict[str, str]) -> None:\n \"\"\"\n Determines which SSZ Object is dependent on which other and orders them appropriately\n \"\"\"\n items = list(objects.items())\n for key, value in items:\n dependencies = []\n for line in value.split('\\n'):\n if not re.match(r'\\s+\\w+: .+', line):\n continue # skip whitespace etc.\n line = line[line.index(':') + 1:] # strip of field name\n if '#' in line:\n line = line[:line.index('#')] # strip of comment\n dependencies.extend(re.findall(r'(\\w+)', line)) # catch all legible words, potential dependencies\n dependencies = filter(lambda x: '_' not in x and x.upper() != x, dependencies) # filter out constants\n dependencies = filter(lambda x: x not in ignored_dependencies, dependencies)\n dependencies = filter(lambda x: x not in custom_types, dependencies)\n for dep in dependencies:\n key_list = list(objects.keys())\n for item in [dep, key] + key_list[key_list.index(dep)+1:]:\n objects[item] = objects.pop(item)\n\n\ndef combine_ssz_objects(old_objects: Dict[str, str], new_objects: Dict[str, str], custom_types) -> Dict[str, str]:\n \"\"\"\n Takes in old spec and new spec ssz objects, combines them,\n and returns the newer versions of the objects in dependency order.\n \"\"\"\n for key, value in new_objects.items():\n old_objects[key] = value\n return old_objects\n\n\ndef combine_spec_objects(spec0: SpecObject, spec1: SpecObject) -> SpecObject:\n \"\"\"\n Takes in two spec variants (as tuples of their objects) and combines them using the appropriate combiner function.\n \"\"\"\n protocols = combine_protocols(spec0.protocols, spec1.protocols)\n functions = combine_dicts(spec0.functions, spec1.functions)\n custom_types = combine_dicts(spec0.custom_types, spec1.custom_types)\n constant_vars = 
combine_dicts(spec0.constant_vars, spec1.constant_vars)\n preset_vars = combine_dicts(spec0.preset_vars, spec1.preset_vars)\n config_vars = combine_dicts(spec0.config_vars, spec1.config_vars)\n ssz_dep_constants = combine_dicts(spec0.ssz_dep_constants, spec1.ssz_dep_constants)\n ssz_objects = combine_ssz_objects(spec0.ssz_objects, spec1.ssz_objects, custom_types)\n dataclasses = combine_dicts(spec0.dataclasses, spec1.dataclasses)\n return SpecObject(\n functions=functions,\n protocols=protocols,\n custom_types=custom_types,\n constant_vars=constant_vars,\n preset_vars=preset_vars,\n config_vars=config_vars,\n ssz_dep_constants=ssz_dep_constants,\n ssz_objects=ssz_objects,\n dataclasses=dataclasses,\n )\n\n\ndef parse_config_vars(conf: Dict[str, str]) -> Dict[str, str]:\n \"\"\"\n Parses a dict of basic str/int/list types into a dict for insertion into the spec code.\n \"\"\"\n out: Dict[str, str] = dict()\n for k, v in conf.items():\n if isinstance(v, str) and (v.startswith(\"0x\") or k == 'PRESET_BASE'):\n # Represent byte data with string, to avoid misinterpretation as big-endian int.\n # Everything is either byte data or an integer, with PRESET_BASE as one exception.\n out[k] = f\"'{v}'\"\n else:\n out[k] = str(int(v))\n return out\n\n\ndef load_preset(preset_files: Sequence[Path]) -> Dict[str, str]:\n \"\"\"\n Loads the a directory of preset files, merges the result into one preset.\n \"\"\"\n preset = {}\n for fork_file in preset_files:\n yaml = YAML(typ='base')\n fork_preset: dict = yaml.load(fork_file)\n if fork_preset is None: # for empty YAML files\n continue\n if not set(fork_preset.keys()).isdisjoint(preset.keys()):\n duplicates = set(fork_preset.keys()).intersection(set(preset.keys()))\n raise Exception(f\"duplicate config var(s) in preset files: {', '.join(duplicates)}\")\n preset.update(fork_preset)\n assert preset != {}\n return parse_config_vars(preset)\n\n\ndef load_config(config_path: Path) -> Dict[str, str]:\n \"\"\"\n Loads the given configuration file.\n \"\"\"\n yaml = YAML(typ='base')\n config_data = yaml.load(config_path)\n return parse_config_vars(config_data)\n\n\ndef _build_spec(preset_name: str, fork: str,\n source_files: Sequence[Path], preset_files: Sequence[Path], config_file: Path) -> str:\n preset = load_preset(preset_files)\n config = load_config(config_file)\n all_specs = [get_spec(spec, preset, config) for spec in source_files]\n\n spec_object = all_specs[0]\n for value in all_specs[1:]:\n spec_object = combine_spec_objects(spec_object, value)\n\n class_objects = {**spec_object.ssz_objects, **spec_object.dataclasses}\n dependency_order_class_objects(class_objects, spec_object.custom_types)\n\n return objects_to_spec(preset_name, spec_object, spec_builders[fork], class_objects)\n\n\nclass BuildTarget(NamedTuple):\n name: str\n preset_paths: List[Path]\n config_path: Path\n\n\nclass PySpecCommand(Command):\n \"\"\"Convert spec markdown files to a spec python file\"\"\"\n\n description = \"Convert spec markdown files to a spec python file\"\n\n spec_fork: str\n md_doc_paths: str\n parsed_md_doc_paths: List[str]\n build_targets: str\n parsed_build_targets: List[BuildTarget]\n out_dir: str\n\n # The format is (long option, short option, description).\n user_options = [\n ('spec-fork=', None, \"Spec fork to tag build with. 
Used to select md-docs defaults.\"),\n ('md-doc-paths=', None, \"List of paths of markdown files to build spec with\"),\n ('build-targets=', None, \"Names, directory paths of compile-time presets, and default config paths.\"),\n ('out-dir=', None, \"Output directory to write spec package to\")\n ]\n\n def initialize_options(self):\n \"\"\"Set default values for options.\"\"\"\n # Each user option must be listed here with their default value.\n self.spec_fork = PHASE0\n self.md_doc_paths = ''\n self.out_dir = 'pyspec_output'\n self.build_targets = \"\"\"\n minimal:presets/minimal:configs/minimal.yaml\n mainnet:presets/mainnet:configs/mainnet.yaml \n \"\"\"\n\n def finalize_options(self):\n \"\"\"Post-process options.\"\"\"\n if len(self.md_doc_paths) == 0:\n print(\"no paths were specified, using default markdown file paths for pyspec\"\n \" build (spec fork: %s)\" % self.spec_fork)\n if self.spec_fork == PHASE0:\n self.md_doc_paths = \"\"\"\n specs/phase0/beacon-chain.md\n specs/phase0/fork-choice.md\n specs/phase0/validator.md\n specs/phase0/weak-subjectivity.md\n \"\"\"\n elif self.spec_fork == ALTAIR:\n self.md_doc_paths = \"\"\"\n specs/phase0/beacon-chain.md\n specs/phase0/fork-choice.md\n specs/phase0/validator.md\n specs/phase0/weak-subjectivity.md\n specs/altair/beacon-chain.md\n specs/altair/fork.md\n specs/altair/validator.md\n specs/altair/p2p-interface.md\n specs/altair/sync-protocol.md\n \"\"\"\n elif self.spec_fork == MERGE:\n self.md_doc_paths = \"\"\"\n specs/phase0/beacon-chain.md\n specs/phase0/fork-choice.md\n specs/phase0/validator.md\n specs/phase0/weak-subjectivity.md\n specs/merge/beacon-chain.md\n specs/merge/fork-choice.md\n specs/merge/validator.md\n \"\"\"\n else:\n raise Exception('no markdown files specified, and spec fork \"%s\" is unknown', self.spec_fork)\n\n self.parsed_md_doc_paths = self.md_doc_paths.split()\n\n for filename in self.parsed_md_doc_paths:\n if not os.path.exists(filename):\n raise Exception('Pyspec markdown input file \"%s\" does not exist.' 
% filename)\n\n self.parsed_build_targets = []\n for target in self.build_targets.split():\n target = target.strip()\n data = target.split(':')\n if len(data) != 3:\n raise Exception('invalid target, expected \"name:preset_dir:config_file\" format, but got: %s' % target)\n name, preset_dir_path, config_path = data\n if any((c not in string.digits + string.ascii_letters) for c in name):\n raise Exception('invalid target name: \"%s\"' % name)\n if not os.path.exists(preset_dir_path):\n raise Exception('Preset dir \"%s\" does not exist' % preset_dir_path)\n _, _, preset_file_names = next(os.walk(preset_dir_path))\n preset_paths = [(Path(preset_dir_path) / name) for name in preset_file_names]\n\n if not os.path.exists(config_path):\n raise Exception('Config file \"%s\" does not exist' % config_path)\n self.parsed_build_targets.append(BuildTarget(name, preset_paths, Path(config_path)))\n\n def run(self):\n if not self.dry_run:\n dir_util.mkpath(self.out_dir)\n\n for (name, preset_paths, config_path) in self.parsed_build_targets:\n spec_str = spec_builders[self.spec_fork].build_spec(\n name, self.parsed_md_doc_paths, preset_paths, config_path)\n if self.dry_run:\n self.announce('dry run successfully prepared contents for spec.'\n f' out dir: \"{self.out_dir}\", spec fork: \"{self.spec_fork}\", build target: \"{name}\"')\n self.debug_print(spec_str)\n else:\n with open(os.path.join(self.out_dir, name+'.py'), 'w') as out:\n out.write(spec_str)\n\n if not self.dry_run:\n with open(os.path.join(self.out_dir, '__init__.py'), 'w') as out:\n # `mainnet` is the default spec.\n out.write(\"from . import mainnet as spec # noqa:F401\\n\")\n\n\nclass BuildPyCommand(build_py):\n \"\"\"Customize the build command to run the spec-builder on setup.py build\"\"\"\n\n def initialize_options(self):\n super(BuildPyCommand, self).initialize_options()\n\n def run_pyspec_cmd(self, spec_fork: str, **opts):\n cmd_obj: PySpecCommand = self.distribution.reinitialize_command(\"pyspec\")\n cmd_obj.spec_fork = spec_fork\n cmd_obj.out_dir = os.path.join(self.build_lib, 'eth2spec', spec_fork)\n for k, v in opts.items():\n setattr(cmd_obj, k, v)\n self.run_command('pyspec')\n\n def run(self):\n for spec_fork in spec_builders:\n self.run_pyspec_cmd(spec_fork=spec_fork)\n\n super(BuildPyCommand, self).run()\n\n\nclass PyspecDevCommand(Command):\n \"\"\"Build the markdown files in-place to their source location for testing.\"\"\"\n description = \"Build the markdown files in-place to their source location for testing.\"\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run_pyspec_cmd(self, spec_fork: str, **opts):\n cmd_obj: PySpecCommand = self.distribution.reinitialize_command(\"pyspec\")\n cmd_obj.spec_fork = spec_fork\n eth2spec_dir = convert_path(self.distribution.package_dir['eth2spec'])\n cmd_obj.out_dir = os.path.join(eth2spec_dir, spec_fork)\n for k, v in opts.items():\n setattr(cmd_obj, k, v)\n self.run_command('pyspec')\n\n def run(self):\n print(\"running build_py command\")\n for spec_fork in spec_builders:\n self.run_pyspec_cmd(spec_fork=spec_fork)\n\ncommands = {\n 'pyspec': PySpecCommand,\n 'build_py': BuildPyCommand,\n 'pyspecdev': PyspecDevCommand,\n}\n\nwith open(\"README.md\", \"rt\", encoding=\"utf8\") as f:\n readme = f.read()\n\n# How to use \"VERSION.txt\" file:\n# - dev branch contains \"X.Y.Z.dev\", where \"X.Y.Z\" is the target version to release dev into.\n# -> Changed as part of 'master' backport to 'dev'\n# - master branch contains \"X.Y.Z\", 
where \"X.Y.Z\" is the current version.\n# -> Changed as part of 'dev' release (or other branch) into 'master'\n# -> In case of a commit on master without git tag, target the next version\n# with \".postN\" (release candidate, numbered) suffixed.\n# See https://www.python.org/dev/peps/pep-0440/#public-version-identifiers\nwith open(os.path.join('tests', 'core', 'pyspec', 'eth2spec', 'VERSION.txt')) as f:\n spec_version = f.read().strip()\n\nsetup(\n name='eth2spec',\n version=spec_version,\n description=\"Eth2 spec, provided as Python package for tooling and testing\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n author=\"ethereum\",\n url=\"https://github.com/ethereum/eth2.0-specs\",\n include_package_data=False,\n package_data={'configs': ['*.yaml'],\n 'presets': ['*.yaml'],\n 'specs': ['**/*.md'],\n 'eth2spec': ['VERSION.txt']},\n package_dir={\n \"eth2spec\": \"tests/core/pyspec/eth2spec\",\n \"configs\": \"configs\",\n \"presets\": \"presets\",\n \"specs\": \"specs\",\n },\n packages=find_packages(where='tests/core/pyspec') + ['configs', 'specs'],\n py_modules=[\"eth2spec\"],\n cmdclass=commands,\n python_requires=\">=3.8, <4\",\n extras_require={\n \"test\": [\"pytest>=4.4\", \"pytest-cov\", \"pytest-xdist\"],\n \"lint\": [\"flake8==3.7.7\", \"mypy==0.750\"],\n \"generator\": [\"python-snappy==0.5.4\"],\n },\n install_requires=[\n \"eth-utils>=1.3.0,<2\",\n \"eth-typing>=2.1.0,<3.0.0\",\n \"pycryptodome==3.9.4\",\n \"py_ecc==5.2.0\",\n \"milagro_bls_binding==1.6.3\",\n \"dataclasses==0.6\",\n \"remerkleable==0.1.20\",\n RUAMEL_YAML_VERSION,\n \"lru-dict==1.1.6\",\n MARKO_VERSION,\n ]\n)\n", "path": "setup.py" } ]
diff --git a/setup.py b/setup.py index 99e501d9f3..cd2006f715 100644 --- a/setup.py +++ b/setup.py @@ -1017,7 +1017,7 @@ def run(self): "py_ecc==5.2.0", "milagro_bls_binding==1.6.3", "dataclasses==0.6", - "remerkleable==0.1.19", + "remerkleable==0.1.20", RUAMEL_YAML_VERSION, "lru-dict==1.1.6", MARKO_VERSION, diff --git a/ssz/simple-serialize.md b/ssz/simple-serialize.md index d97b8ea1c9..89a1ebc0b8 100644 --- a/ssz/simple-serialize.md +++ b/ssz/simple-serialize.md @@ -20,7 +20,8 @@ - [`null`](#null) - [`Bitvector[N]`](#bitvectorn) - [`Bitlist[N]`](#bitlistn) - - [Vectors, containers, lists, unions](#vectors-containers-lists-unions) + - [Vectors, containers, lists](#vectors-containers-lists) + - [Union](#union) - [Deserialization](#deserialization) - [Merkleization](#merkleization) - [Summaries and expansions](#summaries-and-expansions) @@ -61,7 +62,7 @@ * **bitlist**: ordered variable-length collection of `boolean` values, limited to `N` bits * notation `Bitlist[N]` * **union**: union type containing one of the given subtypes - * notation `Union[type_0, type_1, ...]`, e.g. `union[null, uint64]` + * notation `Union[type_0, type_1, ...]`, e.g. `union[None, uint64, uint32]` *Note*: Both `Vector[boolean, N]` and `Bitvector[N]` are valid, yet distinct due to their different serialization requirements. Similarly, both `List[boolean, N]` and `Bitlist[N]` are valid, yet distinct. Generally `Bitvector[N]`/`Bitlist[N]` are preferred because of their serialization efficiencies. @@ -77,7 +78,6 @@ For convenience we alias: * `byte` to `uint8` (this is a basic type) * `BytesN` and `ByteVector[N]` to `Vector[byte, N]` (this is *not* a basic type) * `ByteList[N]` to `List[byte, N]` -* `null`: `{}` ### Default values Assuming a helper function `default(type)` which returns the default value for `type`, we can recursively define the default value for all types. @@ -101,7 +101,7 @@ An SSZ object is called zeroed (and thus, `is_zero(object)` returns true) if it - Empty vector types (`Vector[type, 0]`, `Bitvector[0]`) are illegal. - Containers with no fields are illegal. -- The `null` type is only legal as the first type in a union subtype (i.e. with type index zero). +- The `None` type option in a `Union` type is only legal as the first option (i.e. with index zero). ## Serialization @@ -150,7 +150,7 @@ array[len(value) // 8] |= 1 << (len(value) % 8) return bytes(array) ``` -### Vectors, containers, lists, unions +### Vectors, containers, lists ```python # Recursively serialize @@ -170,14 +170,26 @@ fixed_parts = [part if part != None else variable_offsets[i] for i, part in enum return b"".join(fixed_parts + variable_parts) ``` -If `value` is a union type: +### Union -Define value as an object that has properties `value.value` with the contained value, and `value.type_index` which indexes the type. +A `value` as `Union[T...]` type has properties `value.value` with the contained value, and `value.selector` which indexes the selected `Union` type option `T`. + +A `Union`: +- May have multiple selectors with the same type. +- Should not use selectors above 127 (i.e. highest bit is set), these are reserved for backwards compatible extensions. +- Must have at least 1 type option. +- May have `None` as first type option, i.e. `selector == 0` +- Must have at least 2 type options if the first is `None` +- Is always considered a variable-length type, even if all type options have an equal fixed-length. 
```python -serialized_bytes = serialize(value.value) -serialized_type_index = value.type_index.to_bytes(BYTES_PER_LENGTH_OFFSET, "little") -return serialized_type_index + serialized_bytes +if value.value is None: + assert value.selector == 0 + return b"\x00" +else: + serialized_bytes = serialize(value.value) + serialized_selector_index = value.selector.to_bytes(1, "little") + return serialized_selector_index + serialized_bytes ``` ## Deserialization @@ -191,12 +203,14 @@ Deserialization can be implemented using a recursive algorithm. The deserializat * The size of each object in the vector/list can be inferred from the difference of two offsets. To get the size of the last object, the total number of bytes has to be known (it is not generally possible to deserialize an SSZ object of unknown length) * Containers follow the same principles as vectors, with the difference that there may be fixed-size objects in a container as well. This means the `fixed_parts` data will contain offsets as well as fixed-size objects. * In the case of bitlists, the length in bits cannot be uniquely inferred from the number of bytes in the object. Because of this, they have a bit at the end that is always set. This bit has to be used to infer the size of the bitlist in bits. +* The first byte of the deserialization scope is deserialized as type selector, the remainder of the scope is deserialized as the selected type. Note that deserialization requires hardening against invalid inputs. A non-exhaustive list: - Offsets: out of order, out of range, mismatching minimum element size. - Scope: Extra unused bytes, not aligned with element size. - More elements than a list limit allows. Part of enforcing consensus. +- An out-of-bounds selected index in an `Union` Efficient algorithms for computing this object can be found in [the implementations](#implementations). @@ -227,7 +241,7 @@ We first define helper functions: - If `1` chunk: the root is the chunk itself. - If `> 1` chunks: merkleize as binary tree. * `mix_in_length`: Given a Merkle root `root` and a length `length` (`"uint256"` little-endian serialization) return `hash(root + length)`. -* `mix_in_type`: Given a Merkle root `root` and a type_index `type_index` (`"uint256"` little-endian serialization) return `hash(root + type_index)`. +* `mix_in_selector`: Given a Merkle root `root` and a type selector `selector` (`"uint256"` little-endian serialization) return `hash(root + selector)`. We now define Merkleization `hash_tree_root(value)` of an object `value` recursively: @@ -237,7 +251,8 @@ We now define Merkleization `hash_tree_root(value)` of an object `value` recursi * `mix_in_length(merkleize(pack_bits(value), limit=chunk_count(type)), len(value))` if `value` is a bitlist. * `merkleize([hash_tree_root(element) for element in value])` if `value` is a vector of composite objects or a container. * `mix_in_length(merkleize([hash_tree_root(element) for element in value], limit=chunk_count(type)), len(value))` if `value` is a list of composite objects. -* `mix_in_type(merkleize(value.value), value.type_index)` if `value` is of union type. 
+* `mix_in_selector(hash_tree_root(value.value), value.selector)` if `value` is of union type, and `value.value` is not `None` +* `mix_in_selector(Bytes32(), 0)` if `value` is of union type, and `value.value` is `None` ## Summaries and expansions diff --git a/tests/core/pyspec/eth2spec/utils/ssz/ssz_typing.py b/tests/core/pyspec/eth2spec/utils/ssz/ssz_typing.py index 9b18f8bdae..5a1b61d0be 100644 --- a/tests/core/pyspec/eth2spec/utils/ssz/ssz_typing.py +++ b/tests/core/pyspec/eth2spec/utils/ssz/ssz_typing.py @@ -2,6 +2,7 @@ # Ignore linter: This module makes importing SSZ types easy, and hides away the underlying library from the spec. from remerkleable.complex import Container, Vector, List +from remerkleable.union import Union from remerkleable.basic import boolean, bit, uint, byte, uint8, uint16, uint32, uint64, uint128, uint256 from remerkleable.bitfields import Bitvector, Bitlist from remerkleable.byte_arrays import ByteVector, Bytes1, Bytes4, Bytes8, Bytes32, Bytes48, Bytes96, ByteList
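The SSZ change in this diff encodes a `Union` value as a one-byte `selector` followed by the serialization of the selected option, with a bare `b"\x00"` for the `None` option, and hardens deserialization against out-of-bounds selectors. A minimal, self-contained sketch of that wire format follows; the helper names `serialize_union` and `deserialize_union` are illustrative only and do not exist in the spec repository.

```python
def serialize_union(selector: int, value, serialize_value) -> bytes:
    """Encode a Union value as a 1-byte selector followed by the value's bytes."""
    if value is None:
        # The None option is only legal as the first option (selector 0) and
        # carries no payload.
        assert selector == 0
        return b"\x00"
    return selector.to_bytes(1, "little") + serialize_value(value)


def deserialize_union(data: bytes, deserializers):
    """Decode a Union scope: the first byte selects the option, the rest is its payload."""
    if not data:
        raise ValueError("empty Union scope")
    selector = data[0]
    if selector >= len(deserializers):
        raise ValueError("out-of-bounds Union selector: %d" % selector)
    deserializer = deserializers[selector]
    if deserializer is None:  # the optional None first option
        if selector != 0 or data[1:]:
            raise ValueError("invalid encoding of the None option")
        return selector, None
    return selector, deserializer(data[1:])
```

Under these rules a `Union[None, uint64]` holding `5` serializes to `b"\x01"` followed by `(5).to_bytes(8, "little")`, and its root mixes the selector in via `mix_in_selector` as described in the diff above.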
googleapis__python-bigquery-398
google-cloud-bigquery[opentelemetry] un-installable together with opentelemetry-exporter-google-cloud According to the instructions [Instrumenting With OpenTelemetry](https://github.com/googleapis/python-bigquery#instrumenting-with-opentelemetry) in the README the following packages needs to be installed to use OpenTelemtry tracing `google-cloud-bigquery[opentelemetry] opentelemetry-exporter-google-cloud`, but `google-cloud-bigquery[opentelemetry]` depends on `opentelemetry-api`, `opentelemetry-sdk`, and `opentelemetry-instrumentation` of version `0.9b0` [setup.py](https://github.com/googleapis/python-bigquery/blob/master/setup.py#L57-L61). But there is no version of `opentelemetry-exporter-google-cloud` that can use that (old) version. https://github.com/GoogleCloudPlatform/opentelemetry-operations-python/tree/master/opentelemetry-exporter-google-cloud #### Environment details - OS type and version: macOS 10.15 - Python version: 3.8.6 - pip version: 20.3.3 - `google-cloud-bigquery` version: 2.2.0 #### Steps to reproduce ``` $ pip3.8 install --user -r <(echo "google-cloud-bigquery[opentelemetry]==2.2.0 opentelemetry-exporter-google-cloud") Requirement already satisfied: google-cloud-bigquery[opentelemetry]==2.2.0 in /Users/rvonessen/Library/Python/3.8/lib/python/site-packages (from -r /dev/fd/63 (line 1)) (2.2.0) Requirement already satisfied: opentelemetry-exporter-google-cloud in /Users/rvonessen/Library/Python/3.8/lib/python/site-packages (from -r /dev/fd/63 (line 2)) (0.11b0) Requirement already satisfied: google-resumable-media<2.0dev,>=0.6.0 in /Users/rvonessen/Library/Python/3.8/lib/python/site-packages (from google-cloud-bigquery[opentelemetry]==2.2.0->-r /dev/fd/63 (line 1)) (1.1.0) Requirement already satisfied: proto-plus>=1.10.0 in /Users/rvonessen/Library/Python/3.8/lib/python/site-packages (from google-cloud-bigquery[opentelemetry]==2.2.0->-r /dev/fd/63 (line 1)) (1.10.0) Requirement already satisfied: google-api-core[grpc]<2.0.0dev,>=1.22.2 in /Users/rvonessen/Library/Python/3.8/lib/python/site-packages (from google-cloud-bigquery[opentelemetry]==2.2.0->-r /dev/fd/63 (line 1)) (1.22.2) Requirement already satisfied: protobuf>=3.12.0 in /Users/rvonessen/Library/Python/3.8/lib/python/site-packages (from google-cloud-bigquery[opentelemetry]==2.2.0->-r /dev/fd/63 (line 1)) (3.13.0) Requirement already satisfied: six<2.0.0dev,>=1.13.0 in /Users/rvonessen/Library/Python/3.8/lib/python/site-packages (from google-cloud-bigquery[opentelemetry]==2.2.0->-r /dev/fd/63 (line 1)) (1.15.0) Requirement already satisfied: google-cloud-core<2.0dev,>=1.4.1 in /Users/rvonessen/Library/Python/3.8/lib/python/site-packages (from google-cloud-bigquery[opentelemetry]==2.2.0->-r /dev/fd/63 (line 1)) (1.4.3) Collecting opentelemetry-sdk==0.9b0; extra == "opentelemetry" Using cached opentelemetry_sdk-0.9b0-py3-none-any.whl (33 kB) Requirement already satisfied: opentelemetry-instrumentation==0.9b0; extra == "opentelemetry" in /Users/rvonessen/Library/Python/3.8/lib/python/site-packages (from google-cloud-bigquery[opentelemetry]==2.2.0->-r /dev/fd/63 (line 1)) (0.9b0) Collecting opentelemetry-api==0.9b0; extra == "opentelemetry" Using cached opentelemetry_api-0.9b0-py3-none-any.whl (43 kB) Requirement already satisfied: google-cloud-monitoring in /Users/rvonessen/Library/Python/3.8/lib/python/site-packages (from opentelemetry-exporter-google-cloud->-r /dev/fd/63 (line 2)) (1.1.0) Requirement already satisfied: google-cloud-trace in /Users/rvonessen/Library/Python/3.8/lib/python/site-packages (from 
opentelemetry-exporter-google-cloud->-r /dev/fd/63 (line 2)) (0.24.0) Requirement already satisfied: google-crc32c<2.0dev,>=1.0; python_version >= "3.5" in /Users/rvonessen/Library/Python/3.8/lib/python/site-packages (from google-resumable-media<2.0dev,>=0.6.0->google-cloud-bigquery[opentelemetry]==2.2.0->-r /dev/fd/63 (line 1)) (1.0.0) Requirement already satisfied: googleapis-common-protos<2.0dev,>=1.6.0 in /Users/rvonessen/Library/Python/3.8/lib/python/site-packages (from google-api-core[grpc]<2.0.0dev,>=1.22.2->google-cloud-bigquery[opentelemetry]==2.2.0->-r /dev/fd/63 (line 1)) (1.52.0) Requirement already satisfied: requests<3.0.0dev,>=2.18.0 in /Users/rvonessen/Library/Python/3.8/lib/python/site-packages (from google-api-core[grpc]<2.0.0dev,>=1.22.2->google-cloud-bigquery[opentelemetry]==2.2.0->-r /dev/fd/63 (line 1)) (2.24.0) Requirement already satisfied: google-auth<2.0dev,>=1.21.1 in /Users/rvonessen/Library/Python/3.8/lib/python/site-packages (from google-api-core[grpc]<2.0.0dev,>=1.22.2->google-cloud-bigquery[opentelemetry]==2.2.0->-r /dev/fd/63 (line 1)) (1.22.0) Requirement already satisfied: setuptools>=34.0.0 in /usr/local/lib/python3.8/site-packages (from google-api-core[grpc]<2.0.0dev,>=1.22.2->google-cloud-bigquery[opentelemetry]==2.2.0->-r /dev/fd/63 (line 1)) (50.3.0) Requirement already satisfied: pytz in /Users/rvonessen/Library/Python/3.8/lib/python/site-packages (from google-api-core[grpc]<2.0.0dev,>=1.22.2->google-cloud-bigquery[opentelemetry]==2.2.0->-r /dev/fd/63 (line 1)) (2020.1) Requirement already satisfied: grpcio<2.0dev,>=1.29.0; extra == "grpc" in /Users/rvonessen/Library/Python/3.8/lib/python/site-packages (from google-api-core[grpc]<2.0.0dev,>=1.22.2->google-cloud-bigquery[opentelemetry]==2.2.0->-r /dev/fd/63 (line 1)) (1.32.0) Requirement already satisfied: wrapt<2.0.0,>=1.0.0 in /Users/rvonessen/Library/Python/3.8/lib/python/site-packages (from opentelemetry-instrumentation==0.9b0; extra == "opentelemetry"->google-cloud-bigquery[opentelemetry]==2.2.0->-r /dev/fd/63 (line 1)) (1.12.1) Requirement already satisfied: idna<3,>=2.5 in /Users/rvonessen/Library/Python/3.8/lib/python/site-packages (from requests<3.0.0dev,>=2.18.0->google-api-core[grpc]<2.0.0dev,>=1.22.2->google-cloud-bigquery[opentelemetry]==2.2.0->-r /dev/fd/63 (line 1)) (2.10) Requirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /Users/rvonessen/Library/Python/3.8/lib/python/site-packages (from requests<3.0.0dev,>=2.18.0->google-api-core[grpc]<2.0.0dev,>=1.22.2->google-cloud-bigquery[opentelemetry]==2.2.0->-r /dev/fd/63 (line 1)) (1.25.10) Requirement already satisfied: certifi>=2017.4.17 in /Users/rvonessen/Library/Python/3.8/lib/python/site-packages (from requests<3.0.0dev,>=2.18.0->google-api-core[grpc]<2.0.0dev,>=1.22.2->google-cloud-bigquery[opentelemetry]==2.2.0->-r /dev/fd/63 (line 1)) (2020.6.20) Requirement already satisfied: chardet<4,>=3.0.2 in /Users/rvonessen/Library/Python/3.8/lib/python/site-packages (from requests<3.0.0dev,>=2.18.0->google-api-core[grpc]<2.0.0dev,>=1.22.2->google-cloud-bigquery[opentelemetry]==2.2.0->-r /dev/fd/63 (line 1)) (3.0.4) Requirement already satisfied: aiohttp<4.0.0dev,>=3.6.2; python_version >= "3.6" in /Users/rvonessen/Library/Python/3.8/lib/python/site-packages (from google-auth<2.0dev,>=1.21.1->google-api-core[grpc]<2.0.0dev,>=1.22.2->google-cloud-bigquery[opentelemetry]==2.2.0->-r /dev/fd/63 (line 1)) (3.6.2) Requirement already satisfied: pyasn1-modules>=0.2.1 in 
/Users/rvonessen/Library/Python/3.8/lib/python/site-packages (from google-auth<2.0dev,>=1.21.1->google-api-core[grpc]<2.0.0dev,>=1.22.2->google-cloud-bigquery[opentelemetry]==2.2.0->-r /dev/fd/63 (line 1)) (0.2.8) Requirement already satisfied: rsa<5,>=3.1.4; python_version >= "3.5" in /Users/rvonessen/Library/Python/3.8/lib/python/site-packages (from google-auth<2.0dev,>=1.21.1->google-api-core[grpc]<2.0.0dev,>=1.22.2->google-cloud-bigquery[opentelemetry]==2.2.0->-r /dev/fd/63 (line 1)) (4.6) Requirement already satisfied: cachetools<5.0,>=2.0.0 in /Users/rvonessen/Library/Python/3.8/lib/python/site-packages (from google-auth<2.0dev,>=1.21.1->google-api-core[grpc]<2.0.0dev,>=1.22.2->google-cloud-bigquery[opentelemetry]==2.2.0->-r /dev/fd/63 (line 1)) (4.1.1) Requirement already satisfied: async-timeout<4.0,>=3.0 in /Users/rvonessen/Library/Python/3.8/lib/python/site-packages (from aiohttp<4.0.0dev,>=3.6.2; python_version >= "3.6"->google-auth<2.0dev,>=1.21.1->google-api-core[grpc]<2.0.0dev,>=1.22.2->google-cloud-bigquery[opentelemetry]==2.2.0->-r /dev/fd/63 (line 1)) (3.0.1) Requirement already satisfied: multidict<5.0,>=4.5 in /Users/rvonessen/Library/Python/3.8/lib/python/site-packages (from aiohttp<4.0.0dev,>=3.6.2; python_version >= "3.6"->google-auth<2.0dev,>=1.21.1->google-api-core[grpc]<2.0.0dev,>=1.22.2->google-cloud-bigquery[opentelemetry]==2.2.0->-r /dev/fd/63 (line 1)) (4.7.6) Requirement already satisfied: yarl<2.0,>=1.0 in /Users/rvonessen/Library/Python/3.8/lib/python/site-packages (from aiohttp<4.0.0dev,>=3.6.2; python_version >= "3.6"->google-auth<2.0dev,>=1.21.1->google-api-core[grpc]<2.0.0dev,>=1.22.2->google-cloud-bigquery[opentelemetry]==2.2.0->-r /dev/fd/63 (line 1)) (1.6.0) Requirement already satisfied: attrs>=17.3.0 in /Users/rvonessen/Library/Python/3.8/lib/python/site-packages (from aiohttp<4.0.0dev,>=3.6.2; python_version >= "3.6"->google-auth<2.0dev,>=1.21.1->google-api-core[grpc]<2.0.0dev,>=1.22.2->google-cloud-bigquery[opentelemetry]==2.2.0->-r /dev/fd/63 (line 1)) (20.2.0) Requirement already satisfied: pyasn1<0.5.0,>=0.4.6 in /Users/rvonessen/Library/Python/3.8/lib/python/site-packages (from pyasn1-modules>=0.2.1->google-auth<2.0dev,>=1.21.1->google-api-core[grpc]<2.0.0dev,>=1.22.2->google-cloud-bigquery[opentelemetry]==2.2.0->-r /dev/fd/63 (line 1)) (0.4.8) Installing collected packages: opentelemetry-api, opentelemetry-sdk Attempting uninstall: opentelemetry-api Found existing installation: opentelemetry-api 0.11b0 Uninstalling opentelemetry-api-0.11b0: Successfully uninstalled opentelemetry-api-0.11b0 Attempting uninstall: opentelemetry-sdk Found existing installation: opentelemetry-sdk 0.11b0 Uninstalling opentelemetry-sdk-0.11b0: Successfully uninstalled opentelemetry-sdk-0.11b0 ERROR: After October 2020 you may experience errors when installing or updating packages. This is because pip will change the way that it resolves dependency conflicts. We recommend you use --use-feature=2020-resolver to test your packages with the new resolver before it becomes the default. opentelemetry-exporter-google-cloud 0.11b0 requires opentelemetry-api==0.11b0, but you'll have opentelemetry-api 0.9b0 which is incompatible. opentelemetry-exporter-google-cloud 0.11b0 requires opentelemetry-sdk==0.11b0, but you'll have opentelemetry-sdk 0.9b0 which is incompatible. ``` (The result is the same using the `2020-resolver`.)
[ { "content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\n\nimport setuptools\n\n\n# Package metadata.\n\nname = \"google-cloud-bigquery\"\ndescription = \"Google BigQuery API client library\"\n\n# Should be one of:\n# 'Development Status :: 3 - Alpha'\n# 'Development Status :: 4 - Beta'\n# 'Development Status :: 5 - Production/Stable'\nrelease_status = \"Development Status :: 5 - Production/Stable\"\ndependencies = [\n \"google-api-core[grpc] >= 1.23.0, < 2.0.0dev\",\n \"proto-plus >= 1.10.0\",\n \"google-cloud-core >= 1.4.1, < 2.0dev\",\n \"google-resumable-media >= 0.6.0, < 2.0dev\",\n \"six >=1.13.0,< 2.0.0dev\",\n \"protobuf >= 3.12.0\",\n]\nextras = {\n \"bqstorage\": [\n \"google-cloud-bigquery-storage >= 2.0.0, <3.0.0dev\",\n # Due to an issue in pip's dependency resolver, the `grpc` extra is not\n # installed, even though `google-cloud-bigquery-storage` specifies it\n # as `google-api-core[grpc]`. We thus need to explicitly specify it here.\n # See: https://github.com/googleapis/python-bigquery/issues/83 The\n # grpc.Channel.close() method isn't added until 1.32.0.\n # https://github.com/grpc/grpc/pull/15254\n \"grpcio >= 1.32.0, < 2.0dev\",\n \"pyarrow >= 1.0.0, < 3.0dev\",\n ],\n \"pandas\": [\n \"pandas>=0.23.0\",\n # pyarrow 1.0.0 is required for the use of timestamp_as_object keyword.\n \"pyarrow >= 1.0.0, < 3.0dev\",\n ],\n \"tqdm\": [\"tqdm >= 4.7.4, <5.0.0dev\"],\n \"opentelemetry\": [\n \"opentelemetry-api==0.9b0\",\n \"opentelemetry-sdk==0.9b0\",\n \"opentelemetry-instrumentation==0.9b0 \",\n ],\n}\n\nall_extras = []\n\nfor extra in extras:\n all_extras.extend(extras[extra])\n\nextras[\"all\"] = all_extras\n\n# Setup boilerplate below this line.\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nreadme_filename = os.path.join(package_root, \"README.rst\")\nwith io.open(readme_filename, encoding=\"utf-8\") as readme_file:\n readme = readme_file.read()\n\nversion = {}\nwith open(os.path.join(package_root, \"google/cloud/bigquery/version.py\")) as fp:\n exec(fp.read(), version)\nversion = version[\"__version__\"]\n\n# Only include packages under the 'google' namespace. 
Do not include tests,\n# benchmarks, etc.\npackages = [\n package\n for package in setuptools.PEP420PackageFinder.find()\n if package.startswith(\"google\")\n]\n\n# Determine which namespaces are needed.\nnamespaces = [\"google\"]\nif \"google.cloud\" in packages:\n namespaces.append(\"google.cloud\")\n\n\nsetuptools.setup(\n name=name,\n version=version,\n description=description,\n long_description=readme,\n author=\"Google LLC\",\n author_email=\"[email protected]\",\n license=\"Apache 2.0\",\n url=\"https://github.com/googleapis/python-bigquery\",\n classifiers=[\n release_status,\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet\",\n ],\n platforms=\"Posix; MacOS X; Windows\",\n packages=packages,\n namespace_packages=namespaces,\n install_requires=dependencies,\n extras_require=extras,\n python_requires=\">=3.6\",\n include_package_data=True,\n zip_safe=False,\n)\n", "path": "setup.py" } ]
[ { "content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\n\nimport setuptools\n\n\n# Package metadata.\n\nname = \"google-cloud-bigquery\"\ndescription = \"Google BigQuery API client library\"\n\n# Should be one of:\n# 'Development Status :: 3 - Alpha'\n# 'Development Status :: 4 - Beta'\n# 'Development Status :: 5 - Production/Stable'\nrelease_status = \"Development Status :: 5 - Production/Stable\"\ndependencies = [\n \"google-api-core[grpc] >= 1.23.0, < 2.0.0dev\",\n \"proto-plus >= 1.10.0\",\n \"google-cloud-core >= 1.4.1, < 2.0dev\",\n \"google-resumable-media >= 0.6.0, < 2.0dev\",\n \"six >=1.13.0,< 2.0.0dev\",\n \"protobuf >= 3.12.0\",\n]\nextras = {\n \"bqstorage\": [\n \"google-cloud-bigquery-storage >= 2.0.0, <3.0.0dev\",\n # Due to an issue in pip's dependency resolver, the `grpc` extra is not\n # installed, even though `google-cloud-bigquery-storage` specifies it\n # as `google-api-core[grpc]`. We thus need to explicitly specify it here.\n # See: https://github.com/googleapis/python-bigquery/issues/83 The\n # grpc.Channel.close() method isn't added until 1.32.0.\n # https://github.com/grpc/grpc/pull/15254\n \"grpcio >= 1.32.0, < 2.0dev\",\n \"pyarrow >= 1.0.0, < 3.0dev\",\n ],\n \"pandas\": [\n \"pandas>=0.23.0\",\n # pyarrow 1.0.0 is required for the use of timestamp_as_object keyword.\n \"pyarrow >= 1.0.0, < 3.0dev\",\n ],\n \"tqdm\": [\"tqdm >= 4.7.4, <5.0.0dev\"],\n \"opentelemetry\": [\n \"opentelemetry-api==0.11b0\",\n \"opentelemetry-sdk==0.11b0\",\n \"opentelemetry-instrumentation==0.11b0\",\n ],\n}\n\nall_extras = []\n\nfor extra in extras:\n all_extras.extend(extras[extra])\n\nextras[\"all\"] = all_extras\n\n# Setup boilerplate below this line.\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nreadme_filename = os.path.join(package_root, \"README.rst\")\nwith io.open(readme_filename, encoding=\"utf-8\") as readme_file:\n readme = readme_file.read()\n\nversion = {}\nwith open(os.path.join(package_root, \"google/cloud/bigquery/version.py\")) as fp:\n exec(fp.read(), version)\nversion = version[\"__version__\"]\n\n# Only include packages under the 'google' namespace. 
Do not include tests,\n# benchmarks, etc.\npackages = [\n package\n for package in setuptools.PEP420PackageFinder.find()\n if package.startswith(\"google\")\n]\n\n# Determine which namespaces are needed.\nnamespaces = [\"google\"]\nif \"google.cloud\" in packages:\n namespaces.append(\"google.cloud\")\n\n\nsetuptools.setup(\n name=name,\n version=version,\n description=description,\n long_description=readme,\n author=\"Google LLC\",\n author_email=\"[email protected]\",\n license=\"Apache 2.0\",\n url=\"https://github.com/googleapis/python-bigquery\",\n classifiers=[\n release_status,\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet\",\n ],\n platforms=\"Posix; MacOS X; Windows\",\n packages=packages,\n namespace_packages=namespaces,\n install_requires=dependencies,\n extras_require=extras,\n python_requires=\">=3.6\",\n include_package_data=True,\n zip_safe=False,\n)\n", "path": "setup.py" } ]
diff --git a/setup.py b/setup.py index 48c4a7518..5f4e506eb 100644 --- a/setup.py +++ b/setup.py @@ -55,9 +55,9 @@ ], "tqdm": ["tqdm >= 4.7.4, <5.0.0dev"], "opentelemetry": [ - "opentelemetry-api==0.9b0", - "opentelemetry-sdk==0.9b0", - "opentelemetry-instrumentation==0.9b0 ", + "opentelemetry-api==0.11b0", + "opentelemetry-sdk==0.11b0", + "opentelemetry-instrumentation==0.11b0", ], }
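The fix pins all three packages in the `opentelemetry` extra to the `0.11b0` release line, the same version family that `opentelemetry-exporter-google-cloud 0.11b0` requires. The relevant fragment of the patched `setup.py`, taken from the after_files above, is:

```python
# Fragment of the patched setup.py: every opentelemetry package in the extra
# tracks the same 0.11b0 pre-release as the Google Cloud exporter.
extras = {
    "opentelemetry": [
        "opentelemetry-api==0.11b0",
        "opentelemetry-sdk==0.11b0",
        "opentelemetry-instrumentation==0.11b0",
    ],
}
```

With matching pins, `pip install "google-cloud-bigquery[opentelemetry]" opentelemetry-exporter-google-cloud==0.11b0` should resolve without the version conflict shown in the issue.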
marshmallow-code__webargs-414
Schema factory only variable fail - can't pass list type Looking at the [schema factory docs](https://webargs.readthedocs.io/en/latest/advanced.html#schema-factories), I'm interested in trying the ``` # Filter based on 'fields' query parameter only = request.args.get("fields", None) ``` part. However, when I try appending something like `?fields=some_field` to my HTTP request, I get the following error: ``` File "edited/marshmallow/schema.py", line 349, in __init__ raise StringNotCollectionError('"only" should be a list of strings') ``` As far as I can see, webargs always passes the query string parameters as strings. I tried wrapping it in square brackets, but I think I'm barking up the wrong tree. Have I misunderstood something, or is this a bug?
[ { "content": "\"\"\"Example implementation of using a marshmallow Schema for both request input\nand output with a `use_schema` decorator.\nRun the app:\n\n $ python examples/schema_example.py\n\nTry the following with httpie (a cURL-like utility, http://httpie.org):\n\n $ pip install httpie\n $ http GET :5001/users/\n $ http GET :5001/users/42\n $ http POST :5001/users/ usename=brian first_name=Brian last_name=May\n $ http PATCH :5001/users/42 username=freddie\n $ http GET :5001/users/ limit==1\n\"\"\"\nimport functools\nfrom flask import Flask, request, jsonify\nimport random\n\nfrom marshmallow import Schema, fields, post_dump\nfrom webargs.flaskparser import parser, use_kwargs\n\napp = Flask(__name__)\n\n##### Fake database and models #####\n\n\nclass Model:\n def __init__(self, **kwargs):\n self.__dict__.update(kwargs)\n\n def update(self, **kwargs):\n self.__dict__.update(kwargs)\n\n @classmethod\n def insert(cls, db, **kwargs):\n collection = db[cls.collection]\n new_id = None\n if \"id\" in kwargs: # for setting up fixtures\n new_id = kwargs.pop(\"id\")\n else: # find a new id\n found_id = False\n while not found_id:\n new_id = random.randint(1, 9999)\n if new_id not in collection:\n found_id = True\n new_record = cls(id=new_id, **kwargs)\n collection[new_id] = new_record\n return new_record\n\n\nclass User(Model):\n collection = \"users\"\n\n\ndb = {\"users\": {}}\n\n\n##### use_schema #####\n\n\ndef use_schema(schema, list_view=False, locations=None):\n \"\"\"View decorator for using a marshmallow schema to\n (1) parse a request's input and\n (2) serializing the view's output to a JSON response.\n \"\"\"\n\n def decorator(func):\n @functools.wraps(func)\n def wrapped(*args, **kwargs):\n use_args_wrapper = parser.use_args(schema, locations=locations)\n # Function wrapped with use_args\n func_with_args = use_args_wrapper(func)\n ret = func_with_args(*args, **kwargs)\n # Serialize and jsonify the return value\n return jsonify(schema.dump(ret, many=list_view).data)\n\n return wrapped\n\n return decorator\n\n\n##### Schemas #####\n\n\nclass UserSchema(Schema):\n id = fields.Int(dump_only=True)\n username = fields.Str()\n first_name = fields.Str()\n last_name = fields.Str()\n\n class Meta:\n strict = True\n\n @post_dump(pass_many=True)\n def wrap_with_envelope(self, data, many, **kwargs):\n return {\"data\": data}\n\n\n##### Routes #####\n\n\[email protected](\"/users/<int:user_id>\", methods=[\"GET\", \"PATCH\"])\n@use_schema(UserSchema())\ndef user_detail(reqargs, user_id):\n user = db[\"users\"].get(user_id)\n if not user:\n return jsonify({\"message\": \"User not found\"}), 404\n if request.method == \"PATCH\" and reqargs:\n user.update(**reqargs)\n return user\n\n\n# You can add additional arguments with use_kwargs\[email protected](\"/users/\", methods=[\"GET\", \"POST\"])\n@use_kwargs({\"limit\": fields.Int(missing=10, location=\"query\")})\n@use_schema(UserSchema(), list_view=True)\ndef user_list(reqargs, limit):\n users = db[\"users\"].values()\n if request.method == \"POST\":\n User.insert(db=db, **reqargs)\n return list(users)[:limit]\n\n\n# Return validation errors as JSON\[email protected](422)\[email protected](400)\ndef handle_validation_error(err):\n exc = getattr(err, \"exc\", None)\n if exc:\n headers = err.data[\"headers\"]\n messages = exc.messages\n else:\n headers = None\n messages = [\"Invalid request.\"]\n if headers:\n return jsonify({\"errors\": messages}), err.code, headers\n else:\n return jsonify({\"errors\": messages}), err.code\n\n\nif __name__ == 
\"__main__\":\n User.insert(\n db=db, id=42, username=\"fred\", first_name=\"Freddie\", last_name=\"Mercury\"\n )\n app.run(port=5001, debug=True)\n", "path": "examples/schema_example.py" } ]
[ { "content": "\"\"\"Example implementation of using a marshmallow Schema for both request input\nand output with a `use_schema` decorator.\nRun the app:\n\n $ python examples/schema_example.py\n\nTry the following with httpie (a cURL-like utility, http://httpie.org):\n\n $ pip install httpie\n $ http GET :5001/users/\n $ http GET :5001/users/42\n $ http POST :5001/users/ usename=brian first_name=Brian last_name=May\n $ http PATCH :5001/users/42 username=freddie\n $ http GET :5001/users/ limit==1\n\"\"\"\nimport functools\nfrom flask import Flask, request, jsonify\nimport random\n\nfrom marshmallow import Schema, fields, post_dump\nfrom webargs.flaskparser import parser, use_kwargs\n\napp = Flask(__name__)\n\n##### Fake database and models #####\n\n\nclass Model:\n def __init__(self, **kwargs):\n self.__dict__.update(kwargs)\n\n def update(self, **kwargs):\n self.__dict__.update(kwargs)\n\n @classmethod\n def insert(cls, db, **kwargs):\n collection = db[cls.collection]\n new_id = None\n if \"id\" in kwargs: # for setting up fixtures\n new_id = kwargs.pop(\"id\")\n else: # find a new id\n found_id = False\n while not found_id:\n new_id = random.randint(1, 9999)\n if new_id not in collection:\n found_id = True\n new_record = cls(id=new_id, **kwargs)\n collection[new_id] = new_record\n return new_record\n\n\nclass User(Model):\n collection = \"users\"\n\n\ndb = {\"users\": {}}\n\n\n##### use_schema #####\n\n\ndef use_schema(schema, list_view=False, locations=None):\n \"\"\"View decorator for using a marshmallow schema to\n (1) parse a request's input and\n (2) serializing the view's output to a JSON response.\n \"\"\"\n\n def decorator(func):\n @functools.wraps(func)\n def wrapped(*args, **kwargs):\n use_args_wrapper = parser.use_args(schema, locations=locations)\n # Function wrapped with use_args\n func_with_args = use_args_wrapper(func)\n ret = func_with_args(*args, **kwargs)\n # Serialize and jsonify the return value\n return jsonify(schema.dump(ret, many=list_view).data)\n\n return wrapped\n\n return decorator\n\n\n##### Schemas #####\n\n\nclass UserSchema(Schema):\n id = fields.Int(dump_only=True)\n username = fields.Str()\n first_name = fields.Str()\n last_name = fields.Str()\n\n @post_dump(pass_many=True)\n def wrap_with_envelope(self, data, many, **kwargs):\n return {\"data\": data}\n\n\n##### Routes #####\n\n\[email protected](\"/users/<int:user_id>\", methods=[\"GET\", \"PATCH\"])\n@use_schema(UserSchema())\ndef user_detail(reqargs, user_id):\n user = db[\"users\"].get(user_id)\n if not user:\n return jsonify({\"message\": \"User not found\"}), 404\n if request.method == \"PATCH\" and reqargs:\n user.update(**reqargs)\n return user\n\n\n# You can add additional arguments with use_kwargs\[email protected](\"/users/\", methods=[\"GET\", \"POST\"])\n@use_kwargs({\"limit\": fields.Int(missing=10, location=\"query\")})\n@use_schema(UserSchema(), list_view=True)\ndef user_list(reqargs, limit):\n users = db[\"users\"].values()\n if request.method == \"POST\":\n User.insert(db=db, **reqargs)\n return list(users)[:limit]\n\n\n# Return validation errors as JSON\[email protected](422)\[email protected](400)\ndef handle_validation_error(err):\n exc = getattr(err, \"exc\", None)\n if exc:\n headers = err.data[\"headers\"]\n messages = exc.messages\n else:\n headers = None\n messages = [\"Invalid request.\"]\n if headers:\n return jsonify({\"errors\": messages}), err.code, headers\n else:\n return jsonify({\"errors\": messages}), err.code\n\n\nif __name__ == \"__main__\":\n User.insert(\n db=db, 
id=42, username=\"fred\", first_name=\"Freddie\", last_name=\"Mercury\"\n )\n app.run(port=5001, debug=True)\n", "path": "examples/schema_example.py" } ]
diff --git a/README.rst b/README.rst index fc36d904..c1de1029 100644 --- a/README.rst +++ b/README.rst @@ -34,11 +34,9 @@ webargs is a Python library for parsing and validating HTTP request objects, wit app = Flask(__name__) - hello_args = {"name": fields.Str(required=True)} - @app.route("/") - @use_args(hello_args) + @use_args({"name": fields.Str(required=True)}) def index(args): return "Hello " + args["name"] diff --git a/docs/advanced.rst b/docs/advanced.rst index 9879dacc..01b04176 100644 --- a/docs/advanced.rst +++ b/docs/advanced.rst @@ -26,7 +26,7 @@ To add your own custom location handler, write a function that receives a reques return "displaying {} posts".format(args["per_page"]) -Marshmallow Integration +marshmallow Integration ----------------------- When you need more flexibility in defining input schemas, you can pass a marshmallow `Schema <marshmallow.Schema>` instead of a dictionary to `Parser.parse <webargs.core.Parser.parse>`, `Parser.use_args <webargs.core.Parser.use_args>`, and `Parser.use_kwargs <webargs.core.Parser.use_kwargs>`. @@ -46,8 +46,9 @@ When you need more flexibility in defining input schemas, you can pass a marshma last_name = fields.Str(missing="") date_registered = fields.DateTime(dump_only=True) - class Meta: - strict = True + # NOTE: Uncomment below two lines if you're using marshmallow 2 + # class Meta: + # strict = True @use_args(UserSchema()) @@ -88,9 +89,12 @@ Consider the following use cases: .. code-block:: python + from flask import Flask from marshmallow import Schema, fields from webargs.flaskparser import use_args + app = Flask(__name__) + class UserSchema(Schema): id = fields.Int(dump_only=True) @@ -100,13 +104,11 @@ Consider the following use cases: last_name = fields.Str(missing="") date_registered = fields.DateTime(dump_only=True) - class Meta: - strict = True - def make_user_schema(request): # Filter based on 'fields' query parameter - only = request.args.get("fields", None) + fields = request.args.get("fields", None) + only = fields.split(",") if fields else None # Respect partial updates for PATCH requests partial = request.method == "PATCH" # Add current request to the schema's context @@ -114,12 +116,14 @@ Consider the following use cases: # Pass the factory to .parse, .use_args, or .use_kwargs + @app.route("/profile/", methods=["GET", "POST", "PATCH"]) @use_args(make_user_schema) def profile_view(args): - username = args["username"] + username = args.get("username") # ... 
+ Reducing Boilerplate ++++++++++++++++++++ @@ -138,14 +142,8 @@ We can reduce boilerplate and improve [re]usability with a simple helper functio only = request.args.get("fields", None) # Respect partial updates for PATCH requests partial = request.method == "PATCH" - # Add current request to the schema's context - # and ensure we're always using strict mode return schema_cls( - only=only, - partial=partial, - strict=True, - context={"request": request}, - **schema_kwargs + only=only, partial=partial, context={"request": request}, **schema_kwargs ) return use_args(factory, **kwargs) @@ -275,9 +273,6 @@ For example, you might implement JSON PATCH according to `RFC 6902 <https://tool path = fields.Str(required=True) value = fields.Str(required=True) - class Meta: - strict = True - @app.route("/profile/", methods=["patch"]) @use_args(PatchSchema(many=True), locations=("json",)) diff --git a/docs/index.rst b/docs/index.rst index 0de96a3f..e152b9f3 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -15,11 +15,9 @@ webargs is a Python library for parsing and validating HTTP request objects, wit app = Flask(__name__) - hello_args = {"name": fields.Str(required=True)} - @app.route("/") - @use_args(hello_args) + @use_args({"name": fields.Str(required=True)}) def index(args): return "Hello " + args["name"] diff --git a/examples/schema_example.py b/examples/schema_example.py index f1177dc8..148918bc 100644 --- a/examples/schema_example.py +++ b/examples/schema_example.py @@ -89,9 +89,6 @@ class UserSchema(Schema): first_name = fields.Str() last_name = fields.Str() - class Meta: - strict = True - @post_dump(pass_many=True) def wrap_with_envelope(self, data, many, **kwargs): return {"data": data}
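The documentation hunk above answers the question raised in the issue: marshmallow's `only` expects a list of field names, so the schema factory has to split the comma-separated `fields` query parameter itself. A condensed, runnable sketch of that pattern follows, assuming an illustrative `UserSchema` and `/profile/` route and the webargs 5.x behavior used elsewhere in this record, where query string and JSON locations are parsed by default.

```python
from flask import Flask
from marshmallow import Schema, fields
from webargs.flaskparser import use_args

app = Flask(__name__)


class UserSchema(Schema):
    id = fields.Int(dump_only=True)
    username = fields.Str(required=True)
    first_name = fields.Str(missing="")
    last_name = fields.Str(missing="")


def make_user_schema(request):
    # ?fields=username,first_name arrives as a single string, so split it into
    # the list of field names that marshmallow's `only` argument expects.
    fields_param = request.args.get("fields", None)
    only = fields_param.split(",") if fields_param else None
    # Respect partial updates for PATCH requests.
    partial = request.method == "PATCH"
    return UserSchema(only=only, partial=partial, context={"request": request})


@app.route("/profile/", methods=["GET", "POST", "PATCH"])
@use_args(make_user_schema)
def profile_view(args):
    return "username: {}".format(args.get("username", ""))
```

A request such as `GET /profile/?fields=username,first_name&username=freddie` is then parsed against only those two fields instead of raising `StringNotCollectionError`.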
ansible__molecule-1717
build fails on functional tests shard 1/3 # Issue Type - Bug report # Desired Behavior CI green # Actual Behaviour Build fails on functional tests shard 1/3, for all versions of Ansible and python example: https://travis-ci.com/ansible/molecule/jobs/174166218 ``` test/functional/docker/test_scenarios.py::test_plugins[plugins-docker-default] PASSED [100%] =================================== FAILURES =================================== _____________ test_host_group_vars[host_group_vars-docker-default] _____________ scenario_to_test = 'host_group_vars', with_scenario = None scenario_name = 'default' @skip_unsupported_matrix @pytest.mark.parametrize( 'scenario_to_test, driver_name, scenario_name', [ ('host_group_vars', 'docker', 'default'), ], indirect=[ 'scenario_to_test', 'driver_name', 'scenario_name', ]) def test_host_group_vars(scenario_to_test, with_scenario, scenario_name): options = { 'all': True, } cmd = sh.molecule.bake('test', **options) > out = pytest.helpers.run_command(cmd, log=False) ../../functional/docker/test_scenarios.py:349: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ ../../conftest.py:43: in run_command return util.run_command(cmd) ../../../molecule/util.py:105: in run_command return cmd() ../../../.tox/ansible27-functional/lib/python3.7/site-packages/sh.py:1427: in __call__ return RunningCommand(cmd, call_args, stdin, stdout, stderr) ../../../.tox/ansible27-functional/lib/python3.7/site-packages/sh.py:774: in __init__ self.wait() ../../../.tox/ansible27-functional/lib/python3.7/site-packages/sh.py:792: in wait self.handle_command_exit_code(exit_code) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = --> Validating schema /home/travis/build/ansible/molecule/test/scenarios/host_group_vars/molecule/links/molecule.... code = 1 def handle_command_exit_code(self, code): """ here we determine if we had an exception, or an error code that we weren't expecting to see. if we did, we create and raise an exception """ ca = self.call_args exc_class = get_exc_exit_code_would_raise(code, ca["ok_code"], ca["piped"]) if exc_class: exc = exc_class(self.ran, self.process.stdout, self.process.stderr, ca["truncate_exc"]) > raise exc E sh.ErrorReturnCode_1: E E RAN: /home/travis/build/ansible/molecule/.tox/ansible27-functional/bin/molecule test --all E E STDOUT: E --> Validating schema /home/travis/build/ansible/molecule/test/scenarios/host_group_vars/molecule/links/molecule.yml. E Validation completed successfully. E --> Validating schema /home/travis/build/ansible/molecule/test/scenarios/host_group_vars/molecule/default/molecule.yml. E Validation completed successfully. E --> Test matrix E E ├── links E │ ├── lint E │ ├── destroy E │ ├── dependency E │ ├── syntax E │ ├── create E │ ├── prepare E │ ├── converge E │ ├── idempotence E │ ├── side_effect E │ ├── v... (15851 more, please see e.stdout) E E STDERR: ../../../.tox/ansible27-functional/lib/python3.7/site-packages/sh.py:815: ErrorReturnCode_1 --------------------------- Captured stdout teardown --------------------------- CLEANUP: Destroying instances for all scenario(s) --> Validating schema /home/travis/build/ansible/molecule/test/scenarios/host_group_vars/molecule/links/molecule.yml. Validation completed successfully. --> Validating schema /home/travis/build/ansible/molecule/test/scenarios/host_group_vars/molecule/default/molecule.yml. Validation completed successfully. 
--> Test matrix ├── links │ └── destroy └── default └── destroy --> Inventory /home/travis/build/ansible/molecule/test/scenarios/host_group_vars/molecule/links/../../host_vars linked to /tmp/molecule/host_group_vars/links/host_vars --> Inventory /home/travis/build/ansible/molecule/test/scenarios/host_group_vars/molecule/links/../../group_vars linked to /tmp/molecule/host_group_vars/links/group_vars --> Scenario: 'links' --> Action: 'destroy' PLAY [Destroy] ***************************************************************** TASK [Destroy molecule instance(s)] ******************************************** changed: [localhost] => (item=None) changed: [localhost] TASK [Wait for instance(s) deletion to complete] ******************************* ok: [localhost] => (item=None) ok: [localhost] TASK [Delete docker network(s)] ************************************************ PLAY RECAP ********************************************************************* localhost : ok=2 changed=1 unreachable=0 failed=0 --> Scenario: 'default' --> Action: 'destroy' PLAY [Destroy] ***************************************************************** TASK [Destroy molecule instance(s)] ******************************************** changed: [localhost] => (item=None) changed: [localhost] TASK [Wait for instance(s) deletion to complete] ******************************* ok: [localhost] => (item=None) ok: [localhost] TASK [Delete docker network(s)] ************************************************ PLAY RECAP ********************************************************************* localhost : ok=2 changed=1 unreachable=0 failed=0 ```
[ { "content": "# Copyright (c) 2015-2018 Cisco Systems, Inc.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to\n# deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n# sell copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n\nfrom __future__ import print_function\n\nimport contextlib\nimport fnmatch\nimport jinja2\nimport os\nimport re\nimport sys\n\nimport anyconfig\nimport colorama\nimport yaml\n\nfrom molecule import logger\n\nLOG = logger.get_logger(__name__)\nMERGE_STRATEGY = anyconfig.MS_DICTS\n\ncolorama.init(autoreset=True)\n\n\nclass SafeDumper(yaml.SafeDumper):\n def increase_indent(self, flow=False, indentless=False):\n return super(SafeDumper, self).increase_indent(flow, False)\n\n\ndef print_debug(title, data):\n title = 'DEBUG: {}'.format(title)\n title = [\n colorama.Back.WHITE, colorama.Style.BRIGHT, colorama.Fore.BLACK, title,\n colorama.Fore.RESET, colorama.Back.RESET, colorama.Style.RESET_ALL\n ]\n print(''.join(title))\n data = [\n colorama.Fore.BLACK, colorama.Style.BRIGHT, data,\n colorama.Style.RESET_ALL, colorama.Fore.RESET\n ]\n print(''.join(data))\n\n\ndef print_environment_vars(env):\n \"\"\"\n Print ``Ansible`` and ``Molecule`` environment variables and returns None.\n\n :param env: A dict containing the shell's environment as collected by\n ``os.environ``.\n :return: None\n \"\"\"\n ansible_env = {k: v for (k, v) in env.items() if 'ANSIBLE_' in k}\n print_debug('ANSIBLE ENVIRONMENT', safe_dump(ansible_env))\n\n molecule_env = {k: v for (k, v) in env.items() if 'MOLECULE_' in k}\n print_debug('MOLECULE ENVIRONMENT', safe_dump(molecule_env))\n\n combined_env = ansible_env.copy()\n combined_env.update(molecule_env)\n print_debug(\n 'SHELL REPLAY', \" \".join(\n [\"{}={}\".format(k, v) for (k, v) in sorted(combined_env.items())]))\n print()\n\n\ndef sysexit(code=1):\n sys.exit(code)\n\n\ndef sysexit_with_message(msg, code=1):\n LOG.critical(msg)\n sysexit(code)\n\n\ndef run_command(cmd, debug=False):\n \"\"\"\n Execute the given command and returns None.\n\n :param cmd: A ``sh.Command`` object to execute.\n :param debug: An optional bool to toggle debug output.\n :return: ``sh`` object\n \"\"\"\n if debug:\n # WARN(retr0h): Uses an internal ``sh`` data structure to dig\n # the environment out of the ``sh.command`` object.\n print_environment_vars(cmd._partial_call_args.get('env', {}))\n print_debug('COMMAND', str(cmd))\n print()\n return cmd()\n\n\ndef os_walk(directory, pattern, excludes=[]):\n for root, dirs, files in os.walk(directory, topdown=True):\n dirs[:] = [d for d in dirs if d not in excludes]\n for basename in files:\n if fnmatch.fnmatch(basename, pattern):\n filename = 
os.path.join(root, basename)\n\n yield filename\n\n\ndef render_template(template, **kwargs):\n t = jinja2.Environment()\n t = t.from_string(template)\n\n return t.render(kwargs)\n\n\ndef write_file(filename, content):\n \"\"\"\n Writes a file with the given filename and content and returns None.\n\n :param filename: A string containing the target filename.\n :param content: A string containing the data to be written.\n :return: None\n \"\"\"\n with open_file(filename, 'w') as f:\n f.write(content)\n\n file_prepender(filename)\n\n\ndef molecule_prepender(content):\n return '# Molecule managed\\n\\n' + content\n\n\ndef file_prepender(filename):\n \"\"\"\n Prepend an informational header on files managed by Molecule and returns\n None.\n\n :param filename: A string containing the target filename.\n :return: None\n \"\"\"\n with open_file(filename, 'r+') as f:\n content = f.read()\n f.seek(0, 0)\n f.write(molecule_prepender(content))\n\n\ndef safe_dump(data):\n \"\"\"\n Dump the provided data to a YAML document and returns a string.\n\n :param data: A string containing an absolute path to the file to parse.\n :return: str\n \"\"\"\n # TODO(retr0h): Do we need to encode?\n # yaml.dump(data) produces the document as a str object in both python\n # 2 and 3.\n return yaml.dump(\n data, Dumper=SafeDumper, default_flow_style=False, explicit_start=True)\n\n\ndef safe_load(string):\n \"\"\"\n Parse the provided string returns a dict.\n\n :param string: A string to be parsed.\n :return: dict\n \"\"\"\n try:\n return yaml.safe_load(string) or {}\n except yaml.scanner.ScannerError as e:\n sysexit_with_message(str(e))\n\n\ndef safe_load_file(filename):\n \"\"\"\n Parse the provided YAML file and returns a dict.\n\n :param filename: A string containing an absolute path to the file to parse.\n :return: dict\n \"\"\"\n with open_file(filename) as stream:\n return safe_load(stream)\n\n\[email protected]\ndef open_file(filename, mode='r'):\n \"\"\"\n Open the provide file safely and returns a file type.\n\n :param filename: A string containing an absolute path to the file to open.\n :param mode: A string describing the way in which the file will be used.\n :return: file type\n \"\"\"\n with open(filename, mode) as stream:\n yield stream\n\n\ndef instance_with_scenario_name(instance_name, scenario_name):\n return '{}-{}'.format(instance_name, scenario_name)\n\n\ndef strip_ansi_escape(string):\n return re.sub(r'\\x1b[^m]*m', '', string)\n\n\ndef strip_ansi_color(s):\n # Taken from tabulate\n invisible_codes = re.compile(r'\\x1b\\[\\d*m')\n\n return re.sub(invisible_codes, '', s)\n\n\ndef verbose_flag(options):\n verbose = 'v'\n verbose_flag = []\n for i in range(0, 3):\n if options.get(verbose):\n verbose_flag = ['-{}'.format(verbose)]\n del options[verbose]\n if options.get('verbose'):\n del options['verbose']\n break\n verbose = verbose + 'v'\n\n return verbose_flag\n\n\ndef filter_verbose_permutation(options):\n return {k: options[k] for k in options if not re.match('^[v]+$', k)}\n\n\ndef title(word):\n return ' '.join(x.capitalize() or '_' for x in word.split('_'))\n\n\ndef abs_path(path):\n if path:\n return os.path.abspath(path)\n\n\ndef camelize(string):\n # NOTE(retr0h): Taken from jpvanhal/inflection\n # https://github.com/jpvanhal/inflection\n return re.sub(r\"(?:^|_)(.)\", lambda m: m.group(1).upper(), string)\n\n\ndef underscore(string):\n # NOTE(retr0h): Taken from jpvanhal/inflection\n # https://github.com/jpvanhal/inflection\n string = re.sub(r\"([A-Z]+)([A-Z][a-z])\", r'\\1_\\2', string)\n 
string = re.sub(r\"([a-z\\d])([A-Z])\", r'\\1_\\2', string)\n string = string.replace(\"-\", \"_\")\n\n return string.lower()\n\n\ndef merge_dicts(a, b):\n \"\"\"\n Merges the values of B into A and returns a mutated dict A.\n\n ::\n\n dict a\n\n b:\n - c: 0\n - c: 2\n d:\n e: \"aaa\"\n f: 3\n\n dict b\n\n a: 1\n b:\n - c: 3\n d:\n e: \"bbb\"\n\n Will give an object such as::\n\n {'a': 1, 'b': [{'c': 3}], 'd': {'e': \"bbb\", 'f': 3}}\n\n\n :param a: the target dictionary\n :param b: the dictionary to import\n :return: dict\n \"\"\"\n anyconfig.merge(a, b, ac_merge=MERGE_STRATEGY)\n\n return a\n\n\ndef memoize(function):\n memo = {}\n\n def wrapper(*args, **kwargs):\n if args not in memo:\n rv = function(*args, **kwargs)\n memo[args] = rv\n\n return rv\n return memo[args]\n\n return wrapper\n", "path": "molecule/util.py" } ]
[ { "content": "# Copyright (c) 2015-2018 Cisco Systems, Inc.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to\n# deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n# sell copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n\nfrom __future__ import print_function\n\nimport contextlib\nimport fnmatch\nimport jinja2\nimport os\nimport re\nimport sys\n\nimport anyconfig\nimport colorama\nimport yaml\n\nfrom molecule import logger\n\nLOG = logger.get_logger(__name__)\nMERGE_STRATEGY = anyconfig.MS_DICTS\n\ncolorama.init(autoreset=True)\n\n\nclass SafeDumper(yaml.SafeDumper):\n def increase_indent(self, flow=False, indentless=False):\n return super(SafeDumper, self).increase_indent(flow, False)\n\n\ndef print_debug(title, data):\n title = 'DEBUG: {}'.format(title)\n title = [\n colorama.Back.WHITE, colorama.Style.BRIGHT, colorama.Fore.BLACK, title,\n colorama.Fore.RESET, colorama.Back.RESET, colorama.Style.RESET_ALL\n ]\n print(''.join(title))\n data = [\n colorama.Fore.BLACK, colorama.Style.BRIGHT, data,\n colorama.Style.RESET_ALL, colorama.Fore.RESET\n ]\n print(''.join(data))\n\n\ndef print_environment_vars(env):\n \"\"\"\n Print ``Ansible`` and ``Molecule`` environment variables and returns None.\n\n :param env: A dict containing the shell's environment as collected by\n ``os.environ``.\n :return: None\n \"\"\"\n ansible_env = {k: v for (k, v) in env.items() if 'ANSIBLE_' in k}\n print_debug('ANSIBLE ENVIRONMENT', safe_dump(ansible_env))\n\n molecule_env = {k: v for (k, v) in env.items() if 'MOLECULE_' in k}\n print_debug('MOLECULE ENVIRONMENT', safe_dump(molecule_env))\n\n combined_env = ansible_env.copy()\n combined_env.update(molecule_env)\n print_debug(\n 'SHELL REPLAY', \" \".join(\n [\"{}={}\".format(k, v) for (k, v) in sorted(combined_env.items())]))\n print()\n\n\ndef sysexit(code=1):\n sys.exit(code)\n\n\ndef sysexit_with_message(msg, code=1):\n LOG.critical(msg)\n sysexit(code)\n\n\ndef run_command(cmd, debug=False):\n \"\"\"\n Execute the given command and returns None.\n\n :param cmd: A ``sh.Command`` object to execute.\n :param debug: An optional bool to toggle debug output.\n :return: ``sh`` object\n \"\"\"\n if debug:\n # WARN(retr0h): Uses an internal ``sh`` data structure to dig\n # the environment out of the ``sh.command`` object.\n print_environment_vars(cmd._partial_call_args.get('env', {}))\n print_debug('COMMAND', str(cmd))\n print()\n return cmd(_truncate_exc=False)\n\n\ndef os_walk(directory, pattern, excludes=[]):\n for root, dirs, files in os.walk(directory, topdown=True):\n dirs[:] = [d for d in dirs if d not in excludes]\n for basename in files:\n if fnmatch.fnmatch(basename, pattern):\n 
filename = os.path.join(root, basename)\n\n yield filename\n\n\ndef render_template(template, **kwargs):\n t = jinja2.Environment()\n t = t.from_string(template)\n\n return t.render(kwargs)\n\n\ndef write_file(filename, content):\n \"\"\"\n Writes a file with the given filename and content and returns None.\n\n :param filename: A string containing the target filename.\n :param content: A string containing the data to be written.\n :return: None\n \"\"\"\n with open_file(filename, 'w') as f:\n f.write(content)\n\n file_prepender(filename)\n\n\ndef molecule_prepender(content):\n return '# Molecule managed\\n\\n' + content\n\n\ndef file_prepender(filename):\n \"\"\"\n Prepend an informational header on files managed by Molecule and returns\n None.\n\n :param filename: A string containing the target filename.\n :return: None\n \"\"\"\n with open_file(filename, 'r+') as f:\n content = f.read()\n f.seek(0, 0)\n f.write(molecule_prepender(content))\n\n\ndef safe_dump(data):\n \"\"\"\n Dump the provided data to a YAML document and returns a string.\n\n :param data: A string containing an absolute path to the file to parse.\n :return: str\n \"\"\"\n # TODO(retr0h): Do we need to encode?\n # yaml.dump(data) produces the document as a str object in both python\n # 2 and 3.\n return yaml.dump(\n data, Dumper=SafeDumper, default_flow_style=False, explicit_start=True)\n\n\ndef safe_load(string):\n \"\"\"\n Parse the provided string returns a dict.\n\n :param string: A string to be parsed.\n :return: dict\n \"\"\"\n try:\n return yaml.safe_load(string) or {}\n except yaml.scanner.ScannerError as e:\n sysexit_with_message(str(e))\n\n\ndef safe_load_file(filename):\n \"\"\"\n Parse the provided YAML file and returns a dict.\n\n :param filename: A string containing an absolute path to the file to parse.\n :return: dict\n \"\"\"\n with open_file(filename) as stream:\n return safe_load(stream)\n\n\[email protected]\ndef open_file(filename, mode='r'):\n \"\"\"\n Open the provide file safely and returns a file type.\n\n :param filename: A string containing an absolute path to the file to open.\n :param mode: A string describing the way in which the file will be used.\n :return: file type\n \"\"\"\n with open(filename, mode) as stream:\n yield stream\n\n\ndef instance_with_scenario_name(instance_name, scenario_name):\n return '{}-{}'.format(instance_name, scenario_name)\n\n\ndef strip_ansi_escape(string):\n return re.sub(r'\\x1b[^m]*m', '', string)\n\n\ndef strip_ansi_color(s):\n # Taken from tabulate\n invisible_codes = re.compile(r'\\x1b\\[\\d*m')\n\n return re.sub(invisible_codes, '', s)\n\n\ndef verbose_flag(options):\n verbose = 'v'\n verbose_flag = []\n for i in range(0, 3):\n if options.get(verbose):\n verbose_flag = ['-{}'.format(verbose)]\n del options[verbose]\n if options.get('verbose'):\n del options['verbose']\n break\n verbose = verbose + 'v'\n\n return verbose_flag\n\n\ndef filter_verbose_permutation(options):\n return {k: options[k] for k in options if not re.match('^[v]+$', k)}\n\n\ndef title(word):\n return ' '.join(x.capitalize() or '_' for x in word.split('_'))\n\n\ndef abs_path(path):\n if path:\n return os.path.abspath(path)\n\n\ndef camelize(string):\n # NOTE(retr0h): Taken from jpvanhal/inflection\n # https://github.com/jpvanhal/inflection\n return re.sub(r\"(?:^|_)(.)\", lambda m: m.group(1).upper(), string)\n\n\ndef underscore(string):\n # NOTE(retr0h): Taken from jpvanhal/inflection\n # https://github.com/jpvanhal/inflection\n string = re.sub(r\"([A-Z]+)([A-Z][a-z])\", r'\\1_\\2', 
string)\n string = re.sub(r\"([a-z\\d])([A-Z])\", r'\\1_\\2', string)\n string = string.replace(\"-\", \"_\")\n\n return string.lower()\n\n\ndef merge_dicts(a, b):\n \"\"\"\n Merges the values of B into A and returns a mutated dict A.\n\n ::\n\n dict a\n\n b:\n - c: 0\n - c: 2\n d:\n e: \"aaa\"\n f: 3\n\n dict b\n\n a: 1\n b:\n - c: 3\n d:\n e: \"bbb\"\n\n Will give an object such as::\n\n {'a': 1, 'b': [{'c': 3}], 'd': {'e': \"bbb\", 'f': 3}}\n\n\n :param a: the target dictionary\n :param b: the dictionary to import\n :return: dict\n \"\"\"\n anyconfig.merge(a, b, ac_merge=MERGE_STRATEGY)\n\n return a\n\n\ndef memoize(function):\n memo = {}\n\n def wrapper(*args, **kwargs):\n if args not in memo:\n rv = function(*args, **kwargs)\n memo[args] = rv\n\n return rv\n return memo[args]\n\n return wrapper\n", "path": "molecule/util.py" } ]
diff --git a/molecule/util.py b/molecule/util.py index 1c64367502..426cae35ae 100644 --- a/molecule/util.py +++ b/molecule/util.py @@ -102,7 +102,7 @@ def run_command(cmd, debug=False): print_environment_vars(cmd._partial_call_args.get('env', {})) print_debug('COMMAND', str(cmd)) print() - return cmd() + return cmd(_truncate_exc=False) def os_walk(directory, pattern, excludes=[]): diff --git a/test/scenarios/host_group_vars/molecule/default/playbook.yml b/test/scenarios/host_group_vars/molecule/default/playbook.yml index 074a053525..b42d93e651 100644 --- a/test/scenarios/host_group_vars/molecule/default/playbook.yml +++ b/test/scenarios/host_group_vars/molecule/default/playbook.yml @@ -10,7 +10,8 @@ changed_when: false - name: Group vars group_var for group example from molecule.yml - command: echo "{{ host_group_vars_example_group_one_molecule_yml }} {{ host_group_vars_example_group_two_molecule_yml }}" + command: |- + echo "{{ host_group_vars_example_group_one_molecule_yml }} {{ host_group_vars_example_group_two_molecule_yml }}" changed_when: false - name: Group vars from group_vars existing directory
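For context on the one-line fix to `run_command` in the diff above: `sh` accepts its special underscore keyword arguments at call time, and `_truncate_exc=False` is presumably passed so that a failing Ansible command's full output survives into the raised exception instead of being truncated. A rough, hedged sketch of the idea (the failing command and path are only illustrations, not Molecule code):

```python
import sh

try:
    # Special underscore kwargs can be passed when the prepared command is
    # invoked, which is what Molecule's run_command() now does.
    sh.ls("/definitely-missing-path", _truncate_exc=False)
except sh.ErrorReturnCode as exc:
    # With truncation disabled, the exception should carry the full stderr.
    print(exc.stderr.decode())
```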
qtile__qtile-1645
lazy.when
# Issue description
I found this quite old [`config.py`](https://gist.github.com/cjbarnes18/4151805) that uses a `when` operation to limit a keybinding to a layout, but it throws an exception. I also couldn't find it in the docs, so is it supposed to exist? Is there another intended way to limit key bindings to a layout?

```python
from libqtile.lazy import lazy
```
...
```python
Key([mod], 'Left', lazy.layout.left().when('xmonadtall'))
```

# Qtile version
v0.15.0

# Stack traces
```
libqtile xcore.py:_xpoll():L277 Got an exception in poll loop
Traceback (most recent call last):
  File "/share/git/qtile/libqtile/backend/x11/xcore.py", line 246, in _xpoll
    ret = target(event)
  File "/share/git/qtile/libqtile/backend/x11/xcore.py", line 494, in handle_KeyPress
    self.qtile.process_key_event(keysym, event.state & self._valid_mask)
  File "/share/git/qtile/libqtile/core/manager.py", line 342, in process_key_event
    if cmd.check(self):
AttributeError: 'NoneType' object has no attribute 'check'
```

Thanks for any help
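The traceback makes the root cause visible: the chained call `lazy.layout.left().when('xmonadtall')` evaluates to `None`, because `LazyCall.when()` mutates the call but does not return it, so the key handler later calls `.check()` on `None`. A minimal, stand-alone sketch of the chaining pattern (a hypothetical class, not qtile's actual code), showing why the fix below adds `return self`:

```python
class Call:
    """Tiny stand-in for a lazily evaluated command call."""

    def __init__(self):
        self._layout = None

    def when(self, layout=None):
        self._layout = layout
        # Returning self is what makes `Call().when('xmonadtall')` usable as a
        # key binding target; without it the expression is None and calling
        # .check() on it raises AttributeError, as in the traceback.
        return self

    def check(self, current_layout):
        return self._layout is None or self._layout == current_layout


binding = Call().when("xmonadtall")
assert binding is not None
assert binding.check("xmonadtall")
```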
[ { "content": "# Copyright (c) 2019, Sean Vig. All rights reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nfrom typing import Dict, List, Optional, Tuple, Union # noqa: F401\n\nfrom libqtile.command_client import InteractiveCommandClient\nfrom libqtile.command_graph import (\n CommandGraphCall,\n CommandGraphNode,\n SelectorType,\n)\nfrom libqtile.command_interface import CommandInterface\n\n\nclass LazyCall:\n def __init__(self, call: CommandGraphCall, args: Tuple, kwargs: Dict) -> None:\n \"\"\"The lazily evaluated command graph call\n\n Parameters\n ----------\n call : CommandGraphCall\n The call that is made\n args : Tuple\n The args passed to the call when it is evaluated.\n kwargs : Dict\n The kwargs passed to the call when it is evaluated.\n \"\"\"\n self._call = call\n self._args = args\n self._kwargs = kwargs\n\n self._layout = None # type: Optional[str]\n self._when_floating = True\n\n @property\n def selectors(self) -> List[SelectorType]:\n \"\"\"The selectors for the given call\"\"\"\n return self._call.selectors\n\n @property\n def name(self) -> str:\n \"\"\"The name of the given call\"\"\"\n return self._call.name\n\n @property\n def args(self) -> Tuple:\n \"\"\"The args to the given call\"\"\"\n return self._args\n\n @property\n def kwargs(self) -> Dict:\n \"\"\"The kwargs to the given call\"\"\"\n return self._kwargs\n\n def when(self, layout=None, when_floating=True):\n self._layout = layout\n self._when_floating = when_floating\n\n def check(self, q) -> bool:\n if self._layout is not None:\n if self._layout == 'floating':\n if q.current_window.floating:\n return True\n return False\n if q.current_layout.name != self._layout:\n if q.current_window and q.current_window.floating and not self._when_floating:\n return False\n return True\n\n\nclass LazyCommandObject(CommandInterface):\n \"\"\"A lazy loading command object\n\n Allows all commands and items to be resolved at run time, and returns\n lazily evaluated commands.\n \"\"\"\n\n def execute(self, call: CommandGraphCall, args: Tuple, kwargs: Dict) -> LazyCall:\n \"\"\"Lazily evaluate the given call\"\"\"\n return LazyCall(call, args, kwargs)\n\n def has_command(self, node: CommandGraphNode, command: str) -> bool:\n \"\"\"Lazily resolve the given command\"\"\"\n return True\n\n def has_item(self, node: CommandGraphNode, object_type: str, item: Union[str, int]) -> bool:\n \"\"\"Lazily resolve the given item\"\"\"\n return True\n\n\nlazy = InteractiveCommandClient(LazyCommandObject())\n", "path": 
"libqtile/lazy.py" } ]
[ { "content": "# Copyright (c) 2019, Sean Vig. All rights reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nfrom typing import Dict, List, Optional, Tuple, Union # noqa: F401\n\nfrom libqtile.command_client import InteractiveCommandClient\nfrom libqtile.command_graph import (\n CommandGraphCall,\n CommandGraphNode,\n SelectorType,\n)\nfrom libqtile.command_interface import CommandInterface\n\n\nclass LazyCall:\n def __init__(self, call: CommandGraphCall, args: Tuple, kwargs: Dict) -> None:\n \"\"\"The lazily evaluated command graph call\n\n Parameters\n ----------\n call : CommandGraphCall\n The call that is made\n args : Tuple\n The args passed to the call when it is evaluated.\n kwargs : Dict\n The kwargs passed to the call when it is evaluated.\n \"\"\"\n self._call = call\n self._args = args\n self._kwargs = kwargs\n\n self._layout = None # type: Optional[str]\n self._when_floating = True\n\n @property\n def selectors(self) -> List[SelectorType]:\n \"\"\"The selectors for the given call\"\"\"\n return self._call.selectors\n\n @property\n def name(self) -> str:\n \"\"\"The name of the given call\"\"\"\n return self._call.name\n\n @property\n def args(self) -> Tuple:\n \"\"\"The args to the given call\"\"\"\n return self._args\n\n @property\n def kwargs(self) -> Dict:\n \"\"\"The kwargs to the given call\"\"\"\n return self._kwargs\n\n def when(self, layout=None, when_floating=True):\n self._layout = layout\n self._when_floating = when_floating\n return self\n\n def check(self, q) -> bool:\n if self._layout is not None:\n if self._layout == 'floating':\n if q.current_window.floating:\n return True\n return False\n if q.current_layout.name != self._layout:\n if q.current_window and q.current_window.floating and not self._when_floating:\n return False\n return True\n\n\nclass LazyCommandObject(CommandInterface):\n \"\"\"A lazy loading command object\n\n Allows all commands and items to be resolved at run time, and returns\n lazily evaluated commands.\n \"\"\"\n\n def execute(self, call: CommandGraphCall, args: Tuple, kwargs: Dict) -> LazyCall:\n \"\"\"Lazily evaluate the given call\"\"\"\n return LazyCall(call, args, kwargs)\n\n def has_command(self, node: CommandGraphNode, command: str) -> bool:\n \"\"\"Lazily resolve the given command\"\"\"\n return True\n\n def has_item(self, node: CommandGraphNode, object_type: str, item: Union[str, int]) -> bool:\n \"\"\"Lazily resolve the given item\"\"\"\n return True\n\n\nlazy = 
InteractiveCommandClient(LazyCommandObject())\n", "path": "libqtile/lazy.py" } ]
diff --git a/libqtile/lazy.py b/libqtile/lazy.py index c30c4d0542..0ab1dffea6 100644 --- a/libqtile/lazy.py +++ b/libqtile/lazy.py @@ -72,6 +72,7 @@ def kwargs(self) -> Dict: def when(self, layout=None, when_floating=True): self._layout = layout self._when_floating = when_floating + return self def check(self, q) -> bool: if self._layout is not None:
chainer__chainer-258
Shape of output value of `concat` is list in GPU

`cuda.empty([1])` makes a `GPUArray` with a `list` shape. It causes an invalid-type error.
https://github.com/pfnet/chainer/blob/master/chainer/functions/concat.py#L69
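The fix (see the files and diff below) is simply to convert the accumulated shape to a tuple before allocating the output. A small sketch of the shape bookkeeping, using NumPy as a stand-in for `cuda.empty` (NumPy itself tolerates list shapes, so this only illustrates where the tuple conversion belongs, not the GPU failure itself):

```python
import numpy

axis = 1
xs = [numpy.zeros((2, 3, 4), dtype=numpy.float32),
      numpy.zeros((2, 5, 4), dtype=numpy.float32)]

shape = list(xs[0].shape)            # mutable while summing the concat axis
for x in xs[1:]:
    shape[axis] += x.shape[axis]
shape = tuple(shape)                 # the allocator should see a tuple shape

y = numpy.empty(shape, dtype=xs[0].dtype)
assert y.shape == (2, 8, 4)
assert isinstance(y.shape, tuple)
```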
[ { "content": "import numpy\n\nfrom chainer import cuda\nfrom chainer import function\nfrom chainer.utils import type_check\n\n_args = 'const float* x, float* y, int cdimx, int cdimy, int rdim, int coffset'\n_preamble = '''\n#define COPY(statement) \\\n int l = i / (rdim * cdimx); \\\n int c = i / rdim % cdimx + coffset; \\\n int r = i % rdim; \\\n int idx = r + rdim * (c + cdimy * l); \\\n statement;\n'''\n\n\nclass Concat(function.Function):\n\n \"\"\"Concatenate multiple tensors towards specified axis.\"\"\"\n\n # concat along the channel dimension by default\n def __init__(self, axis=1):\n self.axis = axis\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() > 0)\n type_check.expect(in_types[0].ndim >\n type_check.Variable(self.axis, 'axis'))\n\n ndim = in_types[0].ndim.eval()\n for i in range(1, in_types.size().eval()):\n type_check.expect(\n in_types[0].dtype == in_types[i].dtype,\n in_types[0].ndim == in_types[i].ndim,\n )\n for d in range(0, ndim):\n if d == self.axis:\n continue\n type_check.expect(in_types[0].shape[d] == in_types[i].shape[d])\n\n def check_type_backward(self, in_types, out_types):\n type_check.expect(\n in_types.size() > 0,\n out_types.size() == 1,\n )\n y_type, = out_types\n\n type_check.expect(y_type.dtype == in_types[0].dtype)\n ndim = in_types[0].ndim.eval()\n concat_size = sum(typ.shape[self.axis] for typ in in_types)\n type_check.expect(concat_size == y_type.shape[self.axis])\n\n for d in range(0, ndim):\n if d == self.axis:\n continue\n type_check.expect(y_type.shape[d] == in_types[0].shape[d])\n\n def forward_cpu(self, xs):\n return numpy.concatenate(xs, axis=self.axis),\n\n def forward_gpu(self, xs):\n # TODO(beam2d): Unify the process into a single kernel.\n shape = list(xs[0].shape)\n for x in xs[1:]:\n shape[self.axis] += x.shape[self.axis]\n self.shape = shape\n\n y = cuda.empty(shape, dtype=xs[0].dtype)\n self.cdimy = y.shape[self.axis]\n self.rdim = numpy.prod(shape[self.axis + 1:], dtype=int)\n\n coffset = 0\n kernel = cuda.elementwise(\n _args, 'COPY(y[idx] = x[i])', 'concat_fwd', preamble=_preamble)\n for x in xs:\n cdimx = x.shape[self.axis]\n kernel(x, y, cdimx, self.cdimy, self.rdim, coffset)\n coffset += cdimx\n\n return y,\n\n def backward_cpu(self, xs, gy):\n sizes = numpy.array([x.shape[self.axis] for x in xs[:-1]]).cumsum()\n return numpy.split(gy[0], sizes, axis=self.axis)\n\n def backward_gpu(self, xs, gy):\n gxs = tuple(cuda.empty_like(x) for x in xs)\n\n coffset = 0\n kernel = cuda.elementwise(\n _args, 'COPY(x[i] = y[idx])', 'concat_bwd', preamble=_preamble)\n for gx in gxs:\n cdimx = gx.shape[self.axis]\n kernel(gx, gy[0], cdimx, self.cdimy, self.rdim, coffset)\n coffset += cdimx\n\n return gxs\n\n\ndef concat(xs, axis=1):\n \"\"\"Concatenates given variables along an axis.\n\n Args:\n xs (tuple of Variables): Variables to be concatenated.\n axis (int): Axis that the input arrays are concatenated along.\n\n Returns:\n ~chainer.Variable: Output variable.\n\n \"\"\"\n return Concat(axis=axis)(*xs)\n", "path": "chainer/functions/concat.py" } ]
[ { "content": "import numpy\n\nfrom chainer import cuda\nfrom chainer import function\nfrom chainer.utils import type_check\n\n_args = 'const float* x, float* y, int cdimx, int cdimy, int rdim, int coffset'\n_preamble = '''\n#define COPY(statement) \\\n int l = i / (rdim * cdimx); \\\n int c = i / rdim % cdimx + coffset; \\\n int r = i % rdim; \\\n int idx = r + rdim * (c + cdimy * l); \\\n statement;\n'''\n\n\nclass Concat(function.Function):\n\n \"\"\"Concatenate multiple tensors towards specified axis.\"\"\"\n\n # concat along the channel dimension by default\n def __init__(self, axis=1):\n self.axis = axis\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() > 0)\n type_check.expect(in_types[0].ndim >\n type_check.Variable(self.axis, 'axis'))\n\n ndim = in_types[0].ndim.eval()\n for i in range(1, in_types.size().eval()):\n type_check.expect(\n in_types[0].dtype == in_types[i].dtype,\n in_types[0].ndim == in_types[i].ndim,\n )\n for d in range(0, ndim):\n if d == self.axis:\n continue\n type_check.expect(in_types[0].shape[d] == in_types[i].shape[d])\n\n def check_type_backward(self, in_types, out_types):\n type_check.expect(\n in_types.size() > 0,\n out_types.size() == 1,\n )\n y_type, = out_types\n\n type_check.expect(y_type.dtype == in_types[0].dtype)\n ndim = in_types[0].ndim.eval()\n concat_size = sum(typ.shape[self.axis] for typ in in_types)\n type_check.expect(concat_size == y_type.shape[self.axis])\n\n for d in range(0, ndim):\n if d == self.axis:\n continue\n type_check.expect(y_type.shape[d] == in_types[0].shape[d])\n\n def forward_cpu(self, xs):\n return numpy.concatenate(xs, axis=self.axis),\n\n def forward_gpu(self, xs):\n # TODO(beam2d): Unify the process into a single kernel.\n shape = list(xs[0].shape)\n for x in xs[1:]:\n shape[self.axis] += x.shape[self.axis]\n shape = tuple(shape)\n self.shape = shape\n\n y = cuda.empty(shape, dtype=xs[0].dtype)\n self.cdimy = y.shape[self.axis]\n self.rdim = numpy.prod(shape[self.axis + 1:], dtype=int)\n\n coffset = 0\n kernel = cuda.elementwise(\n _args, 'COPY(y[idx] = x[i])', 'concat_fwd', preamble=_preamble)\n for x in xs:\n cdimx = x.shape[self.axis]\n kernel(x, y, cdimx, self.cdimy, self.rdim, coffset)\n coffset += cdimx\n\n return y,\n\n def backward_cpu(self, xs, gy):\n sizes = numpy.array([x.shape[self.axis] for x in xs[:-1]]).cumsum()\n return numpy.split(gy[0], sizes, axis=self.axis)\n\n def backward_gpu(self, xs, gy):\n gxs = tuple(cuda.empty_like(x) for x in xs)\n\n coffset = 0\n kernel = cuda.elementwise(\n _args, 'COPY(x[i] = y[idx])', 'concat_bwd', preamble=_preamble)\n for gx in gxs:\n cdimx = gx.shape[self.axis]\n kernel(gx, gy[0], cdimx, self.cdimy, self.rdim, coffset)\n coffset += cdimx\n\n return gxs\n\n\ndef concat(xs, axis=1):\n \"\"\"Concatenates given variables along an axis.\n\n Args:\n xs (tuple of Variables): Variables to be concatenated.\n axis (int): Axis that the input arrays are concatenated along.\n\n Returns:\n ~chainer.Variable: Output variable.\n\n \"\"\"\n return Concat(axis=axis)(*xs)\n", "path": "chainer/functions/concat.py" } ]
diff --git a/chainer/functions/concat.py b/chainer/functions/concat.py index 748616e60cb7..3828b9b8e68d 100644 --- a/chainer/functions/concat.py +++ b/chainer/functions/concat.py @@ -64,6 +64,7 @@ def forward_gpu(self, xs): shape = list(xs[0].shape) for x in xs[1:]: shape[self.axis] += x.shape[self.axis] + shape = tuple(shape) self.shape = shape y = cuda.empty(shape, dtype=xs[0].dtype) diff --git a/tests/functions_tests/test_concat.py b/tests/functions_tests/test_concat.py index 1169faf30d2d..6bb66a2d942c 100644 --- a/tests/functions_tests/test_concat.py +++ b/tests/functions_tests/test_concat.py @@ -20,6 +20,7 @@ def check_forward(self, xs_data, y_data, axis): xs = tuple(chainer.Variable(x_data) for x_data in xs_data) y = functions.concat(xs, axis=axis) gradient_check.assert_allclose(y_data, y.data, atol=0, rtol=0) + self.assertIsInstance(y.data.shape, tuple) def test_forward_cpu(self): self.check_forward(self.xs, self.y, axis=self.axis)
Kinto__kinto-1752
Deleting a collection doesn't delete access_control_entries for its children

`buckets.py` has an event listener that ensures that when a bucket is deleted, everything underneath it is recursively deleted. `collections.py` has one too, but it doesn't appear to be as robust -- it doesn't have a wildcard to match objects more than one level below it (which might be OK, since only records are below collections now), and `delete_object_permissions` is only called on the collection rather than its children.
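The fix below widens the permission cleanup from the collection URI itself to a wildcard covering everything beneath it. Assuming the permission backend matches a trailing `*` in the usual shell-glob fashion (as the bucket cleanup already relies on), the difference looks roughly like this, with illustrative bucket, collection, and record ids:

```python
from fnmatch import fnmatch

collection_uri = "/buckets/blog/collections/articles"
record_uri = collection_uri + "/records/abc123"

# delete_object_permissions(collection_uri) only covers the collection itself:
assert fnmatch(collection_uri, collection_uri)
assert not fnmatch(record_uri, collection_uri)

# delete_object_permissions(collection_uri + '/*') also covers its children,
# so the records' access control entries are removed as well:
assert fnmatch(record_uri, collection_uri + "/*")
```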
[ { "content": "import colander\nfrom pyramid.events import subscriber\n\nfrom kinto.core import resource, utils\nfrom kinto.core.events import ResourceChanged, ACTIONS\nfrom kinto.schema_validation import validate_from_bucket_schema_or_400, JSONSchemaMapping\n\n\nclass CollectionSchema(resource.ResourceSchema):\n schema = JSONSchemaMapping(missing=colander.drop)\n cache_expires = colander.SchemaNode(colander.Int(), missing=colander.drop)\n\n\[email protected](name='collection',\n collection_path='/buckets/{{bucket_id}}/collections',\n record_path='/buckets/{{bucket_id}}/collections/{{id}}')\nclass Collection(resource.ShareableResource):\n schema = CollectionSchema\n permissions = ('read', 'write', 'record:create')\n\n def get_parent_id(self, request):\n bucket_id = request.matchdict['bucket_id']\n parent_id = utils.instance_uri(request, 'bucket', id=bucket_id)\n return parent_id\n\n def process_record(self, new, old=None):\n \"\"\"Additional collection schema validation from bucket, if any.\"\"\"\n new = super().process_record(new, old)\n\n # Remove internal and auto-assigned fields.\n internal_fields = (self.model.id_field,\n self.model.modified_field,\n self.model.permissions_field)\n validate_from_bucket_schema_or_400(new, resource_name=\"collection\", request=self.request,\n ignore_fields=internal_fields)\n return new\n\n\n@subscriber(ResourceChanged,\n for_resources=('collection',),\n for_actions=(ACTIONS.DELETE,))\ndef on_collections_deleted(event):\n \"\"\"Some collections were deleted, delete records.\n \"\"\"\n storage = event.request.registry.storage\n permission = event.request.registry.permission\n\n for change in event.impacted_records:\n collection = change['old']\n bucket_id = event.payload['bucket_id']\n parent_id = utils.instance_uri(event.request, 'collection',\n bucket_id=bucket_id,\n id=collection['id'])\n storage.delete_all(collection_id=None,\n parent_id=parent_id,\n with_deleted=False)\n storage.purge_deleted(collection_id=None,\n parent_id=parent_id)\n permission.delete_object_permissions(parent_id)\n", "path": "kinto/views/collections.py" } ]
[ { "content": "import colander\nfrom pyramid.events import subscriber\n\nfrom kinto.core import resource, utils\nfrom kinto.core.events import ResourceChanged, ACTIONS\nfrom kinto.schema_validation import validate_from_bucket_schema_or_400, JSONSchemaMapping\n\n\nclass CollectionSchema(resource.ResourceSchema):\n schema = JSONSchemaMapping(missing=colander.drop)\n cache_expires = colander.SchemaNode(colander.Int(), missing=colander.drop)\n\n\[email protected](name='collection',\n collection_path='/buckets/{{bucket_id}}/collections',\n record_path='/buckets/{{bucket_id}}/collections/{{id}}')\nclass Collection(resource.ShareableResource):\n schema = CollectionSchema\n permissions = ('read', 'write', 'record:create')\n\n def get_parent_id(self, request):\n bucket_id = request.matchdict['bucket_id']\n parent_id = utils.instance_uri(request, 'bucket', id=bucket_id)\n return parent_id\n\n def process_record(self, new, old=None):\n \"\"\"Additional collection schema validation from bucket, if any.\"\"\"\n new = super().process_record(new, old)\n\n # Remove internal and auto-assigned fields.\n internal_fields = (self.model.id_field,\n self.model.modified_field,\n self.model.permissions_field)\n validate_from_bucket_schema_or_400(new, resource_name=\"collection\", request=self.request,\n ignore_fields=internal_fields)\n return new\n\n\n@subscriber(ResourceChanged,\n for_resources=('collection',),\n for_actions=(ACTIONS.DELETE,))\ndef on_collections_deleted(event):\n \"\"\"Some collections were deleted, delete records.\n \"\"\"\n storage = event.request.registry.storage\n permission = event.request.registry.permission\n\n for change in event.impacted_records:\n collection = change['old']\n bucket_id = event.payload['bucket_id']\n parent_id = utils.instance_uri(event.request, 'collection',\n bucket_id=bucket_id,\n id=collection['id'])\n storage.delete_all(collection_id=None,\n parent_id=parent_id,\n with_deleted=False)\n storage.purge_deleted(collection_id=None,\n parent_id=parent_id)\n permission.delete_object_permissions(parent_id + '/*')\n", "path": "kinto/views/collections.py" } ]
diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 193a066de..cb740b1a0 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -7,7 +7,8 @@ This document describes changes between each past release. 10.0.1 (unreleased) ------------------- -- Nothing changed yet. +**Bug fixes** +- Deleting a collection doesn't delete access_control_entrries for its children (fixes #1647) 10.0.0 (2018-08-16) @@ -35,11 +36,11 @@ This document describes changes between each past release. - Raise a configuration error if the ``kinto.plugin.accounts`` is included without being enabled in policies. Without this *kinto-admin* would present a confusing login experience (fixes #1734). - **Internal changes** - Upgrade kinto-admin to v1.20.0 + 9.2.3 (2018-07-05) ------------------ diff --git a/CONTRIBUTORS.rst b/CONTRIBUTORS.rst index c43d6a8c8..ab472d2d8 100644 --- a/CONTRIBUTORS.rst +++ b/CONTRIBUTORS.rst @@ -6,6 +6,7 @@ Contributors * Aditya Bhasin <[email protected]> * Aiman Parvaiz <[email protected]> * Anh <[email protected]> +* Alexander Ryabkov <[email protected]> * Alexis Metaireau <[email protected]> * Andy McKay <[email protected]> * Anthony Garuccio <[email protected]> diff --git a/kinto/views/collections.py b/kinto/views/collections.py index 8c3528d0e..e27388b89 100644 --- a/kinto/views/collections.py +++ b/kinto/views/collections.py @@ -56,4 +56,4 @@ def on_collections_deleted(event): with_deleted=False) storage.purge_deleted(collection_id=None, parent_id=parent_id) - permission.delete_object_permissions(parent_id) + permission.delete_object_permissions(parent_id + '/*') diff --git a/tests/test_views_collections.py b/tests/test_views_collections.py index f561963ba..544eaff29 100644 --- a/tests/test_views_collections.py +++ b/tests/test_views_collections.py @@ -158,6 +158,14 @@ def test_can_be_created_after_deletion_with_if_none_match_star(self): self.app.put_json(self.collection_url, MINIMALIST_COLLECTION, headers=headers, status=201) + def test_collection_permissions_are_removed_after_collection_deleted(self): + self.assertDictEqual(self.permission.get_object_permissions( + self.collection_url), {}) + + def test_records_permissions_are_removed_after_collection_deleted(self): + self.assertDictEqual(self.permission.get_object_permissions( + self.record_url), {}) + class CollectionCreationTest(BaseWebTest, unittest.TestCase):
Pycord-Development__pycord-2295
After is set to id=0 even if you pass a value for after in https://docs.pycord.dev/en/stable/api/models.html#discord.Guild.audit_logs

### Summary
After is not updated to the value passed. This results in all audit log entries being fetched.

### Reproduction Steps
1) Call audit_logs() with a value for after that is not the oldest entry id

### Minimal Reproducible Code
```python
object_id = discord.Object(id=any_audit_log_id)
async for entry in guild.audit_logs(after=object_id):
    pass
```

### Expected Results
Receive only the entries after the value passed as after.

### Actual Results
Received all entries, even the ones before the specified after value.

### Intents
view_audit_log

### System Information
- Python v3.11.1-final
- py-cord-dev v2.5.None-candidate
- py-cord-dev pkg_resources: v2.5.0rc5
- aiohttp v3.8.5
- system info: Windows 10 10.0.22631

### Checklist
- [X] I have searched the open issues for duplicates.
- [X] I have shown the entire traceback, if possible.
- [X] I have removed my token from display, if visible.

### Additional Context
Here is the initial question in the Discord help server: [initial question](https://discord.com/channels/881207955029110855/1132206148309749830/1186522479750029393)
Here is the problem in the code that I identified: [problem](https://discord.com/channels/881207955029110855/1132206148309749830/1186537206358229032)
Here is the potential solution: [solution](https://discord.com/channels/881207955029110855/1132206148309749830/1186541811683033168)
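For reference, the iterator backing `Guild.audit_logs` hard-codes `self.after = OLDEST_OBJECT` in its constructor (see `AuditLogIterator.__init__` in the file below), whereas `HistoryIterator` keeps the caller's value with `after or OLDEST_OBJECT`. A dependency-free sketch of that difference and of the presumed fix (the class names and entry id here are illustrative, not Pycord's actual code):

```python
class Object:
    """Stand-in for discord.Object, just enough for this illustration."""

    def __init__(self, id):
        self.id = id


OLDEST_OBJECT = Object(id=0)


class BuggyAuditLogIterator:
    def __init__(self, after=None):
        self.after = OLDEST_OBJECT           # caller's `after` is discarded


class FixedAuditLogIterator:
    def __init__(self, after=None):
        self.after = after or OLDEST_OBJECT  # what HistoryIterator already does


marker = Object(id=123456789012345678)       # some audit log entry id
assert BuggyAuditLogIterator(after=marker).after.id == 0   # fetches everything
assert FixedAuditLogIterator(after=marker).after.id == marker.id
```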
[ { "content": "\"\"\"\nThe MIT License (MIT)\n\nCopyright (c) 2015-2021 Rapptz\nCopyright (c) 2021-present Pycord Development\n\nPermission is hereby granted, free of charge, to any person obtaining a\ncopy of this software and associated documentation files (the \"Software\"),\nto deal in the Software without restriction, including without limitation\nthe rights to use, copy, modify, merge, publish, distribute, sublicense,\nand/or sell copies of the Software, and to permit persons to whom the\nSoftware is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\nOR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\nFROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\nDEALINGS IN THE SOFTWARE.\n\"\"\"\n\nfrom __future__ import annotations\n\nimport asyncio\nimport datetime\nfrom typing import (\n TYPE_CHECKING,\n Any,\n AsyncIterator,\n Awaitable,\n Callable,\n List,\n TypeVar,\n Union,\n)\n\nfrom .audit_logs import AuditLogEntry\nfrom .errors import NoMoreItems\nfrom .object import Object\nfrom .utils import maybe_coroutine, snowflake_time, time_snowflake\n\n__all__ = (\n \"ReactionIterator\",\n \"HistoryIterator\",\n \"AuditLogIterator\",\n \"GuildIterator\",\n \"MemberIterator\",\n \"ScheduledEventSubscribersIterator\",\n)\n\nif TYPE_CHECKING:\n from .abc import Snowflake\n from .guild import BanEntry, Guild\n from .member import Member\n from .message import Message\n from .scheduled_events import ScheduledEvent\n from .threads import Thread\n from .types.audit_log import AuditLog as AuditLogPayload\n from .types.guild import Guild as GuildPayload\n from .types.message import Message as MessagePayload\n from .types.threads import Thread as ThreadPayload\n from .types.user import PartialUser as PartialUserPayload\n from .user import User\n\nT = TypeVar(\"T\")\nOT = TypeVar(\"OT\")\n_Func = Callable[[T], Union[OT, Awaitable[OT]]]\n\nOLDEST_OBJECT = Object(id=0)\n\n\nclass _AsyncIterator(AsyncIterator[T]):\n __slots__ = ()\n\n async def next(self) -> T:\n raise NotImplementedError\n\n def get(self, **attrs: Any) -> Awaitable[T | None]:\n def predicate(elem: T):\n for attr, val in attrs.items():\n nested = attr.split(\"__\")\n obj = elem\n for attribute in nested:\n obj = getattr(obj, attribute)\n\n if obj != val:\n return False\n return True\n\n return self.find(predicate)\n\n async def find(self, predicate: _Func[T, bool]) -> T | None:\n while True:\n try:\n elem = await self.next()\n except NoMoreItems:\n return None\n\n ret = await maybe_coroutine(predicate, elem)\n if ret:\n return elem\n\n def chunk(self, max_size: int) -> _ChunkedAsyncIterator[T]:\n if max_size <= 0:\n raise ValueError(\"async iterator chunk sizes must be greater than 0.\")\n return _ChunkedAsyncIterator(self, max_size)\n\n def map(self, func: _Func[T, OT]) -> _MappedAsyncIterator[OT]:\n return _MappedAsyncIterator(self, func)\n\n def filter(self, predicate: _Func[T, bool]) -> _FilteredAsyncIterator[T]:\n return _FilteredAsyncIterator(self, predicate)\n\n async def flatten(self) -> list[T]:\n return [element async for element in self]\n\n async def 
__anext__(self) -> T:\n try:\n return await self.next()\n except NoMoreItems:\n raise StopAsyncIteration()\n\n\ndef _identity(x):\n return x\n\n\nclass _ChunkedAsyncIterator(_AsyncIterator[List[T]]):\n def __init__(self, iterator, max_size):\n self.iterator = iterator\n self.max_size = max_size\n\n async def next(self) -> list[T]:\n ret: list[T] = []\n n = 0\n while n < self.max_size:\n try:\n item = await self.iterator.next()\n except NoMoreItems:\n if ret:\n return ret\n raise\n else:\n ret.append(item)\n n += 1\n return ret\n\n\nclass _MappedAsyncIterator(_AsyncIterator[T]):\n def __init__(self, iterator, func):\n self.iterator = iterator\n self.func = func\n\n async def next(self) -> T:\n # this raises NoMoreItems and will propagate appropriately\n item = await self.iterator.next()\n return await maybe_coroutine(self.func, item)\n\n\nclass _FilteredAsyncIterator(_AsyncIterator[T]):\n def __init__(self, iterator, predicate):\n self.iterator = iterator\n\n if predicate is None:\n predicate = _identity\n\n self.predicate = predicate\n\n async def next(self) -> T:\n getter = self.iterator.next\n pred = self.predicate\n while True:\n # propagate NoMoreItems similar to _MappedAsyncIterator\n item = await getter()\n ret = await maybe_coroutine(pred, item)\n if ret:\n return item\n\n\nclass ReactionIterator(_AsyncIterator[Union[\"User\", \"Member\"]]):\n def __init__(self, message, emoji, limit=100, after=None, type=None):\n self.message = message\n self.limit = limit\n self.after = after\n self.type = type\n state = message._state\n self.getter = state.http.get_reaction_users\n self.state = state\n self.emoji = emoji\n self.guild = message.guild\n self.channel_id = message.channel.id\n self.users = asyncio.Queue()\n\n async def next(self) -> User | Member:\n if self.users.empty():\n await self.fill_users()\n\n try:\n return self.users.get_nowait()\n except asyncio.QueueEmpty:\n raise NoMoreItems()\n\n async def fill_users(self):\n # this is a hack because >circular imports<\n from .user import User\n\n if self.limit > 0:\n retrieve = min(self.limit, 100)\n\n after = self.after.id if self.after else None\n data: list[PartialUserPayload] = await self.getter(\n self.channel_id,\n self.message.id,\n self.emoji,\n retrieve,\n after=after,\n type=self.type,\n )\n\n if data:\n self.limit -= retrieve\n self.after = Object(id=int(data[-1][\"id\"]))\n\n for element in reversed(data):\n if self.guild is None or isinstance(self.guild, Object):\n await self.users.put(User(state=self.state, data=element))\n else:\n member_id = int(element[\"id\"])\n member = self.guild.get_member(member_id)\n if member is not None:\n await self.users.put(member)\n else:\n await self.users.put(User(state=self.state, data=element))\n\n\nclass HistoryIterator(_AsyncIterator[\"Message\"]):\n \"\"\"Iterator for receiving a channel's message history.\n\n The messages endpoint has two behaviours we care about here:\n If ``before`` is specified, the messages endpoint returns the `limit`\n newest messages before ``before``, sorted with newest first. For filling over\n 100 messages, update the ``before`` parameter to the oldest message received.\n Messages will be returned in order by time.\n If ``after`` is specified, it returns the ``limit`` oldest messages after\n ``after``, sorted with newest first. For filling over 100 messages, update the\n ``after`` parameter to the newest message received. 
If messages are not\n reversed, they will be out of order (99-0, 199-100, so on)\n\n A note that if both ``before`` and ``after`` are specified, ``before`` is ignored by the\n messages endpoint.\n\n Parameters\n ----------\n messageable: :class:`abc.Messageable`\n Messageable class to retrieve message history from.\n limit: :class:`int`\n Maximum number of messages to retrieve\n before: Optional[Union[:class:`abc.Snowflake`, :class:`datetime.datetime`]]\n Message before which all messages must be.\n after: Optional[Union[:class:`abc.Snowflake`, :class:`datetime.datetime`]]\n Message after which all messages must be.\n around: Optional[Union[:class:`abc.Snowflake`, :class:`datetime.datetime`]]\n Message around which all messages must be. Limit max 101. Note that if\n limit is an even number, this will return at most limit+1 messages.\n oldest_first: Optional[:class:`bool`]\n If set to ``True``, return messages in oldest->newest order. Defaults to\n ``True`` if `after` is specified, otherwise ``False``.\n \"\"\"\n\n def __init__(\n self,\n messageable,\n limit,\n before=None,\n after=None,\n around=None,\n oldest_first=None,\n ):\n if isinstance(before, datetime.datetime):\n before = Object(id=time_snowflake(before, high=False))\n if isinstance(after, datetime.datetime):\n after = Object(id=time_snowflake(after, high=True))\n if isinstance(around, datetime.datetime):\n around = Object(id=time_snowflake(around))\n\n self.reverse = after is not None if oldest_first is None else oldest_first\n self.messageable = messageable\n self.limit = limit\n self.before = before\n self.after = after or OLDEST_OBJECT\n self.around = around\n\n self._filter = None # message dict -> bool\n\n self.state = self.messageable._state\n self.logs_from = self.state.http.logs_from\n self.messages = asyncio.Queue()\n\n if self.around:\n if self.limit is None:\n raise ValueError(\"history does not support around with limit=None\")\n if self.limit > 101:\n raise ValueError(\n \"history max limit 101 when specifying around parameter\"\n )\n elif self.limit == 101:\n self.limit = 100 # Thanks discord\n\n self._retrieve_messages = self._retrieve_messages_around_strategy # type: ignore\n if self.before and self.after:\n self._filter = lambda m: self.after.id < int(m[\"id\"]) < self.before.id\n elif self.before:\n self._filter = lambda m: int(m[\"id\"]) < self.before.id\n elif self.after:\n self._filter = lambda m: self.after.id < int(m[\"id\"])\n elif self.reverse:\n self._retrieve_messages = self._retrieve_messages_after_strategy # type: ignore\n if self.before:\n self._filter = lambda m: int(m[\"id\"]) < self.before.id\n else:\n self._retrieve_messages = self._retrieve_messages_before_strategy # type: ignore\n if self.after and self.after != OLDEST_OBJECT:\n self._filter = lambda m: int(m[\"id\"]) > self.after.id\n\n async def next(self) -> Message:\n if self.messages.empty():\n await self.fill_messages()\n\n try:\n return self.messages.get_nowait()\n except asyncio.QueueEmpty:\n raise NoMoreItems()\n\n def _get_retrieve(self):\n l = self.limit\n if l is None or l > 100:\n r = 100\n else:\n r = l\n self.retrieve = r\n return r > 0\n\n async def fill_messages(self):\n if not hasattr(self, \"channel\"):\n # do the required set up\n channel = await self.messageable._get_channel()\n self.channel = channel\n\n if self._get_retrieve():\n data = await self._retrieve_messages(self.retrieve)\n if len(data) < 100:\n self.limit = 0 # terminate the infinite loop\n\n if self.reverse:\n data = reversed(data)\n if self._filter:\n data = 
filter(self._filter, data)\n\n channel = self.channel\n for element in data:\n await self.messages.put(\n self.state.create_message(channel=channel, data=element)\n )\n\n async def _retrieve_messages(self, retrieve) -> list[Message]:\n \"\"\"Retrieve messages and update next parameters.\"\"\"\n raise NotImplementedError\n\n async def _retrieve_messages_before_strategy(self, retrieve):\n \"\"\"Retrieve messages using before parameter.\"\"\"\n before = self.before.id if self.before else None\n data: list[MessagePayload] = await self.logs_from(\n self.channel.id, retrieve, before=before\n )\n if len(data):\n if self.limit is not None:\n self.limit -= retrieve\n self.before = Object(id=int(data[-1][\"id\"]))\n return data\n\n async def _retrieve_messages_after_strategy(self, retrieve):\n \"\"\"Retrieve messages using after parameter.\"\"\"\n after = self.after.id if self.after else None\n data: list[MessagePayload] = await self.logs_from(\n self.channel.id, retrieve, after=after\n )\n if len(data):\n if self.limit is not None:\n self.limit -= retrieve\n self.after = Object(id=int(data[0][\"id\"]))\n return data\n\n async def _retrieve_messages_around_strategy(self, retrieve):\n \"\"\"Retrieve messages using around parameter.\"\"\"\n if self.around:\n around = self.around.id if self.around else None\n data: list[MessagePayload] = await self.logs_from(\n self.channel.id, retrieve, around=around\n )\n self.around = None\n return data\n return []\n\n\nclass AuditLogIterator(_AsyncIterator[\"AuditLogEntry\"]):\n def __init__(\n self,\n guild,\n limit=None,\n before=None,\n after=None,\n oldest_first=None,\n user_id=None,\n action_type=None,\n ):\n if isinstance(before, datetime.datetime):\n before = Object(id=time_snowflake(before, high=False))\n if isinstance(after, datetime.datetime):\n after = Object(id=time_snowflake(after, high=True))\n\n self.reverse = after is not None if oldest_first is None else oldest_first\n self.guild = guild\n self.loop = guild._state.loop\n self.request = guild._state.http.get_audit_logs\n self.limit = limit\n self.before = before\n self.user_id = user_id\n self.action_type = action_type\n self.after = OLDEST_OBJECT\n self._users = {}\n self._state = guild._state\n\n self._filter = None # entry dict -> bool\n\n self.entries = asyncio.Queue()\n\n if self.reverse:\n self._strategy = self._after_strategy\n if self.before:\n self._filter = lambda m: int(m[\"id\"]) < self.before.id\n else:\n self._strategy = self._before_strategy\n if self.after and self.after != OLDEST_OBJECT:\n self._filter = lambda m: int(m[\"id\"]) > self.after.id\n\n async def _before_strategy(self, retrieve):\n before = self.before.id if self.before else None\n data: AuditLogPayload = await self.request(\n self.guild.id,\n limit=retrieve,\n user_id=self.user_id,\n action_type=self.action_type,\n before=before,\n )\n\n entries = data.get(\"audit_log_entries\", [])\n if len(data) and entries:\n if self.limit is not None:\n self.limit -= retrieve\n self.before = Object(id=int(entries[-1][\"id\"]))\n return data.get(\"users\", []), entries\n\n async def _after_strategy(self, retrieve):\n after = self.after.id if self.after else None\n data: AuditLogPayload = await self.request(\n self.guild.id,\n limit=retrieve,\n user_id=self.user_id,\n action_type=self.action_type,\n after=after,\n )\n entries = data.get(\"audit_log_entries\", [])\n if len(data) and entries:\n if self.limit is not None:\n self.limit -= retrieve\n self.after = Object(id=int(entries[0][\"id\"]))\n return data.get(\"users\", []), 
entries\n\n async def next(self) -> AuditLogEntry:\n if self.entries.empty():\n await self._fill()\n\n try:\n return self.entries.get_nowait()\n except asyncio.QueueEmpty:\n raise NoMoreItems()\n\n def _get_retrieve(self):\n l = self.limit\n if l is None or l > 100:\n r = 100\n else:\n r = l\n self.retrieve = r\n return r > 0\n\n async def _fill(self):\n from .user import User\n\n if self._get_retrieve():\n users, data = await self._strategy(self.retrieve)\n if len(data) < 100:\n self.limit = 0 # terminate the infinite loop\n\n if self.reverse:\n data = reversed(data)\n if self._filter:\n data = filter(self._filter, data)\n\n for user in users:\n u = User(data=user, state=self._state)\n self._users[u.id] = u\n\n for element in data:\n # TODO: remove this if statement later\n if element[\"action_type\"] is None:\n continue\n\n await self.entries.put(\n AuditLogEntry(data=element, users=self._users, guild=self.guild)\n )\n\n\nclass GuildIterator(_AsyncIterator[\"Guild\"]):\n \"\"\"Iterator for receiving the client's guilds.\n\n The guilds endpoint has the same two behaviours as described\n in :class:`HistoryIterator`:\n If ``before`` is specified, the guilds endpoint returns the ``limit``\n newest guilds before ``before``, sorted with newest first. For filling over\n 100 guilds, update the ``before`` parameter to the oldest guild received.\n Guilds will be returned in order by time.\n If `after` is specified, it returns the ``limit`` oldest guilds after ``after``,\n sorted with newest first. For filling over 100 guilds, update the ``after``\n parameter to the newest guild received, If guilds are not reversed, they\n will be out of order (99-0, 199-100, so on)\n\n Not that if both ``before`` and ``after`` are specified, ``before`` is ignored by the\n guilds endpoint.\n\n Parameters\n ----------\n bot: :class:`discord.Client`\n The client to retrieve the guilds from.\n limit: :class:`int`\n Maximum number of guilds to retrieve.\n before: Optional[Union[:class:`abc.Snowflake`, :class:`datetime.datetime`]]\n Object before which all guilds must be.\n after: Optional[Union[:class:`abc.Snowflake`, :class:`datetime.datetime`]]\n Object after which all guilds must be.\n \"\"\"\n\n def __init__(self, bot, limit, before=None, after=None):\n if isinstance(before, datetime.datetime):\n before = Object(id=time_snowflake(before, high=False))\n if isinstance(after, datetime.datetime):\n after = Object(id=time_snowflake(after, high=True))\n\n self.bot = bot\n self.limit = limit\n self.before = before\n self.after = after\n\n self._filter = None\n\n self.state = self.bot._connection\n self.get_guilds = self.bot.http.get_guilds\n self.guilds = asyncio.Queue()\n\n if self.before and self.after:\n self._retrieve_guilds = self._retrieve_guilds_before_strategy # type: ignore\n self._filter = lambda m: int(m[\"id\"]) > self.after.id\n elif self.after:\n self._retrieve_guilds = self._retrieve_guilds_after_strategy # type: ignore\n else:\n self._retrieve_guilds = self._retrieve_guilds_before_strategy # type: ignore\n\n async def next(self) -> Guild:\n if self.guilds.empty():\n await self.fill_guilds()\n\n try:\n return self.guilds.get_nowait()\n except asyncio.QueueEmpty:\n raise NoMoreItems()\n\n def _get_retrieve(self):\n l = self.limit\n if l is None or l > 100:\n r = 100\n else:\n r = l\n self.retrieve = r\n return r > 0\n\n def create_guild(self, data):\n from .guild import Guild\n\n return Guild(state=self.state, data=data)\n\n async def fill_guilds(self):\n if self._get_retrieve():\n data = await 
self._retrieve_guilds(self.retrieve)\n if self.limit is None or len(data) < 100:\n self.limit = 0\n\n if self._filter:\n data = filter(self._filter, data)\n\n for element in data:\n await self.guilds.put(self.create_guild(element))\n\n async def _retrieve_guilds(self, retrieve) -> list[Guild]:\n \"\"\"Retrieve guilds and update next parameters.\"\"\"\n raise NotImplementedError\n\n async def _retrieve_guilds_before_strategy(self, retrieve):\n \"\"\"Retrieve guilds using before parameter.\"\"\"\n before = self.before.id if self.before else None\n data: list[GuildPayload] = await self.get_guilds(retrieve, before=before)\n if len(data):\n if self.limit is not None:\n self.limit -= retrieve\n self.before = Object(id=int(data[-1][\"id\"]))\n return data\n\n async def _retrieve_guilds_after_strategy(self, retrieve):\n \"\"\"Retrieve guilds using after parameter.\"\"\"\n after = self.after.id if self.after else None\n data: list[GuildPayload] = await self.get_guilds(retrieve, after=after)\n if len(data):\n if self.limit is not None:\n self.limit -= retrieve\n self.after = Object(id=int(data[0][\"id\"]))\n return data\n\n\nclass MemberIterator(_AsyncIterator[\"Member\"]):\n def __init__(self, guild, limit=1000, after=None):\n if isinstance(after, datetime.datetime):\n after = Object(id=time_snowflake(after, high=True))\n\n self.guild = guild\n self.limit = limit\n self.after = after or OLDEST_OBJECT\n\n self.state = self.guild._state\n self.get_members = self.state.http.get_members\n self.members = asyncio.Queue()\n\n async def next(self) -> Member:\n if self.members.empty():\n await self.fill_members()\n\n try:\n return self.members.get_nowait()\n except asyncio.QueueEmpty:\n raise NoMoreItems()\n\n def _get_retrieve(self):\n l = self.limit\n if l is None or l > 1000:\n r = 1000\n else:\n r = l\n self.retrieve = r\n return r > 0\n\n async def fill_members(self):\n if not self._get_retrieve():\n return\n after = self.after.id if self.after else None\n data = await self.get_members(self.guild.id, self.retrieve, after)\n if not data:\n # no data, terminate\n return\n\n if len(data) < 1000:\n self.limit = 0 # terminate loop\n\n self.after = Object(id=int(data[-1][\"user\"][\"id\"]))\n\n for element in reversed(data):\n await self.members.put(self.create_member(element))\n\n def create_member(self, data):\n from .member import Member\n\n return Member(data=data, guild=self.guild, state=self.state)\n\n\nclass BanIterator(_AsyncIterator[\"BanEntry\"]):\n def __init__(self, guild, limit=None, before=None, after=None):\n self.guild = guild\n self.limit = limit\n self.after = after\n self.before = before\n\n self.state = self.guild._state\n self.get_bans = self.state.http.get_bans\n self.bans = asyncio.Queue()\n\n async def next(self) -> BanEntry:\n if self.bans.empty():\n await self.fill_bans()\n\n try:\n return self.bans.get_nowait()\n except asyncio.QueueEmpty:\n raise NoMoreItems()\n\n def _get_retrieve(self):\n l = self.limit\n if l is None or l > 1000:\n r = 1000\n else:\n r = l\n self.retrieve = r\n return r > 0\n\n async def fill_bans(self):\n if not self._get_retrieve():\n return\n before = self.before.id if self.before else None\n after = self.after.id if self.after else None\n data = await self.get_bans(self.guild.id, self.retrieve, before, after)\n if not data:\n # no data, terminate\n return\n if self.limit:\n self.limit -= self.retrieve\n\n if len(data) < 1000:\n self.limit = 0 # terminate loop\n\n self.after = Object(id=int(data[-1][\"user\"][\"id\"]))\n\n for element in reversed(data):\n 
await self.bans.put(self.create_ban(element))\n\n def create_ban(self, data):\n from .guild import BanEntry\n from .user import User\n\n return BanEntry(\n reason=data[\"reason\"], user=User(state=self.state, data=data[\"user\"])\n )\n\n\nclass ArchivedThreadIterator(_AsyncIterator[\"Thread\"]):\n def __init__(\n self,\n channel_id: int,\n guild: Guild,\n limit: int | None,\n joined: bool,\n private: bool,\n before: Snowflake | datetime.datetime | None = None,\n ):\n self.channel_id = channel_id\n self.guild = guild\n self.limit = limit\n self.joined = joined\n self.private = private\n self.http = guild._state.http\n\n if joined and not private:\n raise ValueError(\"Cannot iterate over joined public archived threads\")\n\n self.before: str | None\n if before is None:\n self.before = None\n elif isinstance(before, datetime.datetime):\n if joined:\n self.before = str(time_snowflake(before, high=False))\n else:\n self.before = before.isoformat()\n else:\n if joined:\n self.before = str(before.id)\n else:\n self.before = snowflake_time(before.id).isoformat()\n\n self.update_before: Callable[[ThreadPayload], str] = self.get_archive_timestamp\n\n if joined:\n self.endpoint = self.http.get_joined_private_archived_threads\n self.update_before = self.get_thread_id\n elif private:\n self.endpoint = self.http.get_private_archived_threads\n else:\n self.endpoint = self.http.get_public_archived_threads\n\n self.queue: asyncio.Queue[Thread] = asyncio.Queue()\n self.has_more: bool = True\n\n async def next(self) -> Thread:\n if self.queue.empty():\n await self.fill_queue()\n\n try:\n return self.queue.get_nowait()\n except asyncio.QueueEmpty:\n raise NoMoreItems()\n\n @staticmethod\n def get_archive_timestamp(data: ThreadPayload) -> str:\n return data[\"thread_metadata\"][\"archive_timestamp\"]\n\n @staticmethod\n def get_thread_id(data: ThreadPayload) -> str:\n return data[\"id\"] # type: ignore\n\n async def fill_queue(self) -> None:\n if not self.has_more:\n raise NoMoreItems()\n\n limit = 50 if self.limit is None else max(self.limit, 50)\n data = await self.endpoint(self.channel_id, before=self.before, limit=limit)\n\n # This stuff is obviously WIP because 'members' is always empty\n threads: list[ThreadPayload] = data.get(\"threads\", [])\n for d in reversed(threads):\n self.queue.put_nowait(self.create_thread(d))\n\n self.has_more = data.get(\"has_more\", False)\n if self.limit is not None:\n self.limit -= len(threads)\n if self.limit <= 0:\n self.has_more = False\n\n if self.has_more:\n self.before = self.update_before(threads[-1])\n\n def create_thread(self, data: ThreadPayload) -> Thread:\n from .threads import Thread\n\n return Thread(guild=self.guild, state=self.guild._state, data=data)\n\n\nclass ScheduledEventSubscribersIterator(_AsyncIterator[Union[\"User\", \"Member\"]]):\n def __init__(\n self,\n event: ScheduledEvent,\n limit: int,\n with_member: bool = False,\n before: datetime.datetime | int = None,\n after: datetime.datetime | int = None,\n ):\n if isinstance(before, datetime.datetime):\n before = Object(id=time_snowflake(before, high=False))\n if isinstance(after, datetime.datetime):\n after = Object(id=time_snowflake(after, high=True))\n\n self.event = event\n self.limit = limit\n self.with_member = with_member\n self.before = before\n self.after = after\n\n self.subscribers = asyncio.Queue()\n self.get_subscribers = self.event._state.http.get_scheduled_event_users\n\n async def next(self) -> User | Member:\n if self.subscribers.empty():\n await self.fill_subs()\n\n try:\n return 
self.subscribers.get_nowait()\n except asyncio.QueueEmpty:\n raise NoMoreItems()\n\n def _get_retrieve(self):\n l = self.limit\n if l is None or l > 100:\n r = 100\n else:\n r = l\n self.retrieve = r\n return r > 0\n\n def member_from_payload(self, data):\n from .member import Member\n\n user = data.pop(\"user\")\n\n member = data.pop(\"member\")\n member[\"user\"] = user\n\n return Member(data=member, guild=self.event.guild, state=self.event._state)\n\n def user_from_payload(self, data):\n from .user import User\n\n user = data.pop(\"user\")\n\n return User(state=self.event._state, data=user)\n\n async def fill_subs(self):\n if not self._get_retrieve():\n return\n before = self.before.id if self.before else None\n after = self.after.id if self.after else None\n data = await self.get_subscribers(\n guild_id=self.event.guild.id,\n event_id=self.event.id,\n limit=self.retrieve,\n with_member=self.with_member,\n before=before,\n after=after,\n )\n if data:\n self.limit -= self.retrieve\n\n for element in reversed(data):\n if \"member\" in element:\n await self.subscribers.put(self.member_from_payload(element))\n else:\n await self.subscribers.put(self.user_from_payload(element))\n", "path": "discord/iterators.py" } ]
[ { "content": "\"\"\"\nThe MIT License (MIT)\n\nCopyright (c) 2015-2021 Rapptz\nCopyright (c) 2021-present Pycord Development\n\nPermission is hereby granted, free of charge, to any person obtaining a\ncopy of this software and associated documentation files (the \"Software\"),\nto deal in the Software without restriction, including without limitation\nthe rights to use, copy, modify, merge, publish, distribute, sublicense,\nand/or sell copies of the Software, and to permit persons to whom the\nSoftware is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\nOR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\nFROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\nDEALINGS IN THE SOFTWARE.\n\"\"\"\n\nfrom __future__ import annotations\n\nimport asyncio\nimport datetime\nfrom typing import (\n TYPE_CHECKING,\n Any,\n AsyncIterator,\n Awaitable,\n Callable,\n List,\n TypeVar,\n Union,\n)\n\nfrom .audit_logs import AuditLogEntry\nfrom .errors import NoMoreItems\nfrom .object import Object\nfrom .utils import maybe_coroutine, snowflake_time, time_snowflake\n\n__all__ = (\n \"ReactionIterator\",\n \"HistoryIterator\",\n \"AuditLogIterator\",\n \"GuildIterator\",\n \"MemberIterator\",\n \"ScheduledEventSubscribersIterator\",\n)\n\nif TYPE_CHECKING:\n from .abc import Snowflake\n from .guild import BanEntry, Guild\n from .member import Member\n from .message import Message\n from .scheduled_events import ScheduledEvent\n from .threads import Thread\n from .types.audit_log import AuditLog as AuditLogPayload\n from .types.guild import Guild as GuildPayload\n from .types.message import Message as MessagePayload\n from .types.threads import Thread as ThreadPayload\n from .types.user import PartialUser as PartialUserPayload\n from .user import User\n\nT = TypeVar(\"T\")\nOT = TypeVar(\"OT\")\n_Func = Callable[[T], Union[OT, Awaitable[OT]]]\n\nOLDEST_OBJECT = Object(id=0)\n\n\nclass _AsyncIterator(AsyncIterator[T]):\n __slots__ = ()\n\n async def next(self) -> T:\n raise NotImplementedError\n\n def get(self, **attrs: Any) -> Awaitable[T | None]:\n def predicate(elem: T):\n for attr, val in attrs.items():\n nested = attr.split(\"__\")\n obj = elem\n for attribute in nested:\n obj = getattr(obj, attribute)\n\n if obj != val:\n return False\n return True\n\n return self.find(predicate)\n\n async def find(self, predicate: _Func[T, bool]) -> T | None:\n while True:\n try:\n elem = await self.next()\n except NoMoreItems:\n return None\n\n ret = await maybe_coroutine(predicate, elem)\n if ret:\n return elem\n\n def chunk(self, max_size: int) -> _ChunkedAsyncIterator[T]:\n if max_size <= 0:\n raise ValueError(\"async iterator chunk sizes must be greater than 0.\")\n return _ChunkedAsyncIterator(self, max_size)\n\n def map(self, func: _Func[T, OT]) -> _MappedAsyncIterator[OT]:\n return _MappedAsyncIterator(self, func)\n\n def filter(self, predicate: _Func[T, bool]) -> _FilteredAsyncIterator[T]:\n return _FilteredAsyncIterator(self, predicate)\n\n async def flatten(self) -> list[T]:\n return [element async for element in self]\n\n async def 
__anext__(self) -> T:\n try:\n return await self.next()\n except NoMoreItems:\n raise StopAsyncIteration()\n\n\ndef _identity(x):\n return x\n\n\nclass _ChunkedAsyncIterator(_AsyncIterator[List[T]]):\n def __init__(self, iterator, max_size):\n self.iterator = iterator\n self.max_size = max_size\n\n async def next(self) -> list[T]:\n ret: list[T] = []\n n = 0\n while n < self.max_size:\n try:\n item = await self.iterator.next()\n except NoMoreItems:\n if ret:\n return ret\n raise\n else:\n ret.append(item)\n n += 1\n return ret\n\n\nclass _MappedAsyncIterator(_AsyncIterator[T]):\n def __init__(self, iterator, func):\n self.iterator = iterator\n self.func = func\n\n async def next(self) -> T:\n # this raises NoMoreItems and will propagate appropriately\n item = await self.iterator.next()\n return await maybe_coroutine(self.func, item)\n\n\nclass _FilteredAsyncIterator(_AsyncIterator[T]):\n def __init__(self, iterator, predicate):\n self.iterator = iterator\n\n if predicate is None:\n predicate = _identity\n\n self.predicate = predicate\n\n async def next(self) -> T:\n getter = self.iterator.next\n pred = self.predicate\n while True:\n # propagate NoMoreItems similar to _MappedAsyncIterator\n item = await getter()\n ret = await maybe_coroutine(pred, item)\n if ret:\n return item\n\n\nclass ReactionIterator(_AsyncIterator[Union[\"User\", \"Member\"]]):\n def __init__(self, message, emoji, limit=100, after=None, type=None):\n self.message = message\n self.limit = limit\n self.after = after\n self.type = type\n state = message._state\n self.getter = state.http.get_reaction_users\n self.state = state\n self.emoji = emoji\n self.guild = message.guild\n self.channel_id = message.channel.id\n self.users = asyncio.Queue()\n\n async def next(self) -> User | Member:\n if self.users.empty():\n await self.fill_users()\n\n try:\n return self.users.get_nowait()\n except asyncio.QueueEmpty:\n raise NoMoreItems()\n\n async def fill_users(self):\n # this is a hack because >circular imports<\n from .user import User\n\n if self.limit > 0:\n retrieve = min(self.limit, 100)\n\n after = self.after.id if self.after else None\n data: list[PartialUserPayload] = await self.getter(\n self.channel_id,\n self.message.id,\n self.emoji,\n retrieve,\n after=after,\n type=self.type,\n )\n\n if data:\n self.limit -= retrieve\n self.after = Object(id=int(data[-1][\"id\"]))\n\n for element in reversed(data):\n if self.guild is None or isinstance(self.guild, Object):\n await self.users.put(User(state=self.state, data=element))\n else:\n member_id = int(element[\"id\"])\n member = self.guild.get_member(member_id)\n if member is not None:\n await self.users.put(member)\n else:\n await self.users.put(User(state=self.state, data=element))\n\n\nclass HistoryIterator(_AsyncIterator[\"Message\"]):\n \"\"\"Iterator for receiving a channel's message history.\n\n The messages endpoint has two behaviours we care about here:\n If ``before`` is specified, the messages endpoint returns the `limit`\n newest messages before ``before``, sorted with newest first. For filling over\n 100 messages, update the ``before`` parameter to the oldest message received.\n Messages will be returned in order by time.\n If ``after`` is specified, it returns the ``limit`` oldest messages after\n ``after``, sorted with newest first. For filling over 100 messages, update the\n ``after`` parameter to the newest message received. 
If messages are not\n reversed, they will be out of order (99-0, 199-100, so on)\n\n A note that if both ``before`` and ``after`` are specified, ``before`` is ignored by the\n messages endpoint.\n\n Parameters\n ----------\n messageable: :class:`abc.Messageable`\n Messageable class to retrieve message history from.\n limit: :class:`int`\n Maximum number of messages to retrieve\n before: Optional[Union[:class:`abc.Snowflake`, :class:`datetime.datetime`]]\n Message before which all messages must be.\n after: Optional[Union[:class:`abc.Snowflake`, :class:`datetime.datetime`]]\n Message after which all messages must be.\n around: Optional[Union[:class:`abc.Snowflake`, :class:`datetime.datetime`]]\n Message around which all messages must be. Limit max 101. Note that if\n limit is an even number, this will return at most limit+1 messages.\n oldest_first: Optional[:class:`bool`]\n If set to ``True``, return messages in oldest->newest order. Defaults to\n ``True`` if `after` is specified, otherwise ``False``.\n \"\"\"\n\n def __init__(\n self,\n messageable,\n limit,\n before=None,\n after=None,\n around=None,\n oldest_first=None,\n ):\n if isinstance(before, datetime.datetime):\n before = Object(id=time_snowflake(before, high=False))\n if isinstance(after, datetime.datetime):\n after = Object(id=time_snowflake(after, high=True))\n if isinstance(around, datetime.datetime):\n around = Object(id=time_snowflake(around))\n\n self.reverse = after is not None if oldest_first is None else oldest_first\n self.messageable = messageable\n self.limit = limit\n self.before = before\n self.after = after or OLDEST_OBJECT\n self.around = around\n\n self._filter = None # message dict -> bool\n\n self.state = self.messageable._state\n self.logs_from = self.state.http.logs_from\n self.messages = asyncio.Queue()\n\n if self.around:\n if self.limit is None:\n raise ValueError(\"history does not support around with limit=None\")\n if self.limit > 101:\n raise ValueError(\n \"history max limit 101 when specifying around parameter\"\n )\n elif self.limit == 101:\n self.limit = 100 # Thanks discord\n\n self._retrieve_messages = self._retrieve_messages_around_strategy # type: ignore\n if self.before and self.after:\n self._filter = lambda m: self.after.id < int(m[\"id\"]) < self.before.id\n elif self.before:\n self._filter = lambda m: int(m[\"id\"]) < self.before.id\n elif self.after:\n self._filter = lambda m: self.after.id < int(m[\"id\"])\n elif self.reverse:\n self._retrieve_messages = self._retrieve_messages_after_strategy # type: ignore\n if self.before:\n self._filter = lambda m: int(m[\"id\"]) < self.before.id\n else:\n self._retrieve_messages = self._retrieve_messages_before_strategy # type: ignore\n if self.after and self.after != OLDEST_OBJECT:\n self._filter = lambda m: int(m[\"id\"]) > self.after.id\n\n async def next(self) -> Message:\n if self.messages.empty():\n await self.fill_messages()\n\n try:\n return self.messages.get_nowait()\n except asyncio.QueueEmpty:\n raise NoMoreItems()\n\n def _get_retrieve(self):\n l = self.limit\n if l is None or l > 100:\n r = 100\n else:\n r = l\n self.retrieve = r\n return r > 0\n\n async def fill_messages(self):\n if not hasattr(self, \"channel\"):\n # do the required set up\n channel = await self.messageable._get_channel()\n self.channel = channel\n\n if self._get_retrieve():\n data = await self._retrieve_messages(self.retrieve)\n if len(data) < 100:\n self.limit = 0 # terminate the infinite loop\n\n if self.reverse:\n data = reversed(data)\n if self._filter:\n data = 
filter(self._filter, data)\n\n channel = self.channel\n for element in data:\n await self.messages.put(\n self.state.create_message(channel=channel, data=element)\n )\n\n async def _retrieve_messages(self, retrieve) -> list[Message]:\n \"\"\"Retrieve messages and update next parameters.\"\"\"\n raise NotImplementedError\n\n async def _retrieve_messages_before_strategy(self, retrieve):\n \"\"\"Retrieve messages using before parameter.\"\"\"\n before = self.before.id if self.before else None\n data: list[MessagePayload] = await self.logs_from(\n self.channel.id, retrieve, before=before\n )\n if len(data):\n if self.limit is not None:\n self.limit -= retrieve\n self.before = Object(id=int(data[-1][\"id\"]))\n return data\n\n async def _retrieve_messages_after_strategy(self, retrieve):\n \"\"\"Retrieve messages using after parameter.\"\"\"\n after = self.after.id if self.after else None\n data: list[MessagePayload] = await self.logs_from(\n self.channel.id, retrieve, after=after\n )\n if len(data):\n if self.limit is not None:\n self.limit -= retrieve\n self.after = Object(id=int(data[0][\"id\"]))\n return data\n\n async def _retrieve_messages_around_strategy(self, retrieve):\n \"\"\"Retrieve messages using around parameter.\"\"\"\n if self.around:\n around = self.around.id if self.around else None\n data: list[MessagePayload] = await self.logs_from(\n self.channel.id, retrieve, around=around\n )\n self.around = None\n return data\n return []\n\n\nclass AuditLogIterator(_AsyncIterator[\"AuditLogEntry\"]):\n def __init__(\n self,\n guild,\n limit=None,\n before=None,\n after=None,\n oldest_first=None,\n user_id=None,\n action_type=None,\n ):\n if isinstance(before, datetime.datetime):\n before = Object(id=time_snowflake(before, high=False))\n if isinstance(after, datetime.datetime):\n after = Object(id=time_snowflake(after, high=True))\n\n self.reverse = after is not None if oldest_first is None else oldest_first\n self.guild = guild\n self.loop = guild._state.loop\n self.request = guild._state.http.get_audit_logs\n self.limit = limit\n self.before = before\n self.user_id = user_id\n self.action_type = action_type\n self.after = after or OLDEST_OBJECT\n self._users = {}\n self._state = guild._state\n\n self._filter = None # entry dict -> bool\n\n self.entries = asyncio.Queue()\n\n if self.reverse:\n self._strategy = self._after_strategy\n if self.before:\n self._filter = lambda m: int(m[\"id\"]) < self.before.id\n else:\n self._strategy = self._before_strategy\n if self.after and self.after != OLDEST_OBJECT:\n self._filter = lambda m: int(m[\"id\"]) > self.after.id\n\n async def _before_strategy(self, retrieve):\n before = self.before.id if self.before else None\n data: AuditLogPayload = await self.request(\n self.guild.id,\n limit=retrieve,\n user_id=self.user_id,\n action_type=self.action_type,\n before=before,\n )\n\n entries = data.get(\"audit_log_entries\", [])\n if len(data) and entries:\n if self.limit is not None:\n self.limit -= retrieve\n self.before = Object(id=int(entries[-1][\"id\"]))\n return data.get(\"users\", []), entries\n\n async def _after_strategy(self, retrieve):\n after = self.after.id if self.after else None\n data: AuditLogPayload = await self.request(\n self.guild.id,\n limit=retrieve,\n user_id=self.user_id,\n action_type=self.action_type,\n after=after,\n )\n entries = data.get(\"audit_log_entries\", [])\n if len(data) and entries:\n if self.limit is not None:\n self.limit -= retrieve\n self.after = Object(id=int(entries[0][\"id\"]))\n return data.get(\"users\", []), 
entries\n\n async def next(self) -> AuditLogEntry:\n if self.entries.empty():\n await self._fill()\n\n try:\n return self.entries.get_nowait()\n except asyncio.QueueEmpty:\n raise NoMoreItems()\n\n def _get_retrieve(self):\n l = self.limit\n if l is None or l > 100:\n r = 100\n else:\n r = l\n self.retrieve = r\n return r > 0\n\n async def _fill(self):\n from .user import User\n\n if self._get_retrieve():\n users, data = await self._strategy(self.retrieve)\n if len(data) < 100:\n self.limit = 0 # terminate the infinite loop\n\n if self.reverse:\n data = reversed(data)\n if self._filter:\n data = filter(self._filter, data)\n\n for user in users:\n u = User(data=user, state=self._state)\n self._users[u.id] = u\n\n for element in data:\n # TODO: remove this if statement later\n if element[\"action_type\"] is None:\n continue\n\n await self.entries.put(\n AuditLogEntry(data=element, users=self._users, guild=self.guild)\n )\n\n\nclass GuildIterator(_AsyncIterator[\"Guild\"]):\n \"\"\"Iterator for receiving the client's guilds.\n\n The guilds endpoint has the same two behaviours as described\n in :class:`HistoryIterator`:\n If ``before`` is specified, the guilds endpoint returns the ``limit``\n newest guilds before ``before``, sorted with newest first. For filling over\n 100 guilds, update the ``before`` parameter to the oldest guild received.\n Guilds will be returned in order by time.\n If `after` is specified, it returns the ``limit`` oldest guilds after ``after``,\n sorted with newest first. For filling over 100 guilds, update the ``after``\n parameter to the newest guild received, If guilds are not reversed, they\n will be out of order (99-0, 199-100, so on)\n\n Not that if both ``before`` and ``after`` are specified, ``before`` is ignored by the\n guilds endpoint.\n\n Parameters\n ----------\n bot: :class:`discord.Client`\n The client to retrieve the guilds from.\n limit: :class:`int`\n Maximum number of guilds to retrieve.\n before: Optional[Union[:class:`abc.Snowflake`, :class:`datetime.datetime`]]\n Object before which all guilds must be.\n after: Optional[Union[:class:`abc.Snowflake`, :class:`datetime.datetime`]]\n Object after which all guilds must be.\n \"\"\"\n\n def __init__(self, bot, limit, before=None, after=None):\n if isinstance(before, datetime.datetime):\n before = Object(id=time_snowflake(before, high=False))\n if isinstance(after, datetime.datetime):\n after = Object(id=time_snowflake(after, high=True))\n\n self.bot = bot\n self.limit = limit\n self.before = before\n self.after = after\n\n self._filter = None\n\n self.state = self.bot._connection\n self.get_guilds = self.bot.http.get_guilds\n self.guilds = asyncio.Queue()\n\n if self.before and self.after:\n self._retrieve_guilds = self._retrieve_guilds_before_strategy # type: ignore\n self._filter = lambda m: int(m[\"id\"]) > self.after.id\n elif self.after:\n self._retrieve_guilds = self._retrieve_guilds_after_strategy # type: ignore\n else:\n self._retrieve_guilds = self._retrieve_guilds_before_strategy # type: ignore\n\n async def next(self) -> Guild:\n if self.guilds.empty():\n await self.fill_guilds()\n\n try:\n return self.guilds.get_nowait()\n except asyncio.QueueEmpty:\n raise NoMoreItems()\n\n def _get_retrieve(self):\n l = self.limit\n if l is None or l > 100:\n r = 100\n else:\n r = l\n self.retrieve = r\n return r > 0\n\n def create_guild(self, data):\n from .guild import Guild\n\n return Guild(state=self.state, data=data)\n\n async def fill_guilds(self):\n if self._get_retrieve():\n data = await 
self._retrieve_guilds(self.retrieve)\n if self.limit is None or len(data) < 100:\n self.limit = 0\n\n if self._filter:\n data = filter(self._filter, data)\n\n for element in data:\n await self.guilds.put(self.create_guild(element))\n\n async def _retrieve_guilds(self, retrieve) -> list[Guild]:\n \"\"\"Retrieve guilds and update next parameters.\"\"\"\n raise NotImplementedError\n\n async def _retrieve_guilds_before_strategy(self, retrieve):\n \"\"\"Retrieve guilds using before parameter.\"\"\"\n before = self.before.id if self.before else None\n data: list[GuildPayload] = await self.get_guilds(retrieve, before=before)\n if len(data):\n if self.limit is not None:\n self.limit -= retrieve\n self.before = Object(id=int(data[-1][\"id\"]))\n return data\n\n async def _retrieve_guilds_after_strategy(self, retrieve):\n \"\"\"Retrieve guilds using after parameter.\"\"\"\n after = self.after.id if self.after else None\n data: list[GuildPayload] = await self.get_guilds(retrieve, after=after)\n if len(data):\n if self.limit is not None:\n self.limit -= retrieve\n self.after = Object(id=int(data[0][\"id\"]))\n return data\n\n\nclass MemberIterator(_AsyncIterator[\"Member\"]):\n def __init__(self, guild, limit=1000, after=None):\n if isinstance(after, datetime.datetime):\n after = Object(id=time_snowflake(after, high=True))\n\n self.guild = guild\n self.limit = limit\n self.after = after or OLDEST_OBJECT\n\n self.state = self.guild._state\n self.get_members = self.state.http.get_members\n self.members = asyncio.Queue()\n\n async def next(self) -> Member:\n if self.members.empty():\n await self.fill_members()\n\n try:\n return self.members.get_nowait()\n except asyncio.QueueEmpty:\n raise NoMoreItems()\n\n def _get_retrieve(self):\n l = self.limit\n if l is None or l > 1000:\n r = 1000\n else:\n r = l\n self.retrieve = r\n return r > 0\n\n async def fill_members(self):\n if not self._get_retrieve():\n return\n after = self.after.id if self.after else None\n data = await self.get_members(self.guild.id, self.retrieve, after)\n if not data:\n # no data, terminate\n return\n\n if len(data) < 1000:\n self.limit = 0 # terminate loop\n\n self.after = Object(id=int(data[-1][\"user\"][\"id\"]))\n\n for element in reversed(data):\n await self.members.put(self.create_member(element))\n\n def create_member(self, data):\n from .member import Member\n\n return Member(data=data, guild=self.guild, state=self.state)\n\n\nclass BanIterator(_AsyncIterator[\"BanEntry\"]):\n def __init__(self, guild, limit=None, before=None, after=None):\n self.guild = guild\n self.limit = limit\n self.after = after\n self.before = before\n\n self.state = self.guild._state\n self.get_bans = self.state.http.get_bans\n self.bans = asyncio.Queue()\n\n async def next(self) -> BanEntry:\n if self.bans.empty():\n await self.fill_bans()\n\n try:\n return self.bans.get_nowait()\n except asyncio.QueueEmpty:\n raise NoMoreItems()\n\n def _get_retrieve(self):\n l = self.limit\n if l is None or l > 1000:\n r = 1000\n else:\n r = l\n self.retrieve = r\n return r > 0\n\n async def fill_bans(self):\n if not self._get_retrieve():\n return\n before = self.before.id if self.before else None\n after = self.after.id if self.after else None\n data = await self.get_bans(self.guild.id, self.retrieve, before, after)\n if not data:\n # no data, terminate\n return\n if self.limit:\n self.limit -= self.retrieve\n\n if len(data) < 1000:\n self.limit = 0 # terminate loop\n\n self.after = Object(id=int(data[-1][\"user\"][\"id\"]))\n\n for element in reversed(data):\n 
await self.bans.put(self.create_ban(element))\n\n def create_ban(self, data):\n from .guild import BanEntry\n from .user import User\n\n return BanEntry(\n reason=data[\"reason\"], user=User(state=self.state, data=data[\"user\"])\n )\n\n\nclass ArchivedThreadIterator(_AsyncIterator[\"Thread\"]):\n def __init__(\n self,\n channel_id: int,\n guild: Guild,\n limit: int | None,\n joined: bool,\n private: bool,\n before: Snowflake | datetime.datetime | None = None,\n ):\n self.channel_id = channel_id\n self.guild = guild\n self.limit = limit\n self.joined = joined\n self.private = private\n self.http = guild._state.http\n\n if joined and not private:\n raise ValueError(\"Cannot iterate over joined public archived threads\")\n\n self.before: str | None\n if before is None:\n self.before = None\n elif isinstance(before, datetime.datetime):\n if joined:\n self.before = str(time_snowflake(before, high=False))\n else:\n self.before = before.isoformat()\n else:\n if joined:\n self.before = str(before.id)\n else:\n self.before = snowflake_time(before.id).isoformat()\n\n self.update_before: Callable[[ThreadPayload], str] = self.get_archive_timestamp\n\n if joined:\n self.endpoint = self.http.get_joined_private_archived_threads\n self.update_before = self.get_thread_id\n elif private:\n self.endpoint = self.http.get_private_archived_threads\n else:\n self.endpoint = self.http.get_public_archived_threads\n\n self.queue: asyncio.Queue[Thread] = asyncio.Queue()\n self.has_more: bool = True\n\n async def next(self) -> Thread:\n if self.queue.empty():\n await self.fill_queue()\n\n try:\n return self.queue.get_nowait()\n except asyncio.QueueEmpty:\n raise NoMoreItems()\n\n @staticmethod\n def get_archive_timestamp(data: ThreadPayload) -> str:\n return data[\"thread_metadata\"][\"archive_timestamp\"]\n\n @staticmethod\n def get_thread_id(data: ThreadPayload) -> str:\n return data[\"id\"] # type: ignore\n\n async def fill_queue(self) -> None:\n if not self.has_more:\n raise NoMoreItems()\n\n limit = 50 if self.limit is None else max(self.limit, 50)\n data = await self.endpoint(self.channel_id, before=self.before, limit=limit)\n\n # This stuff is obviously WIP because 'members' is always empty\n threads: list[ThreadPayload] = data.get(\"threads\", [])\n for d in reversed(threads):\n self.queue.put_nowait(self.create_thread(d))\n\n self.has_more = data.get(\"has_more\", False)\n if self.limit is not None:\n self.limit -= len(threads)\n if self.limit <= 0:\n self.has_more = False\n\n if self.has_more:\n self.before = self.update_before(threads[-1])\n\n def create_thread(self, data: ThreadPayload) -> Thread:\n from .threads import Thread\n\n return Thread(guild=self.guild, state=self.guild._state, data=data)\n\n\nclass ScheduledEventSubscribersIterator(_AsyncIterator[Union[\"User\", \"Member\"]]):\n def __init__(\n self,\n event: ScheduledEvent,\n limit: int,\n with_member: bool = False,\n before: datetime.datetime | int = None,\n after: datetime.datetime | int = None,\n ):\n if isinstance(before, datetime.datetime):\n before = Object(id=time_snowflake(before, high=False))\n if isinstance(after, datetime.datetime):\n after = Object(id=time_snowflake(after, high=True))\n\n self.event = event\n self.limit = limit\n self.with_member = with_member\n self.before = before\n self.after = after\n\n self.subscribers = asyncio.Queue()\n self.get_subscribers = self.event._state.http.get_scheduled_event_users\n\n async def next(self) -> User | Member:\n if self.subscribers.empty():\n await self.fill_subs()\n\n try:\n return 
self.subscribers.get_nowait()\n except asyncio.QueueEmpty:\n raise NoMoreItems()\n\n def _get_retrieve(self):\n l = self.limit\n if l is None or l > 100:\n r = 100\n else:\n r = l\n self.retrieve = r\n return r > 0\n\n def member_from_payload(self, data):\n from .member import Member\n\n user = data.pop(\"user\")\n\n member = data.pop(\"member\")\n member[\"user\"] = user\n\n return Member(data=member, guild=self.event.guild, state=self.event._state)\n\n def user_from_payload(self, data):\n from .user import User\n\n user = data.pop(\"user\")\n\n return User(state=self.event._state, data=user)\n\n async def fill_subs(self):\n if not self._get_retrieve():\n return\n before = self.before.id if self.before else None\n after = self.after.id if self.after else None\n data = await self.get_subscribers(\n guild_id=self.event.guild.id,\n event_id=self.event.id,\n limit=self.retrieve,\n with_member=self.with_member,\n before=before,\n after=after,\n )\n if data:\n self.limit -= self.retrieve\n\n for element in reversed(data):\n if \"member\" in element:\n await self.subscribers.put(self.member_from_payload(element))\n else:\n await self.subscribers.put(self.user_from_payload(element))\n", "path": "discord/iterators.py" } ]
diff --git a/CHANGELOG.md b/CHANGELOG.md index 6928a6bac4..6ece1cf3d2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -201,6 +201,8 @@ These changes are available on the `master` branch, but have not yet been releas ([#2243](https://github.com/Pycord-Development/pycord/pull/2243)) - Fixed `Intents.all()` returning the wrong value. ([#2257](https://github.com/Pycord-Development/pycord/issues/2257)) +- Fixed `AuditLogIterator` not respecting the `after` parameter. + ([#2295](https://github.com/Pycord-Development/pycord/issues/2295)) ## [2.4.1] - 2023-03-20 diff --git a/discord/iterators.py b/discord/iterators.py index b171d70ed6..7507cfd5d8 100644 --- a/discord/iterators.py +++ b/discord/iterators.py @@ -430,7 +430,7 @@ def __init__( self.before = before self.user_id = user_id self.action_type = action_type - self.after = OLDEST_OBJECT + self.after = after or OLDEST_OBJECT self._users = {} self._state = guild._state
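For context, a minimal sketch of how the fixed iterator is expected to behave through the public `Guild.audit_logs()` API; the guild object, marker id, and action below are placeholders invented for illustration, not values taken from this PR:

```python
# Sketch only: assumes a connected pycord client and a real guild object.
import discord


async def bans_since(guild: discord.Guild, marker_id: int):
    entries = []
    # With the fix above, `after` is honoured instead of silently falling
    # back to OLDEST_OBJECT (id=0), so entries older than `marker_id`
    # are no longer yielded.
    async for entry in guild.audit_logs(
        limit=50,
        after=discord.Object(id=marker_id),
        action=discord.AuditLogAction.ban,
    ):
        entries.append(entry)
    return entries
```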
scikit-image__scikit-image-1145
find_contours returns (y,x) instead of (x,y) pairs The API doc says for the return value "Each contour is an ndarray of shape `(n, 2)`, consisting of n `(x, y)` coordinates along the contour." A small test case, with input: ![figure_4](https://cloud.githubusercontent.com/assets/530988/4217168/2ceec576-38e4-11e4-8b27-62839077635e.png) ``` def testImage(): n = 10 coord = np.ones((n,n)) r = n*0.4 y,x = np.ogrid[-r: r+1, -r: r+1] mask = x**2+y**2 <= r**2 coord[mask] = -coord[mask] coord[coord>0] = np.nan # make it non-symmetric coord[4,0] = np.nan r = ~np.isnan(coord) return np.hstack((r,r)) import skimage.measure contours = skimage.measure.find_contours(testImage(), 0.99) print contours # output: [array([[ 0., 4.], [ 1., 5.], [ 1., 6.], [ 1., 6.], [ 2., 7.], [ 2., 7.], [ 3., 7.], [ 4., 8.], [ 4., 8.], [ 4., 8.], [ 5., 7.], [ 6., 7.], [ 6., 7.], [ 7., 6.], [ 7., 6.], [ 7., 5.], [ 8., 4.], [ 8., 4.], [ 8., 4.], [ 7., 3.], [ 7., 2.], [ 7., 2.], [ 6., 1.], [ 6., 1.], [ 5., 1.], [ 4., 1.], [ 3., 1.], [ 2., 1.], [ 2., 1.], [ 1., 2.], [ 1., 2.], [ 1., 3.], [ 0., 4.]]), array([[ 0., 14.], [ 1., 15.], [ 1., 16.], [ 1., 16.], [ 2., 17.], [ 2., 17.], [ 3., 17.], [ 4., 18.], [ 4., 18.], [ 4., 18.], [ 5., 17.], [ 6., 17.], [ 6., 17.], [ 7., 16.], [ 7., 16.], [ 7., 15.], [ 8., 14.], [ 8., 14.], [ 8., 14.], [ 7., 13.], [ 7., 12.], [ 7., 12.], [ 6., 11.], [ 6., 11.], [ 5., 11.], [ 4., 11.], [ 3., 11.], [ 2., 11.], [ 2., 11.], [ 1., 12.], [ 1., 12.], [ 1., 13.], [ 0., 14.]])] ``` You can see clearly that the x-coordinate is actually in the second column and not the first. Apart from that, why are there consecutive duplicates? Should I open a separate issue for that?
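Independent of the reporter's example, a small self-contained sketch (the tiny test image below is made up purely for illustration) shows that the first column tracks the row index and the second the column index, so the columns have to be swapped for conventional x/y plotting:

```python
import numpy as np
import matplotlib.pyplot as plt
import skimage.measure

# One bright pixel at row 1, column 5 in an otherwise dark image.
image = np.zeros((5, 8))
image[1, 5] = 1.0

contours = skimage.measure.find_contours(image, 0.5)
for contour in contours:
    # First column stays near 1 (the row), second near 5 (the column).
    print(contour)

fig, ax = plt.subplots()
ax.imshow(image, cmap="gray")
for contour in contours:
    # Swap columns to plot as x=column, y=row.
    ax.plot(contour[:, 1], contour[:, 0], linewidth=2)
plt.show()
```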
[ { "content": "import numpy as np\nfrom . import _find_contours_cy\n\nfrom collections import deque\n\n_param_options = ('high', 'low')\n\n\ndef find_contours(array, level,\n fully_connected='low', positive_orientation='low'):\n \"\"\"Find iso-valued contours in a 2D array for a given level value.\n\n Uses the \"marching squares\" method to compute a the iso-valued contours of\n the input 2D array for a particular level value. Array values are linearly\n interpolated to provide better precision for the output contours.\n\n Parameters\n ----------\n array : 2D ndarray of double\n Input data in which to find contours.\n level : float\n Value along which to find contours in the array.\n fully_connected : str, {'low', 'high'}\n Indicates whether array elements below the given level value are to be\n considered fully-connected (and hence elements above the value will\n only be face connected), or vice-versa. (See notes below for details.)\n positive_orientation : either 'low' or 'high'\n Indicates whether the output contours will produce positively-oriented\n polygons around islands of low- or high-valued elements. If 'low' then\n contours will wind counter- clockwise around elements below the\n iso-value. Alternately, this means that low-valued elements are always\n on the left of the contour. (See below for details.)\n\n Returns\n -------\n contours : list of (n,2)-ndarrays\n Each contour is an ndarray of shape ``(n, 2)``,\n consisting of n ``(x, y)`` coordinates along the contour.\n\n Notes\n -----\n The marching squares algorithm is a special case of the marching cubes\n algorithm [1]_. A simple explanation is available here::\n\n http://www.essi.fr/~lingrand/MarchingCubes/algo.html\n\n There is a single ambiguous case in the marching squares algorithm: when\n a given ``2 x 2``-element square has two high-valued and two low-valued\n elements, each pair diagonally adjacent. (Where high- and low-valued is\n with respect to the contour value sought.) In this case, either the\n high-valued elements can be 'connected together' via a thin isthmus that\n separates the low-valued elements, or vice-versa. When elements are\n connected together across a diagonal, they are considered 'fully\n connected' (also known as 'face+vertex-connected' or '8-connected'). Only\n high-valued or low-valued elements can be fully-connected, the other set\n will be considered as 'face-connected' or '4-connected'. By default,\n low-valued elements are considered fully-connected; this can be altered\n with the 'fully_connected' parameter.\n\n Output contours are not guaranteed to be closed: contours which intersect\n the array edge will be left open. All other contours will be closed. (The\n closed-ness of a contours can be tested by checking whether the beginning\n point is the same as the end point.)\n\n Contours are oriented. By default, array values lower than the contour\n value are to the left of the contour and values greater than the contour\n value are to the right. This means that contours will wind\n counter-clockwise (i.e. in 'positive orientation') around islands of\n low-valued pixels. This behavior can be altered with the\n 'positive_orientation' parameter.\n\n The order of the contours in the output list is determined by the position\n of the smallest ``x,y`` (in lexicographical order) coordinate in the\n contour. This is a side-effect of how the input array is traversed, but\n can be relied upon.\n\n .. warning::\n\n Array coordinates/values are assumed to refer to the *center* of the\n array element. 
Take a simple example input: ``[0, 1]``. The interpolated\n position of 0.5 in this array is midway between the 0-element (at\n ``x=0``) and the 1-element (at ``x=1``), and thus would fall at\n ``x=0.5``.\n\n This means that to find reasonable contours, it is best to find contours\n midway between the expected \"light\" and \"dark\" values. In particular,\n given a binarized array, *do not* choose to find contours at the low or\n high value of the array. This will often yield degenerate contours,\n especially around structures that are a single array element wide. Instead\n choose a middle value, as above.\n\n References\n ----------\n .. [1] Lorensen, William and Harvey E. Cline. Marching Cubes: A High\n Resolution 3D Surface Construction Algorithm. Computer Graphics\n (SIGGRAPH 87 Proceedings) 21(4) July 1987, p. 163-170).\n\n Examples\n --------\n >>> a = np.zeros((3, 3))\n >>> a[0, 0] = 1\n >>> a\n array([[ 1., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.]])\n >>> find_contours(a, 0.5)\n [array([[ 0. , 0.5],\n [ 0.5, 0. ]])]\n \"\"\"\n array = np.asarray(array, dtype=np.double)\n if array.ndim != 2:\n raise ValueError('Only 2D arrays are supported.')\n level = float(level)\n if (fully_connected not in _param_options or\n positive_orientation not in _param_options):\n raise ValueError('Parameters \"fully_connected\" and'\n ' \"positive_orientation\" must be either \"high\" or \"low\".')\n point_list = _find_contours_cy.iterate_and_store(array, level,\n fully_connected == 'high')\n contours = _assemble_contours(_take_2(point_list))\n if positive_orientation == 'high':\n contours = [c[::-1] for c in contours]\n return contours\n\n\ndef _take_2(seq):\n iterator = iter(seq)\n while(True):\n n1 = next(iterator)\n n2 = next(iterator)\n yield (n1, n2)\n\n\ndef _assemble_contours(points_iterator):\n current_index = 0\n contours = {}\n starts = {}\n ends = {}\n for from_point, to_point in points_iterator:\n # Ignore degenerate segments.\n # This happens when (and only when) one vertex of the square is\n # exactly the contour level, and the rest are above or below.\n # This degnerate vertex will be picked up later by neighboring squares.\n if from_point == to_point:\n continue\n\n tail_data = starts.get(to_point)\n head_data = ends.get(from_point)\n\n if tail_data is not None and head_data is not None:\n tail, tail_num = tail_data\n head, head_num = head_data\n # We need to connect these two contours.\n if tail is head:\n # We need to closed a contour.\n # Add the end point, and remove the contour from the\n # 'starts' and 'ends' dicts.\n head.append(to_point)\n del starts[to_point]\n del ends[from_point]\n else: # tail is not head\n # We need to join two distinct contours.\n # We want to keep the first contour segment created, so that\n # the final contours are ordered left->right, top->bottom.\n if tail_num > head_num:\n # tail was created second. Append tail to head.\n head.extend(tail)\n # remove all traces of tail:\n del starts[to_point]\n del ends[tail[-1]]\n del contours[tail_num]\n # remove the old end of head and add the new end.\n del ends[from_point]\n ends[head[-1]] = (head, head_num)\n else: # tail_num <= head_num\n # head was created second. 
Prepend head to tail.\n tail.extendleft(reversed(head))\n # remove all traces of head:\n del starts[head[0]]\n del ends[from_point]\n del contours[head_num]\n # remove the old start of tail and add the new start.\n del starts[to_point]\n starts[tail[0]] = (tail, tail_num)\n elif tail_data is None and head_data is None:\n # we need to add a new contour\n current_index += 1\n new_num = current_index\n new_contour = deque((from_point, to_point))\n contours[new_num] = new_contour\n starts[from_point] = (new_contour, new_num)\n ends[to_point] = (new_contour, new_num)\n elif tail_data is not None and head_data is None:\n tail, tail_num = tail_data\n # We've found a single contour to which the new segment should be\n # prepended.\n tail.appendleft(from_point)\n del starts[to_point]\n starts[from_point] = (tail, tail_num)\n elif tail_data is None and head_data is not None:\n head, head_num = head_data\n # We've found a single contour to which the new segment should be\n # appended\n head.append(to_point)\n del ends[from_point]\n ends[to_point] = (head, head_num)\n # end iteration over from_ and to_ points\n\n return [np.array(contour) for (num, contour) in sorted(contours.items())]\n", "path": "skimage/measure/_find_contours.py" } ]
[ { "content": "import numpy as np\nfrom . import _find_contours_cy\n\nfrom collections import deque\n\n_param_options = ('high', 'low')\n\n\ndef find_contours(array, level,\n fully_connected='low', positive_orientation='low'):\n \"\"\"Find iso-valued contours in a 2D array for a given level value.\n\n Uses the \"marching squares\" method to compute a the iso-valued contours of\n the input 2D array for a particular level value. Array values are linearly\n interpolated to provide better precision for the output contours.\n\n Parameters\n ----------\n array : 2D ndarray of double\n Input data in which to find contours.\n level : float\n Value along which to find contours in the array.\n fully_connected : str, {'low', 'high'}\n Indicates whether array elements below the given level value are to be\n considered fully-connected (and hence elements above the value will\n only be face connected), or vice-versa. (See notes below for details.)\n positive_orientation : either 'low' or 'high'\n Indicates whether the output contours will produce positively-oriented\n polygons around islands of low- or high-valued elements. If 'low' then\n contours will wind counter- clockwise around elements below the\n iso-value. Alternately, this means that low-valued elements are always\n on the left of the contour. (See below for details.)\n\n Returns\n -------\n contours : list of (n,2)-ndarrays\n Each contour is an ndarray of shape ``(n, 2)``,\n consisting of n ``(row, column)`` coordinates along the contour.\n\n Notes\n -----\n The marching squares algorithm is a special case of the marching cubes\n algorithm [1]_. A simple explanation is available here::\n\n http://www.essi.fr/~lingrand/MarchingCubes/algo.html\n\n There is a single ambiguous case in the marching squares algorithm: when\n a given ``2 x 2``-element square has two high-valued and two low-valued\n elements, each pair diagonally adjacent. (Where high- and low-valued is\n with respect to the contour value sought.) In this case, either the\n high-valued elements can be 'connected together' via a thin isthmus that\n separates the low-valued elements, or vice-versa. When elements are\n connected together across a diagonal, they are considered 'fully\n connected' (also known as 'face+vertex-connected' or '8-connected'). Only\n high-valued or low-valued elements can be fully-connected, the other set\n will be considered as 'face-connected' or '4-connected'. By default,\n low-valued elements are considered fully-connected; this can be altered\n with the 'fully_connected' parameter.\n\n Output contours are not guaranteed to be closed: contours which intersect\n the array edge will be left open. All other contours will be closed. (The\n closed-ness of a contours can be tested by checking whether the beginning\n point is the same as the end point.)\n\n Contours are oriented. By default, array values lower than the contour\n value are to the left of the contour and values greater than the contour\n value are to the right. This means that contours will wind\n counter-clockwise (i.e. in 'positive orientation') around islands of\n low-valued pixels. This behavior can be altered with the\n 'positive_orientation' parameter.\n\n The order of the contours in the output list is determined by the position\n of the smallest ``x,y`` (in lexicographical order) coordinate in the\n contour. This is a side-effect of how the input array is traversed, but\n can be relied upon.\n\n .. 
warning::\n\n Array coordinates/values are assumed to refer to the *center* of the\n array element. Take a simple example input: ``[0, 1]``. The interpolated\n position of 0.5 in this array is midway between the 0-element (at\n ``x=0``) and the 1-element (at ``x=1``), and thus would fall at\n ``x=0.5``.\n\n This means that to find reasonable contours, it is best to find contours\n midway between the expected \"light\" and \"dark\" values. In particular,\n given a binarized array, *do not* choose to find contours at the low or\n high value of the array. This will often yield degenerate contours,\n especially around structures that are a single array element wide. Instead\n choose a middle value, as above.\n\n References\n ----------\n .. [1] Lorensen, William and Harvey E. Cline. Marching Cubes: A High\n Resolution 3D Surface Construction Algorithm. Computer Graphics\n (SIGGRAPH 87 Proceedings) 21(4) July 1987, p. 163-170).\n\n Examples\n --------\n >>> a = np.zeros((3, 3))\n >>> a[0, 0] = 1\n >>> a\n array([[ 1., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.]])\n >>> find_contours(a, 0.5)\n [array([[ 0. , 0.5],\n [ 0.5, 0. ]])]\n \"\"\"\n array = np.asarray(array, dtype=np.double)\n if array.ndim != 2:\n raise ValueError('Only 2D arrays are supported.')\n level = float(level)\n if (fully_connected not in _param_options or\n positive_orientation not in _param_options):\n raise ValueError('Parameters \"fully_connected\" and'\n ' \"positive_orientation\" must be either \"high\" or \"low\".')\n point_list = _find_contours_cy.iterate_and_store(array, level,\n fully_connected == 'high')\n contours = _assemble_contours(_take_2(point_list))\n if positive_orientation == 'high':\n contours = [c[::-1] for c in contours]\n return contours\n\n\ndef _take_2(seq):\n iterator = iter(seq)\n while(True):\n n1 = next(iterator)\n n2 = next(iterator)\n yield (n1, n2)\n\n\ndef _assemble_contours(points_iterator):\n current_index = 0\n contours = {}\n starts = {}\n ends = {}\n for from_point, to_point in points_iterator:\n # Ignore degenerate segments.\n # This happens when (and only when) one vertex of the square is\n # exactly the contour level, and the rest are above or below.\n # This degnerate vertex will be picked up later by neighboring squares.\n if from_point == to_point:\n continue\n\n tail_data = starts.get(to_point)\n head_data = ends.get(from_point)\n\n if tail_data is not None and head_data is not None:\n tail, tail_num = tail_data\n head, head_num = head_data\n # We need to connect these two contours.\n if tail is head:\n # We need to closed a contour.\n # Add the end point, and remove the contour from the\n # 'starts' and 'ends' dicts.\n head.append(to_point)\n del starts[to_point]\n del ends[from_point]\n else: # tail is not head\n # We need to join two distinct contours.\n # We want to keep the first contour segment created, so that\n # the final contours are ordered left->right, top->bottom.\n if tail_num > head_num:\n # tail was created second. Append tail to head.\n head.extend(tail)\n # remove all traces of tail:\n del starts[to_point]\n del ends[tail[-1]]\n del contours[tail_num]\n # remove the old end of head and add the new end.\n del ends[from_point]\n ends[head[-1]] = (head, head_num)\n else: # tail_num <= head_num\n # head was created second. 
Prepend head to tail.\n tail.extendleft(reversed(head))\n # remove all traces of head:\n del starts[head[0]]\n del ends[from_point]\n del contours[head_num]\n # remove the old start of tail and add the new start.\n del starts[to_point]\n starts[tail[0]] = (tail, tail_num)\n elif tail_data is None and head_data is None:\n # we need to add a new contour\n current_index += 1\n new_num = current_index\n new_contour = deque((from_point, to_point))\n contours[new_num] = new_contour\n starts[from_point] = (new_contour, new_num)\n ends[to_point] = (new_contour, new_num)\n elif tail_data is not None and head_data is None:\n tail, tail_num = tail_data\n # We've found a single contour to which the new segment should be\n # prepended.\n tail.appendleft(from_point)\n del starts[to_point]\n starts[from_point] = (tail, tail_num)\n elif tail_data is None and head_data is not None:\n head, head_num = head_data\n # We've found a single contour to which the new segment should be\n # appended\n head.append(to_point)\n del ends[from_point]\n ends[to_point] = (head, head_num)\n # end iteration over from_ and to_ points\n\n return [np.array(contour) for (num, contour) in sorted(contours.items())]\n", "path": "skimage/measure/_find_contours.py" } ]
diff --git a/skimage/measure/_find_contours.py b/skimage/measure/_find_contours.py index 0eea912676b..ff53795fce1 100755 --- a/skimage/measure/_find_contours.py +++ b/skimage/measure/_find_contours.py @@ -35,7 +35,7 @@ def find_contours(array, level, ------- contours : list of (n,2)-ndarrays Each contour is an ndarray of shape ``(n, 2)``, - consisting of n ``(x, y)`` coordinates along the contour. + consisting of n ``(row, column)`` coordinates along the contour. Notes -----
pallets__werkzeug-1402
Test Client does not keep Authorization in the request headers when following redirects Hi, When sending a POST request with `follow_redirects=True` and with `{'Authorization': 'Bearer {}'.format(token)}` added to the headers, and the endpoint returns a 303 code, the client follows the redirect but the Authorization header gets dropped on the second request. More specifically, in `test.py#open()` the second request's environ is missing the `HTTP_AUTHORIZATION` key that the first one contains. The redirect is to a different endpoint on the same domain. This happens under Python 2.7.13 and Werkzeug 0.12.2. I looked for similar issues; the closest one was https://github.com/pallets/werkzeug/issues/26 but it does not mention the handling of 303. Is this behaviour intended, or could it be an oversight? I couldn't see any mention of headers in https://tools.ietf.org/html/rfc7231#section-6.4.4
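A minimal reproduction sketch of the reported behaviour, written against the 0.12-era test client API as reported; the throwaway WSGI app, endpoint names, and token are invented for illustration:

```python
from werkzeug.test import Client
from werkzeug.utils import redirect
from werkzeug.wrappers import Request, Response


@Request.application
def app(request):
    if request.path == "/login":
        # Redirect the POST to a different endpoint with 303 See Other.
        return redirect("/protected", code=303)
    # Echo whatever Authorization header survived the redirect.
    return Response(request.headers.get("Authorization", "<missing>"))


client = Client(app, Response)
response = client.post(
    "/login",
    headers={"Authorization": "Bearer dummy-token"},
    follow_redirects=True,
)
# On affected versions this prints b'<missing>' because the header is not
# carried over to the follow-up GET issued by the test client.
print(response.data)
```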
[ { "content": "# -*- coding: utf-8 -*-\n\"\"\"\n werkzeug.http\n ~~~~~~~~~~~~~\n\n Werkzeug comes with a bunch of utilities that help Werkzeug to deal with\n HTTP data. Most of the classes and functions provided by this module are\n used by the wrappers, but they are useful on their own, too, especially if\n the response and request objects are not used.\n\n This covers some of the more HTTP centric features of WSGI, some other\n utilities such as cookie handling are documented in the `werkzeug.utils`\n module.\n\n\n :copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.\n :license: BSD, see LICENSE for more details.\n\"\"\"\nimport re\nimport warnings\nfrom time import time, gmtime\ntry:\n from email.utils import parsedate_tz\nexcept ImportError: # pragma: no cover\n from email.Utils import parsedate_tz\ntry:\n from urllib.request import parse_http_list as _parse_list_header\n from urllib.parse import unquote_to_bytes as _unquote\nexcept ImportError: # pragma: no cover\n from urllib2 import parse_http_list as _parse_list_header, \\\n unquote as _unquote\nfrom datetime import datetime, timedelta\nfrom hashlib import md5\nimport base64\n\nfrom werkzeug._internal import _cookie_quote, _make_cookie_domain, \\\n _cookie_parse_impl\nfrom werkzeug._compat import to_unicode, iteritems, text_type, \\\n string_types, try_coerce_native, to_bytes, PY2, \\\n integer_types\n\n\n_cookie_charset = 'latin1'\n_basic_auth_charset = 'utf-8'\n# for explanation of \"media-range\", etc. see Sections 5.3.{1,2} of RFC 7231\n_accept_re = re.compile(\n r'''( # media-range capturing-parenthesis\n [^\\s;,]+ # type/subtype\n (?:[ \\t]*;[ \\t]* # \";\"\n (?: # parameter non-capturing-parenthesis\n [^\\s;,q][^\\s;,]* # token that doesn't start with \"q\"\n | # or\n q[^\\s;,=][^\\s;,]* # token that is more than just \"q\"\n )\n )* # zero or more parameters\n ) # end of media-range\n (?:[ \\t]*;[ \\t]*q= # weight is a \"q\" parameter\n (\\d*(?:\\.\\d+)?) # qvalue capturing-parentheses\n [^,]* # \"extension\" accept params: who cares?\n )? 
# accept params are optional\n ''', re.VERBOSE)\n_token_chars = frozenset(\"!#$%&'*+-.0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n '^_`abcdefghijklmnopqrstuvwxyz|~')\n_etag_re = re.compile(r'([Ww]/)?(?:\"(.*?)\"|(.*?))(?:\\s*,\\s*|$)')\n_unsafe_header_chars = set('()<>@,;:\\\"/[]?={} \\t')\n_option_header_piece_re = re.compile(r'''\n ;\\s*\n (?P<key>\n \"[^\"\\\\]*(?:\\\\.[^\"\\\\]*)*\" # quoted string\n |\n [^\\s;,=*]+ # token\n )\n \\s*\n (?: # optionally followed by =value\n (?: # equals sign, possibly with encoding\n \\*\\s*=\\s* # * indicates extended notation\n (?P<encoding>[^\\s]+?)\n '(?P<language>[^\\s]*?)'\n |\n =\\s* # basic notation\n )\n (?P<value>\n \"[^\"\\\\]*(?:\\\\.[^\"\\\\]*)*\" # quoted string\n |\n [^;,]+ # token\n )?\n )?\n \\s*\n''', flags=re.VERBOSE)\n_option_header_start_mime_type = re.compile(r',\\s*([^;,\\s]+)([;,]\\s*.+)?')\n\n_entity_headers = frozenset([\n 'allow', 'content-encoding', 'content-language', 'content-length',\n 'content-location', 'content-md5', 'content-range', 'content-type',\n 'expires', 'last-modified'\n])\n_hop_by_hop_headers = frozenset([\n 'connection', 'keep-alive', 'proxy-authenticate',\n 'proxy-authorization', 'te', 'trailer', 'transfer-encoding',\n 'upgrade'\n])\n\n\nHTTP_STATUS_CODES = {\n 100: 'Continue',\n 101: 'Switching Protocols',\n 102: 'Processing',\n 200: 'OK',\n 201: 'Created',\n 202: 'Accepted',\n 203: 'Non Authoritative Information',\n 204: 'No Content',\n 205: 'Reset Content',\n 206: 'Partial Content',\n 207: 'Multi Status',\n 226: 'IM Used', # see RFC 3229\n 300: 'Multiple Choices',\n 301: 'Moved Permanently',\n 302: 'Found',\n 303: 'See Other',\n 304: 'Not Modified',\n 305: 'Use Proxy',\n 307: 'Temporary Redirect',\n 400: 'Bad Request',\n 401: 'Unauthorized',\n 402: 'Payment Required', # unused\n 403: 'Forbidden',\n 404: 'Not Found',\n 405: 'Method Not Allowed',\n 406: 'Not Acceptable',\n 407: 'Proxy Authentication Required',\n 408: 'Request Timeout',\n 409: 'Conflict',\n 410: 'Gone',\n 411: 'Length Required',\n 412: 'Precondition Failed',\n 413: 'Request Entity Too Large',\n 414: 'Request URI Too Long',\n 415: 'Unsupported Media Type',\n 416: 'Requested Range Not Satisfiable',\n 417: 'Expectation Failed',\n 418: 'I\\'m a teapot', # see RFC 2324\n 421: 'Misdirected Request', # see RFC 7540\n 422: 'Unprocessable Entity',\n 423: 'Locked',\n 424: 'Failed Dependency',\n 426: 'Upgrade Required',\n 428: 'Precondition Required', # see RFC 6585\n 429: 'Too Many Requests',\n 431: 'Request Header Fields Too Large',\n 449: 'Retry With', # proprietary MS extension\n 451: 'Unavailable For Legal Reasons',\n 500: 'Internal Server Error',\n 501: 'Not Implemented',\n 502: 'Bad Gateway',\n 503: 'Service Unavailable',\n 504: 'Gateway Timeout',\n 505: 'HTTP Version Not Supported',\n 507: 'Insufficient Storage',\n 510: 'Not Extended'\n}\n\n\ndef wsgi_to_bytes(data):\n \"\"\"coerce wsgi unicode represented bytes to real ones\n\n \"\"\"\n if isinstance(data, bytes):\n return data\n return data.encode('latin1') # XXX: utf8 fallback?\n\n\ndef bytes_to_wsgi(data):\n assert isinstance(data, bytes), 'data must be bytes'\n if isinstance(data, str):\n return data\n else:\n return data.decode('latin1')\n\n\ndef quote_header_value(value, extra_chars='', allow_token=True):\n \"\"\"Quote a header value if necessary.\n\n .. 
versionadded:: 0.5\n\n :param value: the value to quote.\n :param extra_chars: a list of extra characters to skip quoting.\n :param allow_token: if this is enabled token values are returned\n unchanged.\n \"\"\"\n if isinstance(value, bytes):\n value = bytes_to_wsgi(value)\n value = str(value)\n if allow_token:\n token_chars = _token_chars | set(extra_chars)\n if set(value).issubset(token_chars):\n return value\n return '\"%s\"' % value.replace('\\\\', '\\\\\\\\').replace('\"', '\\\\\"')\n\n\ndef unquote_header_value(value, is_filename=False):\n r\"\"\"Unquotes a header value. (Reversal of :func:`quote_header_value`).\n This does not use the real unquoting but what browsers are actually\n using for quoting.\n\n .. versionadded:: 0.5\n\n :param value: the header value to unquote.\n \"\"\"\n if value and value[0] == value[-1] == '\"':\n # this is not the real unquoting, but fixing this so that the\n # RFC is met will result in bugs with internet explorer and\n # probably some other browsers as well. IE for example is\n # uploading files with \"C:\\foo\\bar.txt\" as filename\n value = value[1:-1]\n\n # if this is a filename and the starting characters look like\n # a UNC path, then just return the value without quotes. Using the\n # replace sequence below on a UNC path has the effect of turning\n # the leading double slash into a single slash and then\n # _fix_ie_filename() doesn't work correctly. See #458.\n if not is_filename or value[:2] != '\\\\\\\\':\n return value.replace('\\\\\\\\', '\\\\').replace('\\\\\"', '\"')\n return value\n\n\ndef dump_options_header(header, options):\n \"\"\"The reverse function to :func:`parse_options_header`.\n\n :param header: the header to dump\n :param options: a dict of options to append.\n \"\"\"\n segments = []\n if header is not None:\n segments.append(header)\n for key, value in iteritems(options):\n if value is None:\n segments.append(key)\n else:\n segments.append('%s=%s' % (key, quote_header_value(value)))\n return '; '.join(segments)\n\n\ndef dump_header(iterable, allow_token=True):\n \"\"\"Dump an HTTP header again. This is the reversal of\n :func:`parse_list_header`, :func:`parse_set_header` and\n :func:`parse_dict_header`. This also quotes strings that include an\n equals sign unless you pass it as dict of key, value pairs.\n\n >>> dump_header({'foo': 'bar baz'})\n 'foo=\"bar baz\"'\n >>> dump_header(('foo', 'bar baz'))\n 'foo, \"bar baz\"'\n\n :param iterable: the iterable or dict of values to quote.\n :param allow_token: if set to `False` tokens as values are disallowed.\n See :func:`quote_header_value` for more details.\n \"\"\"\n if isinstance(iterable, dict):\n items = []\n for key, value in iteritems(iterable):\n if value is None:\n items.append(key)\n else:\n items.append('%s=%s' % (\n key,\n quote_header_value(value, allow_token=allow_token)\n ))\n else:\n items = [quote_header_value(x, allow_token=allow_token)\n for x in iterable]\n return ', '.join(items)\n\n\ndef parse_list_header(value):\n \"\"\"Parse lists as described by RFC 2068 Section 2.\n\n In particular, parse comma-separated lists where the elements of\n the list may include quoted-strings. A quoted-string could\n contain a comma. A non-quoted string could have quotes in the\n middle. 
Quotes are removed automatically after parsing.\n\n It basically works like :func:`parse_set_header` just that items\n may appear multiple times and case sensitivity is preserved.\n\n The return value is a standard :class:`list`:\n\n >>> parse_list_header('token, \"quoted value\"')\n ['token', 'quoted value']\n\n To create a header from the :class:`list` again, use the\n :func:`dump_header` function.\n\n :param value: a string with a list header.\n :return: :class:`list`\n \"\"\"\n result = []\n for item in _parse_list_header(value):\n if item[:1] == item[-1:] == '\"':\n item = unquote_header_value(item[1:-1])\n result.append(item)\n return result\n\n\ndef parse_dict_header(value, cls=dict):\n \"\"\"Parse lists of key, value pairs as described by RFC 2068 Section 2 and\n convert them into a python dict (or any other mapping object created from\n the type with a dict like interface provided by the `cls` argument):\n\n >>> d = parse_dict_header('foo=\"is a fish\", bar=\"as well\"')\n >>> type(d) is dict\n True\n >>> sorted(d.items())\n [('bar', 'as well'), ('foo', 'is a fish')]\n\n If there is no value for a key it will be `None`:\n\n >>> parse_dict_header('key_without_value')\n {'key_without_value': None}\n\n To create a header from the :class:`dict` again, use the\n :func:`dump_header` function.\n\n .. versionchanged:: 0.9\n Added support for `cls` argument.\n\n :param value: a string with a dict header.\n :param cls: callable to use for storage of parsed results.\n :return: an instance of `cls`\n \"\"\"\n result = cls()\n if not isinstance(value, text_type):\n # XXX: validate\n value = bytes_to_wsgi(value)\n for item in _parse_list_header(value):\n if '=' not in item:\n result[item] = None\n continue\n name, value = item.split('=', 1)\n if value[:1] == value[-1:] == '\"':\n value = unquote_header_value(value[1:-1])\n result[name] = value\n return result\n\n\ndef parse_options_header(value, multiple=False):\n \"\"\"Parse a ``Content-Type`` like header into a tuple with the content\n type and the options:\n\n >>> parse_options_header('text/html; charset=utf8')\n ('text/html', {'charset': 'utf8'})\n\n This should not be used to parse ``Cache-Control`` like headers that use\n a slightly different format. For these headers use the\n :func:`parse_dict_header` function.\n\n .. versionadded:: 0.5\n\n :param value: the header to parse.\n :param multiple: Whether try to parse and return multiple MIME types\n :return: (mimetype, options) or (mimetype, options, mimetype, options, …)\n if multiple=True\n \"\"\"\n if not value:\n return '', {}\n\n result = []\n\n value = \",\" + value.replace(\"\\n\", \",\")\n while value:\n match = _option_header_start_mime_type.match(value)\n if not match:\n break\n result.append(match.group(1)) # mimetype\n options = {}\n # Parse options\n rest = match.group(2)\n while rest:\n optmatch = _option_header_piece_re.match(rest)\n if not optmatch:\n break\n option, encoding, _, option_value = optmatch.groups()\n option = unquote_header_value(option)\n if option_value is not None:\n option_value = unquote_header_value(\n option_value,\n option == 'filename')\n if encoding is not None:\n option_value = _unquote(option_value).decode(encoding)\n options[option] = option_value\n rest = rest[optmatch.end():]\n result.append(options)\n if multiple is False:\n return tuple(result)\n value = rest\n\n return tuple(result) if result else ('', {})\n\n\ndef parse_accept_header(value, cls=None):\n \"\"\"Parses an HTTP Accept-* header. 
This does not implement a complete\n valid algorithm but one that supports at least value and quality\n extraction.\n\n Returns a new :class:`Accept` object (basically a list of ``(value, quality)``\n tuples sorted by the quality with some additional accessor methods).\n\n The second parameter can be a subclass of :class:`Accept` that is created\n with the parsed values and returned.\n\n :param value: the accept header string to be parsed.\n :param cls: the wrapper class for the return value (can be\n :class:`Accept` or a subclass thereof)\n :return: an instance of `cls`.\n \"\"\"\n if cls is None:\n cls = Accept\n\n if not value:\n return cls(None)\n\n result = []\n for match in _accept_re.finditer(value):\n quality = match.group(2)\n if not quality:\n quality = 1\n else:\n quality = max(min(float(quality), 1), 0)\n result.append((match.group(1), quality))\n return cls(result)\n\n\ndef parse_cache_control_header(value, on_update=None, cls=None):\n \"\"\"Parse a cache control header. The RFC differs between response and\n request cache control, this method does not. It's your responsibility\n to not use the wrong control statements.\n\n .. versionadded:: 0.5\n The `cls` was added. If not specified an immutable\n :class:`~werkzeug.datastructures.RequestCacheControl` is returned.\n\n :param value: a cache control header to be parsed.\n :param on_update: an optional callable that is called every time a value\n on the :class:`~werkzeug.datastructures.CacheControl`\n object is changed.\n :param cls: the class for the returned object. By default\n :class:`~werkzeug.datastructures.RequestCacheControl` is used.\n :return: a `cls` object.\n \"\"\"\n if cls is None:\n cls = RequestCacheControl\n if not value:\n return cls(None, on_update)\n return cls(parse_dict_header(value), on_update)\n\n\ndef parse_set_header(value, on_update=None):\n \"\"\"Parse a set-like header and return a\n :class:`~werkzeug.datastructures.HeaderSet` object:\n\n >>> hs = parse_set_header('token, \"quoted value\"')\n\n The return value is an object that treats the items case-insensitively\n and keeps the order of the items:\n\n >>> 'TOKEN' in hs\n True\n >>> hs.index('quoted value')\n 1\n >>> hs\n HeaderSet(['token', 'quoted value'])\n\n To create a header from the :class:`HeaderSet` again, use the\n :func:`dump_header` function.\n\n :param value: a set header to be parsed.\n :param on_update: an optional callable that is called every time a\n value on the :class:`~werkzeug.datastructures.HeaderSet`\n object is changed.\n :return: a :class:`~werkzeug.datastructures.HeaderSet`\n \"\"\"\n if not value:\n return HeaderSet(None, on_update)\n return HeaderSet(parse_list_header(value), on_update)\n\n\ndef parse_authorization_header(value):\n \"\"\"Parse an HTTP basic/digest authorization header transmitted by the web\n browser. 
The return value is either `None` if the header was invalid or\n not given, otherwise an :class:`~werkzeug.datastructures.Authorization`\n object.\n\n :param value: the authorization header to parse.\n :return: a :class:`~werkzeug.datastructures.Authorization` object or `None`.\n \"\"\"\n if not value:\n return\n value = wsgi_to_bytes(value)\n try:\n auth_type, auth_info = value.split(None, 1)\n auth_type = auth_type.lower()\n except ValueError:\n return\n if auth_type == b'basic':\n try:\n username, password = base64.b64decode(auth_info).split(b':', 1)\n except Exception:\n return\n return Authorization(\n 'basic', {\n 'username': to_unicode(username, _basic_auth_charset),\n 'password': to_unicode(password, _basic_auth_charset)\n }\n )\n elif auth_type == b'digest':\n auth_map = parse_dict_header(auth_info)\n for key in 'username', 'realm', 'nonce', 'uri', 'response':\n if key not in auth_map:\n return\n if 'qop' in auth_map:\n if not auth_map.get('nc') or not auth_map.get('cnonce'):\n return\n return Authorization('digest', auth_map)\n\n\ndef parse_www_authenticate_header(value, on_update=None):\n \"\"\"Parse an HTTP WWW-Authenticate header into a\n :class:`~werkzeug.datastructures.WWWAuthenticate` object.\n\n :param value: a WWW-Authenticate header to parse.\n :param on_update: an optional callable that is called every time a value\n on the :class:`~werkzeug.datastructures.WWWAuthenticate`\n object is changed.\n :return: a :class:`~werkzeug.datastructures.WWWAuthenticate` object.\n \"\"\"\n if not value:\n return WWWAuthenticate(on_update=on_update)\n try:\n auth_type, auth_info = value.split(None, 1)\n auth_type = auth_type.lower()\n except (ValueError, AttributeError):\n return WWWAuthenticate(value.strip().lower(), on_update=on_update)\n return WWWAuthenticate(auth_type, parse_dict_header(auth_info),\n on_update)\n\n\ndef parse_if_range_header(value):\n \"\"\"Parses an if-range header which can be an etag or a date. Returns\n a :class:`~werkzeug.datastructures.IfRange` object.\n\n .. versionadded:: 0.7\n \"\"\"\n if not value:\n return IfRange()\n date = parse_date(value)\n if date is not None:\n return IfRange(date=date)\n # drop weakness information\n return IfRange(unquote_etag(value)[0])\n\n\ndef parse_range_header(value, make_inclusive=True):\n \"\"\"Parses a range header into a :class:`~werkzeug.datastructures.Range`\n object. If the header is missing or malformed `None` is returned.\n `ranges` is a list of ``(start, stop)`` tuples where the ranges are\n non-inclusive.\n\n .. 
versionadded:: 0.7\n \"\"\"\n if not value or '=' not in value:\n return None\n\n ranges = []\n last_end = 0\n units, rng = value.split('=', 1)\n units = units.strip().lower()\n\n for item in rng.split(','):\n item = item.strip()\n if '-' not in item:\n return None\n if item.startswith('-'):\n if last_end < 0:\n return None\n try:\n begin = int(item)\n except ValueError:\n return None\n end = None\n last_end = -1\n elif '-' in item:\n begin, end = item.split('-', 1)\n begin = begin.strip()\n end = end.strip()\n if not begin.isdigit():\n return None\n begin = int(begin)\n if begin < last_end or last_end < 0:\n return None\n if end:\n if not end.isdigit():\n return None\n end = int(end) + 1\n if begin >= end:\n return None\n else:\n end = None\n last_end = end\n ranges.append((begin, end))\n\n return Range(units, ranges)\n\n\ndef parse_content_range_header(value, on_update=None):\n \"\"\"Parses a range header into a\n :class:`~werkzeug.datastructures.ContentRange` object or `None` if\n parsing is not possible.\n\n .. versionadded:: 0.7\n\n :param value: a content range header to be parsed.\n :param on_update: an optional callable that is called every time a value\n on the :class:`~werkzeug.datastructures.ContentRange`\n object is changed.\n \"\"\"\n if value is None:\n return None\n try:\n units, rangedef = (value or '').strip().split(None, 1)\n except ValueError:\n return None\n\n if '/' not in rangedef:\n return None\n rng, length = rangedef.split('/', 1)\n if length == '*':\n length = None\n elif length.isdigit():\n length = int(length)\n else:\n return None\n\n if rng == '*':\n return ContentRange(units, None, None, length, on_update=on_update)\n elif '-' not in rng:\n return None\n\n start, stop = rng.split('-', 1)\n try:\n start = int(start)\n stop = int(stop) + 1\n except ValueError:\n return None\n\n if is_byte_range_valid(start, stop, length):\n return ContentRange(units, start, stop, length, on_update=on_update)\n\n\ndef quote_etag(etag, weak=False):\n \"\"\"Quote an etag.\n\n :param etag: the etag to quote.\n :param weak: set to `True` to tag it \"weak\".\n \"\"\"\n if '\"' in etag:\n raise ValueError('invalid etag')\n etag = '\"%s\"' % etag\n if weak:\n etag = 'W/' + etag\n return etag\n\n\ndef unquote_etag(etag):\n \"\"\"Unquote a single etag:\n\n >>> unquote_etag('W/\"bar\"')\n ('bar', True)\n >>> unquote_etag('\"bar\"')\n ('bar', False)\n\n :param etag: the etag identifier to unquote.\n :return: a ``(etag, weak)`` tuple.\n \"\"\"\n if not etag:\n return None, None\n etag = etag.strip()\n weak = False\n if etag.startswith(('W/', 'w/')):\n weak = True\n etag = etag[2:]\n if etag[:1] == etag[-1:] == '\"':\n etag = etag[1:-1]\n return etag, weak\n\n\ndef parse_etags(value):\n \"\"\"Parse an etag header.\n\n :param value: the tag header to parse\n :return: an :class:`~werkzeug.datastructures.ETags` object.\n \"\"\"\n if not value:\n return ETags()\n strong = []\n weak = []\n end = len(value)\n pos = 0\n while pos < end:\n match = _etag_re.match(value, pos)\n if match is None:\n break\n is_weak, quoted, raw = match.groups()\n if raw == '*':\n return ETags(star_tag=True)\n elif quoted:\n raw = quoted\n if is_weak:\n weak.append(raw)\n else:\n strong.append(raw)\n pos = match.end()\n return ETags(strong, weak)\n\n\ndef generate_etag(data):\n \"\"\"Generate an etag for some data.\"\"\"\n return md5(data).hexdigest()\n\n\ndef parse_date(value):\n \"\"\"Parse one of the following date formats into a datetime object:\n\n .. 
sourcecode:: text\n\n Sun, 06 Nov 1994 08:49:37 GMT ; RFC 822, updated by RFC 1123\n Sunday, 06-Nov-94 08:49:37 GMT ; RFC 850, obsoleted by RFC 1036\n Sun Nov 6 08:49:37 1994 ; ANSI C's asctime() format\n\n If parsing fails the return value is `None`.\n\n :param value: a string with a supported date format.\n :return: a :class:`datetime.datetime` object.\n \"\"\"\n if value:\n t = parsedate_tz(value.strip())\n if t is not None:\n try:\n year = t[0]\n # unfortunately that function does not tell us if two digit\n # years were part of the string, or if they were prefixed\n # with two zeroes. So what we do is to assume that 69-99\n # refer to 1900, and everything below to 2000\n if year >= 0 and year <= 68:\n year += 2000\n elif year >= 69 and year <= 99:\n year += 1900\n return datetime(*((year,) + t[1:7])) - \\\n timedelta(seconds=t[-1] or 0)\n except (ValueError, OverflowError):\n return None\n\n\ndef _dump_date(d, delim):\n \"\"\"Used for `http_date` and `cookie_date`.\"\"\"\n if d is None:\n d = gmtime()\n elif isinstance(d, datetime):\n d = d.utctimetuple()\n elif isinstance(d, (integer_types, float)):\n d = gmtime(d)\n return '%s, %02d%s%s%s%s %02d:%02d:%02d GMT' % (\n ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun')[d.tm_wday],\n d.tm_mday, delim,\n ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep',\n 'Oct', 'Nov', 'Dec')[d.tm_mon - 1],\n delim, str(d.tm_year), d.tm_hour, d.tm_min, d.tm_sec\n )\n\n\ndef cookie_date(expires=None):\n \"\"\"Formats the time to ensure compatibility with Netscape's cookie\n standard.\n\n Accepts a floating point number expressed in seconds since the epoch in, a\n datetime object or a timetuple. All times in UTC. The :func:`parse_date`\n function can be used to parse such a date.\n\n Outputs a string in the format ``Wdy, DD-Mon-YYYY HH:MM:SS GMT``.\n\n :param expires: If provided that date is used, otherwise the current.\n \"\"\"\n return _dump_date(expires, '-')\n\n\ndef http_date(timestamp=None):\n \"\"\"Formats the time to match the RFC1123 date format.\n\n Accepts a floating point number expressed in seconds since the epoch in, a\n datetime object or a timetuple. All times in UTC. 
The :func:`parse_date`\n function can be used to parse such a date.\n\n Outputs a string in the format ``Wdy, DD Mon YYYY HH:MM:SS GMT``.\n\n :param timestamp: If provided that date is used, otherwise the current.\n \"\"\"\n return _dump_date(timestamp, ' ')\n\n\ndef parse_age(value=None):\n \"\"\"Parses a base-10 integer count of seconds into a timedelta.\n\n If parsing fails, the return value is `None`.\n\n :param value: a string consisting of an integer represented in base-10\n :return: a :class:`datetime.timedelta` object or `None`.\n \"\"\"\n if not value:\n return None\n try:\n seconds = int(value)\n except ValueError:\n return None\n if seconds < 0:\n return None\n try:\n return timedelta(seconds=seconds)\n except OverflowError:\n return None\n\n\ndef dump_age(age=None):\n \"\"\"Formats the duration as a base-10 integer.\n\n :param age: should be an integer number of seconds,\n a :class:`datetime.timedelta` object, or,\n if the age is unknown, `None` (default).\n \"\"\"\n if age is None:\n return\n if isinstance(age, timedelta):\n # do the equivalent of Python 2.7's timedelta.total_seconds(),\n # but disregarding fractional seconds\n age = age.seconds + (age.days * 24 * 3600)\n\n age = int(age)\n if age < 0:\n raise ValueError('age cannot be negative')\n\n return str(age)\n\n\ndef is_resource_modified(environ, etag=None, data=None, last_modified=None,\n ignore_if_range=True):\n \"\"\"Convenience method for conditional requests.\n\n :param environ: the WSGI environment of the request to be checked.\n :param etag: the etag for the response for comparison.\n :param data: or alternatively the data of the response to automatically\n generate an etag using :func:`generate_etag`.\n :param last_modified: an optional date of the last modification.\n :param ignore_if_range: If `False`, `If-Range` header will be taken into\n account.\n :return: `True` if the resource was modified, otherwise `False`.\n \"\"\"\n if etag is None and data is not None:\n etag = generate_etag(data)\n elif data is not None:\n raise TypeError('both data and etag given')\n if environ['REQUEST_METHOD'] not in ('GET', 'HEAD'):\n return False\n\n unmodified = False\n if isinstance(last_modified, string_types):\n last_modified = parse_date(last_modified)\n\n # ensure that microsecond is zero because the HTTP spec does not transmit\n # that either and we might have some false positives. 
See issue #39\n if last_modified is not None:\n last_modified = last_modified.replace(microsecond=0)\n\n if_range = None\n if not ignore_if_range and 'HTTP_RANGE' in environ:\n # https://tools.ietf.org/html/rfc7233#section-3.2\n # A server MUST ignore an If-Range header field received in a request\n # that does not contain a Range header field.\n if_range = parse_if_range_header(environ.get('HTTP_IF_RANGE'))\n\n if if_range is not None and if_range.date is not None:\n modified_since = if_range.date\n else:\n modified_since = parse_date(environ.get('HTTP_IF_MODIFIED_SINCE'))\n\n if modified_since and last_modified and last_modified <= modified_since:\n unmodified = True\n\n if etag:\n etag, _ = unquote_etag(etag)\n if if_range is not None and if_range.etag is not None:\n unmodified = parse_etags(if_range.etag).contains(etag)\n else:\n if_none_match = parse_etags(environ.get('HTTP_IF_NONE_MATCH'))\n if if_none_match:\n # https://tools.ietf.org/html/rfc7232#section-3.2\n # \"A recipient MUST use the weak comparison function when comparing\n # entity-tags for If-None-Match\"\n unmodified = if_none_match.contains_weak(etag)\n\n # https://tools.ietf.org/html/rfc7232#section-3.1\n # \"Origin server MUST use the strong comparison function when\n # comparing entity-tags for If-Match\"\n if_match = parse_etags(environ.get('HTTP_IF_MATCH'))\n if if_match:\n unmodified = not if_match.is_strong(etag)\n\n return not unmodified\n\n\ndef remove_entity_headers(headers, allowed=('expires', 'content-location')):\n \"\"\"Remove all entity headers from a list or :class:`Headers` object. This\n operation works in-place. `Expires` and `Content-Location` headers are\n by default not removed. The reason for this is :rfc:`2616` section\n 10.3.5 which specifies some entity headers that should be sent.\n\n .. versionchanged:: 0.5\n added `allowed` parameter.\n\n :param headers: a list or :class:`Headers` object.\n :param allowed: a list of headers that should still be allowed even though\n they are entity headers.\n \"\"\"\n allowed = set(x.lower() for x in allowed)\n headers[:] = [(key, value) for key, value in headers if\n not is_entity_header(key) or key.lower() in allowed]\n\n\ndef remove_hop_by_hop_headers(headers):\n \"\"\"Remove all HTTP/1.1 \"Hop-by-Hop\" headers from a list or\n :class:`Headers` object. This operation works in-place.\n\n .. versionadded:: 0.5\n\n :param headers: a list or :class:`Headers` object.\n \"\"\"\n headers[:] = [(key, value) for key, value in headers if\n not is_hop_by_hop_header(key)]\n\n\ndef is_entity_header(header):\n \"\"\"Check if a header is an entity header.\n\n .. versionadded:: 0.5\n\n :param header: the header to test.\n :return: `True` if it's an entity header, `False` otherwise.\n \"\"\"\n return header.lower() in _entity_headers\n\n\ndef is_hop_by_hop_header(header):\n \"\"\"Check if a header is an HTTP/1.1 \"Hop-by-Hop\" header.\n\n .. versionadded:: 0.5\n\n :param header: the header to test.\n :return: `True` if it's an HTTP/1.1 \"Hop-by-Hop\" header, `False` otherwise.\n \"\"\"\n return header.lower() in _hop_by_hop_headers\n\n\ndef parse_cookie(header, charset='utf-8', errors='replace', cls=None):\n \"\"\"Parse a cookie. Either from a string or WSGI environ.\n\n Per default encoding errors are ignored. If you want a different behavior\n you can set `errors` to ``'replace'`` or ``'strict'``. In strict mode a\n :exc:`HTTPUnicodeError` is raised.\n\n .. versionchanged:: 0.5\n This function now returns a :class:`TypeConversionDict` instead of a\n regular dict. 
The `cls` parameter was added.\n\n :param header: the header to be used to parse the cookie. Alternatively\n this can be a WSGI environment.\n :param charset: the charset for the cookie values.\n :param errors: the error behavior for the charset decoding.\n :param cls: an optional dict class to use. If this is not specified\n or `None` the default :class:`TypeConversionDict` is\n used.\n \"\"\"\n if isinstance(header, dict):\n header = header.get('HTTP_COOKIE', '')\n elif header is None:\n header = ''\n\n # If the value is an unicode string it's mangled through latin1. This\n # is done because on PEP 3333 on Python 3 all headers are assumed latin1\n # which however is incorrect for cookies, which are sent in page encoding.\n # As a result we\n if isinstance(header, text_type):\n header = header.encode('latin1', 'replace')\n\n if cls is None:\n cls = TypeConversionDict\n\n def _parse_pairs():\n for key, val in _cookie_parse_impl(header):\n key = to_unicode(key, charset, errors, allow_none_charset=True)\n if not key:\n continue\n val = to_unicode(val, charset, errors, allow_none_charset=True)\n yield try_coerce_native(key), val\n\n return cls(_parse_pairs())\n\n\ndef dump_cookie(key, value='', max_age=None, expires=None, path='/',\n domain=None, secure=False, httponly=False,\n charset='utf-8', sync_expires=True, max_size=4093,\n samesite=None):\n \"\"\"Creates a new Set-Cookie header without the ``Set-Cookie`` prefix\n The parameters are the same as in the cookie Morsel object in the\n Python standard library but it accepts unicode data, too.\n\n On Python 3 the return value of this function will be a unicode\n string, on Python 2 it will be a native string. In both cases the\n return value is usually restricted to ascii as the vast majority of\n values are properly escaped, but that is no guarantee. If a unicode\n string is returned it's tunneled through latin1 as required by\n PEP 3333.\n\n The return value is not ASCII safe if the key contains unicode\n characters. This is technically against the specification but\n happens in the wild. It's strongly recommended to not use\n non-ASCII values for the keys.\n\n :param max_age: should be a number of seconds, or `None` (default) if\n the cookie should last only as long as the client's\n browser session. Additionally `timedelta` objects\n are accepted, too.\n :param expires: should be a `datetime` object or unix timestamp.\n :param path: limits the cookie to a given path, per default it will\n span the whole domain.\n :param domain: Use this if you want to set a cross-domain cookie. For\n example, ``domain=\".example.com\"`` will set a cookie\n that is readable by the domain ``www.example.com``,\n ``foo.example.com`` etc. Otherwise, a cookie will only\n be readable by the domain that set it.\n :param secure: The cookie will only be available via HTTPS\n :param httponly: disallow JavaScript to access the cookie. This is an\n extension to the cookie standard and probably not\n supported by all browsers.\n :param charset: the encoding for unicode values.\n :param sync_expires: automatically set expires if max_age is defined\n but expires not.\n :param max_size: Warn if the final header value exceeds this size. The\n default, 4093, should be safely `supported by most browsers\n <cookie_>`_. Set to 0 to disable this check.\n :param samesite: Limits the scope of the cookie such that it will only\n be attached to requests if those requests are \"same-site\".\n\n .. 
_`cookie`: http://browsercookielimits.squawky.net/\n \"\"\"\n key = to_bytes(key, charset)\n value = to_bytes(value, charset)\n\n if path is not None:\n path = iri_to_uri(path, charset)\n domain = _make_cookie_domain(domain)\n if isinstance(max_age, timedelta):\n max_age = (max_age.days * 60 * 60 * 24) + max_age.seconds\n if expires is not None:\n if not isinstance(expires, string_types):\n expires = cookie_date(expires)\n elif max_age is not None and sync_expires:\n expires = to_bytes(cookie_date(time() + max_age))\n\n samesite = samesite.title() if samesite else None\n if samesite not in ('Strict', 'Lax', None):\n raise ValueError(\"invalid SameSite value; must be 'Strict', 'Lax' or None\")\n\n buf = [key + b'=' + _cookie_quote(value)]\n\n # XXX: In theory all of these parameters that are not marked with `None`\n # should be quoted. Because stdlib did not quote it before I did not\n # want to introduce quoting there now.\n for k, v, q in ((b'Domain', domain, True),\n (b'Expires', expires, False,),\n (b'Max-Age', max_age, False),\n (b'Secure', secure, None),\n (b'HttpOnly', httponly, None),\n (b'Path', path, False),\n (b'SameSite', samesite, False)):\n if q is None:\n if v:\n buf.append(k)\n continue\n\n if v is None:\n continue\n\n tmp = bytearray(k)\n if not isinstance(v, (bytes, bytearray)):\n v = to_bytes(text_type(v), charset)\n if q:\n v = _cookie_quote(v)\n tmp += b'=' + v\n buf.append(bytes(tmp))\n\n # The return value will be an incorrectly encoded latin1 header on\n # Python 3 for consistency with the headers object and a bytestring\n # on Python 2 because that's how the API makes more sense.\n rv = b'; '.join(buf)\n if not PY2:\n rv = rv.decode('latin1')\n\n # Warn if the final value of the cookie is less than the limit. If the\n # cookie is too large, then it may be silently ignored, which can be quite\n # hard to debug.\n cookie_size = len(rv)\n\n if max_size and cookie_size > max_size:\n value_size = len(value)\n warnings.warn(\n 'The \"{key}\" cookie is too large: the value was {value_size} bytes'\n ' but the header required {extra_size} extra bytes. The final size'\n ' was {cookie_size} bytes but the limit is {max_size} bytes.'\n ' Browsers may silently ignore cookies larger than this.'.format(\n key=key,\n value_size=value_size,\n extra_size=cookie_size - value_size,\n cookie_size=cookie_size,\n max_size=max_size\n ),\n stacklevel=2\n )\n\n return rv\n\n\ndef is_byte_range_valid(start, stop, length):\n \"\"\"Checks if a given byte content range is valid for the given length.\n\n .. versionadded:: 0.7\n \"\"\"\n if (start is None) != (stop is None):\n return False\n elif start is None:\n return length is None or length >= 0\n elif length is None:\n return 0 <= start < stop\n elif start >= stop:\n return False\n return 0 <= start < length\n\n\n# circular dependency fun\nfrom werkzeug.datastructures import Accept, HeaderSet, ETags, Authorization, \\\n WWWAuthenticate, TypeConversionDict, IfRange, Range, ContentRange, \\\n RequestCacheControl\n\n\n# DEPRECATED\n# backwards compatible imports\nfrom werkzeug.datastructures import ( # noqa\n MIMEAccept, CharsetAccept, LanguageAccept, Headers\n)\nfrom werkzeug.urls import iri_to_uri\n", "path": "werkzeug/http.py" } ]
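As a quick orientation to the werkzeug/http.py content stored above, here is a minimal usage sketch of its header-parsing helpers. This is illustrative only and not part of the stored file; it assumes a werkzeug build matching this source is importable, and the values mirror the docstrings in the file.

from werkzeug.http import parse_options_header, parse_range_header

# Content-Type style headers split into (mimetype, options), as the docstring shows.
mimetype, options = parse_options_header('text/html; charset=utf8')
assert mimetype == 'text/html'
assert options == {'charset': 'utf8'}

# Range headers parse into a Range object holding non-inclusive (start, stop) tuples,
# so "bytes=0-499" covers bytes 0..499 and is stored as (0, 500).
rng = parse_range_header('bytes=0-499')
assert rng.units == 'bytes'
assert rng.ranges == [(0, 500)]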
[ { "content": "# -*- coding: utf-8 -*-\n\"\"\"\n werkzeug.http\n ~~~~~~~~~~~~~\n\n Werkzeug comes with a bunch of utilities that help Werkzeug to deal with\n HTTP data. Most of the classes and functions provided by this module are\n used by the wrappers, but they are useful on their own, too, especially if\n the response and request objects are not used.\n\n This covers some of the more HTTP centric features of WSGI, some other\n utilities such as cookie handling are documented in the `werkzeug.utils`\n module.\n\n\n :copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.\n :license: BSD, see LICENSE for more details.\n\"\"\"\nimport re\nimport warnings\nfrom time import time, gmtime\ntry:\n from email.utils import parsedate_tz\nexcept ImportError: # pragma: no cover\n from email.Utils import parsedate_tz\ntry:\n from urllib.request import parse_http_list as _parse_list_header\n from urllib.parse import unquote_to_bytes as _unquote\nexcept ImportError: # pragma: no cover\n from urllib2 import parse_http_list as _parse_list_header, \\\n unquote as _unquote\nfrom datetime import datetime, timedelta\nfrom hashlib import md5\nimport base64\n\nfrom werkzeug._internal import _cookie_quote, _make_cookie_domain, \\\n _cookie_parse_impl\nfrom werkzeug._compat import to_unicode, iteritems, text_type, \\\n string_types, try_coerce_native, to_bytes, PY2, \\\n integer_types\n\n\n_cookie_charset = 'latin1'\n_basic_auth_charset = 'utf-8'\n# for explanation of \"media-range\", etc. see Sections 5.3.{1,2} of RFC 7231\n_accept_re = re.compile(\n r'''( # media-range capturing-parenthesis\n [^\\s;,]+ # type/subtype\n (?:[ \\t]*;[ \\t]* # \";\"\n (?: # parameter non-capturing-parenthesis\n [^\\s;,q][^\\s;,]* # token that doesn't start with \"q\"\n | # or\n q[^\\s;,=][^\\s;,]* # token that is more than just \"q\"\n )\n )* # zero or more parameters\n ) # end of media-range\n (?:[ \\t]*;[ \\t]*q= # weight is a \"q\" parameter\n (\\d*(?:\\.\\d+)?) # qvalue capturing-parentheses\n [^,]* # \"extension\" accept params: who cares?\n )? 
# accept params are optional\n ''', re.VERBOSE)\n_token_chars = frozenset(\"!#$%&'*+-.0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n '^_`abcdefghijklmnopqrstuvwxyz|~')\n_etag_re = re.compile(r'([Ww]/)?(?:\"(.*?)\"|(.*?))(?:\\s*,\\s*|$)')\n_unsafe_header_chars = set('()<>@,;:\\\"/[]?={} \\t')\n_option_header_piece_re = re.compile(r'''\n ;\\s*\n (?P<key>\n \"[^\"\\\\]*(?:\\\\.[^\"\\\\]*)*\" # quoted string\n |\n [^\\s;,=*]+ # token\n )\n \\s*\n (?: # optionally followed by =value\n (?: # equals sign, possibly with encoding\n \\*\\s*=\\s* # * indicates extended notation\n (?P<encoding>[^\\s]+?)\n '(?P<language>[^\\s]*?)'\n |\n =\\s* # basic notation\n )\n (?P<value>\n \"[^\"\\\\]*(?:\\\\.[^\"\\\\]*)*\" # quoted string\n |\n [^;,]+ # token\n )?\n )?\n \\s*\n''', flags=re.VERBOSE)\n_option_header_start_mime_type = re.compile(r',\\s*([^;,\\s]+)([;,]\\s*.+)?')\n\n_entity_headers = frozenset([\n 'allow', 'content-encoding', 'content-language', 'content-length',\n 'content-location', 'content-md5', 'content-range', 'content-type',\n 'expires', 'last-modified'\n])\n_hop_by_hop_headers = frozenset([\n 'connection', 'keep-alive', 'proxy-authenticate',\n 'proxy-authorization', 'te', 'trailer', 'transfer-encoding',\n 'upgrade'\n])\n\n\nHTTP_STATUS_CODES = {\n 100: 'Continue',\n 101: 'Switching Protocols',\n 102: 'Processing',\n 200: 'OK',\n 201: 'Created',\n 202: 'Accepted',\n 203: 'Non Authoritative Information',\n 204: 'No Content',\n 205: 'Reset Content',\n 206: 'Partial Content',\n 207: 'Multi Status',\n 226: 'IM Used', # see RFC 3229\n 300: 'Multiple Choices',\n 301: 'Moved Permanently',\n 302: 'Found',\n 303: 'See Other',\n 304: 'Not Modified',\n 305: 'Use Proxy',\n 307: 'Temporary Redirect',\n 308: 'Permanent Redirect',\n 400: 'Bad Request',\n 401: 'Unauthorized',\n 402: 'Payment Required', # unused\n 403: 'Forbidden',\n 404: 'Not Found',\n 405: 'Method Not Allowed',\n 406: 'Not Acceptable',\n 407: 'Proxy Authentication Required',\n 408: 'Request Timeout',\n 409: 'Conflict',\n 410: 'Gone',\n 411: 'Length Required',\n 412: 'Precondition Failed',\n 413: 'Request Entity Too Large',\n 414: 'Request URI Too Long',\n 415: 'Unsupported Media Type',\n 416: 'Requested Range Not Satisfiable',\n 417: 'Expectation Failed',\n 418: 'I\\'m a teapot', # see RFC 2324\n 421: 'Misdirected Request', # see RFC 7540\n 422: 'Unprocessable Entity',\n 423: 'Locked',\n 424: 'Failed Dependency',\n 426: 'Upgrade Required',\n 428: 'Precondition Required', # see RFC 6585\n 429: 'Too Many Requests',\n 431: 'Request Header Fields Too Large',\n 449: 'Retry With', # proprietary MS extension\n 451: 'Unavailable For Legal Reasons',\n 500: 'Internal Server Error',\n 501: 'Not Implemented',\n 502: 'Bad Gateway',\n 503: 'Service Unavailable',\n 504: 'Gateway Timeout',\n 505: 'HTTP Version Not Supported',\n 507: 'Insufficient Storage',\n 510: 'Not Extended'\n}\n\n\ndef wsgi_to_bytes(data):\n \"\"\"coerce wsgi unicode represented bytes to real ones\n\n \"\"\"\n if isinstance(data, bytes):\n return data\n return data.encode('latin1') # XXX: utf8 fallback?\n\n\ndef bytes_to_wsgi(data):\n assert isinstance(data, bytes), 'data must be bytes'\n if isinstance(data, str):\n return data\n else:\n return data.decode('latin1')\n\n\ndef quote_header_value(value, extra_chars='', allow_token=True):\n \"\"\"Quote a header value if necessary.\n\n .. 
versionadded:: 0.5\n\n :param value: the value to quote.\n :param extra_chars: a list of extra characters to skip quoting.\n :param allow_token: if this is enabled token values are returned\n unchanged.\n \"\"\"\n if isinstance(value, bytes):\n value = bytes_to_wsgi(value)\n value = str(value)\n if allow_token:\n token_chars = _token_chars | set(extra_chars)\n if set(value).issubset(token_chars):\n return value\n return '\"%s\"' % value.replace('\\\\', '\\\\\\\\').replace('\"', '\\\\\"')\n\n\ndef unquote_header_value(value, is_filename=False):\n r\"\"\"Unquotes a header value. (Reversal of :func:`quote_header_value`).\n This does not use the real unquoting but what browsers are actually\n using for quoting.\n\n .. versionadded:: 0.5\n\n :param value: the header value to unquote.\n \"\"\"\n if value and value[0] == value[-1] == '\"':\n # this is not the real unquoting, but fixing this so that the\n # RFC is met will result in bugs with internet explorer and\n # probably some other browsers as well. IE for example is\n # uploading files with \"C:\\foo\\bar.txt\" as filename\n value = value[1:-1]\n\n # if this is a filename and the starting characters look like\n # a UNC path, then just return the value without quotes. Using the\n # replace sequence below on a UNC path has the effect of turning\n # the leading double slash into a single slash and then\n # _fix_ie_filename() doesn't work correctly. See #458.\n if not is_filename or value[:2] != '\\\\\\\\':\n return value.replace('\\\\\\\\', '\\\\').replace('\\\\\"', '\"')\n return value\n\n\ndef dump_options_header(header, options):\n \"\"\"The reverse function to :func:`parse_options_header`.\n\n :param header: the header to dump\n :param options: a dict of options to append.\n \"\"\"\n segments = []\n if header is not None:\n segments.append(header)\n for key, value in iteritems(options):\n if value is None:\n segments.append(key)\n else:\n segments.append('%s=%s' % (key, quote_header_value(value)))\n return '; '.join(segments)\n\n\ndef dump_header(iterable, allow_token=True):\n \"\"\"Dump an HTTP header again. This is the reversal of\n :func:`parse_list_header`, :func:`parse_set_header` and\n :func:`parse_dict_header`. This also quotes strings that include an\n equals sign unless you pass it as dict of key, value pairs.\n\n >>> dump_header({'foo': 'bar baz'})\n 'foo=\"bar baz\"'\n >>> dump_header(('foo', 'bar baz'))\n 'foo, \"bar baz\"'\n\n :param iterable: the iterable or dict of values to quote.\n :param allow_token: if set to `False` tokens as values are disallowed.\n See :func:`quote_header_value` for more details.\n \"\"\"\n if isinstance(iterable, dict):\n items = []\n for key, value in iteritems(iterable):\n if value is None:\n items.append(key)\n else:\n items.append('%s=%s' % (\n key,\n quote_header_value(value, allow_token=allow_token)\n ))\n else:\n items = [quote_header_value(x, allow_token=allow_token)\n for x in iterable]\n return ', '.join(items)\n\n\ndef parse_list_header(value):\n \"\"\"Parse lists as described by RFC 2068 Section 2.\n\n In particular, parse comma-separated lists where the elements of\n the list may include quoted-strings. A quoted-string could\n contain a comma. A non-quoted string could have quotes in the\n middle. 
Quotes are removed automatically after parsing.\n\n It basically works like :func:`parse_set_header` just that items\n may appear multiple times and case sensitivity is preserved.\n\n The return value is a standard :class:`list`:\n\n >>> parse_list_header('token, \"quoted value\"')\n ['token', 'quoted value']\n\n To create a header from the :class:`list` again, use the\n :func:`dump_header` function.\n\n :param value: a string with a list header.\n :return: :class:`list`\n \"\"\"\n result = []\n for item in _parse_list_header(value):\n if item[:1] == item[-1:] == '\"':\n item = unquote_header_value(item[1:-1])\n result.append(item)\n return result\n\n\ndef parse_dict_header(value, cls=dict):\n \"\"\"Parse lists of key, value pairs as described by RFC 2068 Section 2 and\n convert them into a python dict (or any other mapping object created from\n the type with a dict like interface provided by the `cls` argument):\n\n >>> d = parse_dict_header('foo=\"is a fish\", bar=\"as well\"')\n >>> type(d) is dict\n True\n >>> sorted(d.items())\n [('bar', 'as well'), ('foo', 'is a fish')]\n\n If there is no value for a key it will be `None`:\n\n >>> parse_dict_header('key_without_value')\n {'key_without_value': None}\n\n To create a header from the :class:`dict` again, use the\n :func:`dump_header` function.\n\n .. versionchanged:: 0.9\n Added support for `cls` argument.\n\n :param value: a string with a dict header.\n :param cls: callable to use for storage of parsed results.\n :return: an instance of `cls`\n \"\"\"\n result = cls()\n if not isinstance(value, text_type):\n # XXX: validate\n value = bytes_to_wsgi(value)\n for item in _parse_list_header(value):\n if '=' not in item:\n result[item] = None\n continue\n name, value = item.split('=', 1)\n if value[:1] == value[-1:] == '\"':\n value = unquote_header_value(value[1:-1])\n result[name] = value\n return result\n\n\ndef parse_options_header(value, multiple=False):\n \"\"\"Parse a ``Content-Type`` like header into a tuple with the content\n type and the options:\n\n >>> parse_options_header('text/html; charset=utf8')\n ('text/html', {'charset': 'utf8'})\n\n This should not be used to parse ``Cache-Control`` like headers that use\n a slightly different format. For these headers use the\n :func:`parse_dict_header` function.\n\n .. versionadded:: 0.5\n\n :param value: the header to parse.\n :param multiple: Whether try to parse and return multiple MIME types\n :return: (mimetype, options) or (mimetype, options, mimetype, options, …)\n if multiple=True\n \"\"\"\n if not value:\n return '', {}\n\n result = []\n\n value = \",\" + value.replace(\"\\n\", \",\")\n while value:\n match = _option_header_start_mime_type.match(value)\n if not match:\n break\n result.append(match.group(1)) # mimetype\n options = {}\n # Parse options\n rest = match.group(2)\n while rest:\n optmatch = _option_header_piece_re.match(rest)\n if not optmatch:\n break\n option, encoding, _, option_value = optmatch.groups()\n option = unquote_header_value(option)\n if option_value is not None:\n option_value = unquote_header_value(\n option_value,\n option == 'filename')\n if encoding is not None:\n option_value = _unquote(option_value).decode(encoding)\n options[option] = option_value\n rest = rest[optmatch.end():]\n result.append(options)\n if multiple is False:\n return tuple(result)\n value = rest\n\n return tuple(result) if result else ('', {})\n\n\ndef parse_accept_header(value, cls=None):\n \"\"\"Parses an HTTP Accept-* header. 
This does not implement a complete\n valid algorithm but one that supports at least value and quality\n extraction.\n\n Returns a new :class:`Accept` object (basically a list of ``(value, quality)``\n tuples sorted by the quality with some additional accessor methods).\n\n The second parameter can be a subclass of :class:`Accept` that is created\n with the parsed values and returned.\n\n :param value: the accept header string to be parsed.\n :param cls: the wrapper class for the return value (can be\n :class:`Accept` or a subclass thereof)\n :return: an instance of `cls`.\n \"\"\"\n if cls is None:\n cls = Accept\n\n if not value:\n return cls(None)\n\n result = []\n for match in _accept_re.finditer(value):\n quality = match.group(2)\n if not quality:\n quality = 1\n else:\n quality = max(min(float(quality), 1), 0)\n result.append((match.group(1), quality))\n return cls(result)\n\n\ndef parse_cache_control_header(value, on_update=None, cls=None):\n \"\"\"Parse a cache control header. The RFC differs between response and\n request cache control, this method does not. It's your responsibility\n to not use the wrong control statements.\n\n .. versionadded:: 0.5\n The `cls` was added. If not specified an immutable\n :class:`~werkzeug.datastructures.RequestCacheControl` is returned.\n\n :param value: a cache control header to be parsed.\n :param on_update: an optional callable that is called every time a value\n on the :class:`~werkzeug.datastructures.CacheControl`\n object is changed.\n :param cls: the class for the returned object. By default\n :class:`~werkzeug.datastructures.RequestCacheControl` is used.\n :return: a `cls` object.\n \"\"\"\n if cls is None:\n cls = RequestCacheControl\n if not value:\n return cls(None, on_update)\n return cls(parse_dict_header(value), on_update)\n\n\ndef parse_set_header(value, on_update=None):\n \"\"\"Parse a set-like header and return a\n :class:`~werkzeug.datastructures.HeaderSet` object:\n\n >>> hs = parse_set_header('token, \"quoted value\"')\n\n The return value is an object that treats the items case-insensitively\n and keeps the order of the items:\n\n >>> 'TOKEN' in hs\n True\n >>> hs.index('quoted value')\n 1\n >>> hs\n HeaderSet(['token', 'quoted value'])\n\n To create a header from the :class:`HeaderSet` again, use the\n :func:`dump_header` function.\n\n :param value: a set header to be parsed.\n :param on_update: an optional callable that is called every time a\n value on the :class:`~werkzeug.datastructures.HeaderSet`\n object is changed.\n :return: a :class:`~werkzeug.datastructures.HeaderSet`\n \"\"\"\n if not value:\n return HeaderSet(None, on_update)\n return HeaderSet(parse_list_header(value), on_update)\n\n\ndef parse_authorization_header(value):\n \"\"\"Parse an HTTP basic/digest authorization header transmitted by the web\n browser. 
The return value is either `None` if the header was invalid or\n not given, otherwise an :class:`~werkzeug.datastructures.Authorization`\n object.\n\n :param value: the authorization header to parse.\n :return: a :class:`~werkzeug.datastructures.Authorization` object or `None`.\n \"\"\"\n if not value:\n return\n value = wsgi_to_bytes(value)\n try:\n auth_type, auth_info = value.split(None, 1)\n auth_type = auth_type.lower()\n except ValueError:\n return\n if auth_type == b'basic':\n try:\n username, password = base64.b64decode(auth_info).split(b':', 1)\n except Exception:\n return\n return Authorization(\n 'basic', {\n 'username': to_unicode(username, _basic_auth_charset),\n 'password': to_unicode(password, _basic_auth_charset)\n }\n )\n elif auth_type == b'digest':\n auth_map = parse_dict_header(auth_info)\n for key in 'username', 'realm', 'nonce', 'uri', 'response':\n if key not in auth_map:\n return\n if 'qop' in auth_map:\n if not auth_map.get('nc') or not auth_map.get('cnonce'):\n return\n return Authorization('digest', auth_map)\n\n\ndef parse_www_authenticate_header(value, on_update=None):\n \"\"\"Parse an HTTP WWW-Authenticate header into a\n :class:`~werkzeug.datastructures.WWWAuthenticate` object.\n\n :param value: a WWW-Authenticate header to parse.\n :param on_update: an optional callable that is called every time a value\n on the :class:`~werkzeug.datastructures.WWWAuthenticate`\n object is changed.\n :return: a :class:`~werkzeug.datastructures.WWWAuthenticate` object.\n \"\"\"\n if not value:\n return WWWAuthenticate(on_update=on_update)\n try:\n auth_type, auth_info = value.split(None, 1)\n auth_type = auth_type.lower()\n except (ValueError, AttributeError):\n return WWWAuthenticate(value.strip().lower(), on_update=on_update)\n return WWWAuthenticate(auth_type, parse_dict_header(auth_info),\n on_update)\n\n\ndef parse_if_range_header(value):\n \"\"\"Parses an if-range header which can be an etag or a date. Returns\n a :class:`~werkzeug.datastructures.IfRange` object.\n\n .. versionadded:: 0.7\n \"\"\"\n if not value:\n return IfRange()\n date = parse_date(value)\n if date is not None:\n return IfRange(date=date)\n # drop weakness information\n return IfRange(unquote_etag(value)[0])\n\n\ndef parse_range_header(value, make_inclusive=True):\n \"\"\"Parses a range header into a :class:`~werkzeug.datastructures.Range`\n object. If the header is missing or malformed `None` is returned.\n `ranges` is a list of ``(start, stop)`` tuples where the ranges are\n non-inclusive.\n\n .. 
versionadded:: 0.7\n \"\"\"\n if not value or '=' not in value:\n return None\n\n ranges = []\n last_end = 0\n units, rng = value.split('=', 1)\n units = units.strip().lower()\n\n for item in rng.split(','):\n item = item.strip()\n if '-' not in item:\n return None\n if item.startswith('-'):\n if last_end < 0:\n return None\n try:\n begin = int(item)\n except ValueError:\n return None\n end = None\n last_end = -1\n elif '-' in item:\n begin, end = item.split('-', 1)\n begin = begin.strip()\n end = end.strip()\n if not begin.isdigit():\n return None\n begin = int(begin)\n if begin < last_end or last_end < 0:\n return None\n if end:\n if not end.isdigit():\n return None\n end = int(end) + 1\n if begin >= end:\n return None\n else:\n end = None\n last_end = end\n ranges.append((begin, end))\n\n return Range(units, ranges)\n\n\ndef parse_content_range_header(value, on_update=None):\n \"\"\"Parses a range header into a\n :class:`~werkzeug.datastructures.ContentRange` object or `None` if\n parsing is not possible.\n\n .. versionadded:: 0.7\n\n :param value: a content range header to be parsed.\n :param on_update: an optional callable that is called every time a value\n on the :class:`~werkzeug.datastructures.ContentRange`\n object is changed.\n \"\"\"\n if value is None:\n return None\n try:\n units, rangedef = (value or '').strip().split(None, 1)\n except ValueError:\n return None\n\n if '/' not in rangedef:\n return None\n rng, length = rangedef.split('/', 1)\n if length == '*':\n length = None\n elif length.isdigit():\n length = int(length)\n else:\n return None\n\n if rng == '*':\n return ContentRange(units, None, None, length, on_update=on_update)\n elif '-' not in rng:\n return None\n\n start, stop = rng.split('-', 1)\n try:\n start = int(start)\n stop = int(stop) + 1\n except ValueError:\n return None\n\n if is_byte_range_valid(start, stop, length):\n return ContentRange(units, start, stop, length, on_update=on_update)\n\n\ndef quote_etag(etag, weak=False):\n \"\"\"Quote an etag.\n\n :param etag: the etag to quote.\n :param weak: set to `True` to tag it \"weak\".\n \"\"\"\n if '\"' in etag:\n raise ValueError('invalid etag')\n etag = '\"%s\"' % etag\n if weak:\n etag = 'W/' + etag\n return etag\n\n\ndef unquote_etag(etag):\n \"\"\"Unquote a single etag:\n\n >>> unquote_etag('W/\"bar\"')\n ('bar', True)\n >>> unquote_etag('\"bar\"')\n ('bar', False)\n\n :param etag: the etag identifier to unquote.\n :return: a ``(etag, weak)`` tuple.\n \"\"\"\n if not etag:\n return None, None\n etag = etag.strip()\n weak = False\n if etag.startswith(('W/', 'w/')):\n weak = True\n etag = etag[2:]\n if etag[:1] == etag[-1:] == '\"':\n etag = etag[1:-1]\n return etag, weak\n\n\ndef parse_etags(value):\n \"\"\"Parse an etag header.\n\n :param value: the tag header to parse\n :return: an :class:`~werkzeug.datastructures.ETags` object.\n \"\"\"\n if not value:\n return ETags()\n strong = []\n weak = []\n end = len(value)\n pos = 0\n while pos < end:\n match = _etag_re.match(value, pos)\n if match is None:\n break\n is_weak, quoted, raw = match.groups()\n if raw == '*':\n return ETags(star_tag=True)\n elif quoted:\n raw = quoted\n if is_weak:\n weak.append(raw)\n else:\n strong.append(raw)\n pos = match.end()\n return ETags(strong, weak)\n\n\ndef generate_etag(data):\n \"\"\"Generate an etag for some data.\"\"\"\n return md5(data).hexdigest()\n\n\ndef parse_date(value):\n \"\"\"Parse one of the following date formats into a datetime object:\n\n .. 
sourcecode:: text\n\n Sun, 06 Nov 1994 08:49:37 GMT ; RFC 822, updated by RFC 1123\n Sunday, 06-Nov-94 08:49:37 GMT ; RFC 850, obsoleted by RFC 1036\n Sun Nov 6 08:49:37 1994 ; ANSI C's asctime() format\n\n If parsing fails the return value is `None`.\n\n :param value: a string with a supported date format.\n :return: a :class:`datetime.datetime` object.\n \"\"\"\n if value:\n t = parsedate_tz(value.strip())\n if t is not None:\n try:\n year = t[0]\n # unfortunately that function does not tell us if two digit\n # years were part of the string, or if they were prefixed\n # with two zeroes. So what we do is to assume that 69-99\n # refer to 1900, and everything below to 2000\n if year >= 0 and year <= 68:\n year += 2000\n elif year >= 69 and year <= 99:\n year += 1900\n return datetime(*((year,) + t[1:7])) - \\\n timedelta(seconds=t[-1] or 0)\n except (ValueError, OverflowError):\n return None\n\n\ndef _dump_date(d, delim):\n \"\"\"Used for `http_date` and `cookie_date`.\"\"\"\n if d is None:\n d = gmtime()\n elif isinstance(d, datetime):\n d = d.utctimetuple()\n elif isinstance(d, (integer_types, float)):\n d = gmtime(d)\n return '%s, %02d%s%s%s%s %02d:%02d:%02d GMT' % (\n ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun')[d.tm_wday],\n d.tm_mday, delim,\n ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep',\n 'Oct', 'Nov', 'Dec')[d.tm_mon - 1],\n delim, str(d.tm_year), d.tm_hour, d.tm_min, d.tm_sec\n )\n\n\ndef cookie_date(expires=None):\n \"\"\"Formats the time to ensure compatibility with Netscape's cookie\n standard.\n\n Accepts a floating point number expressed in seconds since the epoch in, a\n datetime object or a timetuple. All times in UTC. The :func:`parse_date`\n function can be used to parse such a date.\n\n Outputs a string in the format ``Wdy, DD-Mon-YYYY HH:MM:SS GMT``.\n\n :param expires: If provided that date is used, otherwise the current.\n \"\"\"\n return _dump_date(expires, '-')\n\n\ndef http_date(timestamp=None):\n \"\"\"Formats the time to match the RFC1123 date format.\n\n Accepts a floating point number expressed in seconds since the epoch in, a\n datetime object or a timetuple. All times in UTC. 
The :func:`parse_date`\n function can be used to parse such a date.\n\n Outputs a string in the format ``Wdy, DD Mon YYYY HH:MM:SS GMT``.\n\n :param timestamp: If provided that date is used, otherwise the current.\n \"\"\"\n return _dump_date(timestamp, ' ')\n\n\ndef parse_age(value=None):\n \"\"\"Parses a base-10 integer count of seconds into a timedelta.\n\n If parsing fails, the return value is `None`.\n\n :param value: a string consisting of an integer represented in base-10\n :return: a :class:`datetime.timedelta` object or `None`.\n \"\"\"\n if not value:\n return None\n try:\n seconds = int(value)\n except ValueError:\n return None\n if seconds < 0:\n return None\n try:\n return timedelta(seconds=seconds)\n except OverflowError:\n return None\n\n\ndef dump_age(age=None):\n \"\"\"Formats the duration as a base-10 integer.\n\n :param age: should be an integer number of seconds,\n a :class:`datetime.timedelta` object, or,\n if the age is unknown, `None` (default).\n \"\"\"\n if age is None:\n return\n if isinstance(age, timedelta):\n # do the equivalent of Python 2.7's timedelta.total_seconds(),\n # but disregarding fractional seconds\n age = age.seconds + (age.days * 24 * 3600)\n\n age = int(age)\n if age < 0:\n raise ValueError('age cannot be negative')\n\n return str(age)\n\n\ndef is_resource_modified(environ, etag=None, data=None, last_modified=None,\n ignore_if_range=True):\n \"\"\"Convenience method for conditional requests.\n\n :param environ: the WSGI environment of the request to be checked.\n :param etag: the etag for the response for comparison.\n :param data: or alternatively the data of the response to automatically\n generate an etag using :func:`generate_etag`.\n :param last_modified: an optional date of the last modification.\n :param ignore_if_range: If `False`, `If-Range` header will be taken into\n account.\n :return: `True` if the resource was modified, otherwise `False`.\n \"\"\"\n if etag is None and data is not None:\n etag = generate_etag(data)\n elif data is not None:\n raise TypeError('both data and etag given')\n if environ['REQUEST_METHOD'] not in ('GET', 'HEAD'):\n return False\n\n unmodified = False\n if isinstance(last_modified, string_types):\n last_modified = parse_date(last_modified)\n\n # ensure that microsecond is zero because the HTTP spec does not transmit\n # that either and we might have some false positives. 
See issue #39\n if last_modified is not None:\n last_modified = last_modified.replace(microsecond=0)\n\n if_range = None\n if not ignore_if_range and 'HTTP_RANGE' in environ:\n # https://tools.ietf.org/html/rfc7233#section-3.2\n # A server MUST ignore an If-Range header field received in a request\n # that does not contain a Range header field.\n if_range = parse_if_range_header(environ.get('HTTP_IF_RANGE'))\n\n if if_range is not None and if_range.date is not None:\n modified_since = if_range.date\n else:\n modified_since = parse_date(environ.get('HTTP_IF_MODIFIED_SINCE'))\n\n if modified_since and last_modified and last_modified <= modified_since:\n unmodified = True\n\n if etag:\n etag, _ = unquote_etag(etag)\n if if_range is not None and if_range.etag is not None:\n unmodified = parse_etags(if_range.etag).contains(etag)\n else:\n if_none_match = parse_etags(environ.get('HTTP_IF_NONE_MATCH'))\n if if_none_match:\n # https://tools.ietf.org/html/rfc7232#section-3.2\n # \"A recipient MUST use the weak comparison function when comparing\n # entity-tags for If-None-Match\"\n unmodified = if_none_match.contains_weak(etag)\n\n # https://tools.ietf.org/html/rfc7232#section-3.1\n # \"Origin server MUST use the strong comparison function when\n # comparing entity-tags for If-Match\"\n if_match = parse_etags(environ.get('HTTP_IF_MATCH'))\n if if_match:\n unmodified = not if_match.is_strong(etag)\n\n return not unmodified\n\n\ndef remove_entity_headers(headers, allowed=('expires', 'content-location')):\n \"\"\"Remove all entity headers from a list or :class:`Headers` object. This\n operation works in-place. `Expires` and `Content-Location` headers are\n by default not removed. The reason for this is :rfc:`2616` section\n 10.3.5 which specifies some entity headers that should be sent.\n\n .. versionchanged:: 0.5\n added `allowed` parameter.\n\n :param headers: a list or :class:`Headers` object.\n :param allowed: a list of headers that should still be allowed even though\n they are entity headers.\n \"\"\"\n allowed = set(x.lower() for x in allowed)\n headers[:] = [(key, value) for key, value in headers if\n not is_entity_header(key) or key.lower() in allowed]\n\n\ndef remove_hop_by_hop_headers(headers):\n \"\"\"Remove all HTTP/1.1 \"Hop-by-Hop\" headers from a list or\n :class:`Headers` object. This operation works in-place.\n\n .. versionadded:: 0.5\n\n :param headers: a list or :class:`Headers` object.\n \"\"\"\n headers[:] = [(key, value) for key, value in headers if\n not is_hop_by_hop_header(key)]\n\n\ndef is_entity_header(header):\n \"\"\"Check if a header is an entity header.\n\n .. versionadded:: 0.5\n\n :param header: the header to test.\n :return: `True` if it's an entity header, `False` otherwise.\n \"\"\"\n return header.lower() in _entity_headers\n\n\ndef is_hop_by_hop_header(header):\n \"\"\"Check if a header is an HTTP/1.1 \"Hop-by-Hop\" header.\n\n .. versionadded:: 0.5\n\n :param header: the header to test.\n :return: `True` if it's an HTTP/1.1 \"Hop-by-Hop\" header, `False` otherwise.\n \"\"\"\n return header.lower() in _hop_by_hop_headers\n\n\ndef parse_cookie(header, charset='utf-8', errors='replace', cls=None):\n \"\"\"Parse a cookie. Either from a string or WSGI environ.\n\n Per default encoding errors are ignored. If you want a different behavior\n you can set `errors` to ``'replace'`` or ``'strict'``. In strict mode a\n :exc:`HTTPUnicodeError` is raised.\n\n .. versionchanged:: 0.5\n This function now returns a :class:`TypeConversionDict` instead of a\n regular dict. 
The `cls` parameter was added.\n\n :param header: the header to be used to parse the cookie. Alternatively\n this can be a WSGI environment.\n :param charset: the charset for the cookie values.\n :param errors: the error behavior for the charset decoding.\n :param cls: an optional dict class to use. If this is not specified\n or `None` the default :class:`TypeConversionDict` is\n used.\n \"\"\"\n if isinstance(header, dict):\n header = header.get('HTTP_COOKIE', '')\n elif header is None:\n header = ''\n\n # If the value is an unicode string it's mangled through latin1. This\n # is done because on PEP 3333 on Python 3 all headers are assumed latin1\n # which however is incorrect for cookies, which are sent in page encoding.\n # As a result we\n if isinstance(header, text_type):\n header = header.encode('latin1', 'replace')\n\n if cls is None:\n cls = TypeConversionDict\n\n def _parse_pairs():\n for key, val in _cookie_parse_impl(header):\n key = to_unicode(key, charset, errors, allow_none_charset=True)\n if not key:\n continue\n val = to_unicode(val, charset, errors, allow_none_charset=True)\n yield try_coerce_native(key), val\n\n return cls(_parse_pairs())\n\n\ndef dump_cookie(key, value='', max_age=None, expires=None, path='/',\n domain=None, secure=False, httponly=False,\n charset='utf-8', sync_expires=True, max_size=4093,\n samesite=None):\n \"\"\"Creates a new Set-Cookie header without the ``Set-Cookie`` prefix\n The parameters are the same as in the cookie Morsel object in the\n Python standard library but it accepts unicode data, too.\n\n On Python 3 the return value of this function will be a unicode\n string, on Python 2 it will be a native string. In both cases the\n return value is usually restricted to ascii as the vast majority of\n values are properly escaped, but that is no guarantee. If a unicode\n string is returned it's tunneled through latin1 as required by\n PEP 3333.\n\n The return value is not ASCII safe if the key contains unicode\n characters. This is technically against the specification but\n happens in the wild. It's strongly recommended to not use\n non-ASCII values for the keys.\n\n :param max_age: should be a number of seconds, or `None` (default) if\n the cookie should last only as long as the client's\n browser session. Additionally `timedelta` objects\n are accepted, too.\n :param expires: should be a `datetime` object or unix timestamp.\n :param path: limits the cookie to a given path, per default it will\n span the whole domain.\n :param domain: Use this if you want to set a cross-domain cookie. For\n example, ``domain=\".example.com\"`` will set a cookie\n that is readable by the domain ``www.example.com``,\n ``foo.example.com`` etc. Otherwise, a cookie will only\n be readable by the domain that set it.\n :param secure: The cookie will only be available via HTTPS\n :param httponly: disallow JavaScript to access the cookie. This is an\n extension to the cookie standard and probably not\n supported by all browsers.\n :param charset: the encoding for unicode values.\n :param sync_expires: automatically set expires if max_age is defined\n but expires not.\n :param max_size: Warn if the final header value exceeds this size. The\n default, 4093, should be safely `supported by most browsers\n <cookie_>`_. Set to 0 to disable this check.\n :param samesite: Limits the scope of the cookie such that it will only\n be attached to requests if those requests are \"same-site\".\n\n .. 
_`cookie`: http://browsercookielimits.squawky.net/\n \"\"\"\n key = to_bytes(key, charset)\n value = to_bytes(value, charset)\n\n if path is not None:\n path = iri_to_uri(path, charset)\n domain = _make_cookie_domain(domain)\n if isinstance(max_age, timedelta):\n max_age = (max_age.days * 60 * 60 * 24) + max_age.seconds\n if expires is not None:\n if not isinstance(expires, string_types):\n expires = cookie_date(expires)\n elif max_age is not None and sync_expires:\n expires = to_bytes(cookie_date(time() + max_age))\n\n samesite = samesite.title() if samesite else None\n if samesite not in ('Strict', 'Lax', None):\n raise ValueError(\"invalid SameSite value; must be 'Strict', 'Lax' or None\")\n\n buf = [key + b'=' + _cookie_quote(value)]\n\n # XXX: In theory all of these parameters that are not marked with `None`\n # should be quoted. Because stdlib did not quote it before I did not\n # want to introduce quoting there now.\n for k, v, q in ((b'Domain', domain, True),\n (b'Expires', expires, False,),\n (b'Max-Age', max_age, False),\n (b'Secure', secure, None),\n (b'HttpOnly', httponly, None),\n (b'Path', path, False),\n (b'SameSite', samesite, False)):\n if q is None:\n if v:\n buf.append(k)\n continue\n\n if v is None:\n continue\n\n tmp = bytearray(k)\n if not isinstance(v, (bytes, bytearray)):\n v = to_bytes(text_type(v), charset)\n if q:\n v = _cookie_quote(v)\n tmp += b'=' + v\n buf.append(bytes(tmp))\n\n # The return value will be an incorrectly encoded latin1 header on\n # Python 3 for consistency with the headers object and a bytestring\n # on Python 2 because that's how the API makes more sense.\n rv = b'; '.join(buf)\n if not PY2:\n rv = rv.decode('latin1')\n\n # Warn if the final value of the cookie is less than the limit. If the\n # cookie is too large, then it may be silently ignored, which can be quite\n # hard to debug.\n cookie_size = len(rv)\n\n if max_size and cookie_size > max_size:\n value_size = len(value)\n warnings.warn(\n 'The \"{key}\" cookie is too large: the value was {value_size} bytes'\n ' but the header required {extra_size} extra bytes. The final size'\n ' was {cookie_size} bytes but the limit is {max_size} bytes.'\n ' Browsers may silently ignore cookies larger than this.'.format(\n key=key,\n value_size=value_size,\n extra_size=cookie_size - value_size,\n cookie_size=cookie_size,\n max_size=max_size\n ),\n stacklevel=2\n )\n\n return rv\n\n\ndef is_byte_range_valid(start, stop, length):\n \"\"\"Checks if a given byte content range is valid for the given length.\n\n .. versionadded:: 0.7\n \"\"\"\n if (start is None) != (stop is None):\n return False\n elif start is None:\n return length is None or length >= 0\n elif length is None:\n return 0 <= start < stop\n elif start >= stop:\n return False\n return 0 <= start < length\n\n\n# circular dependency fun\nfrom werkzeug.datastructures import Accept, HeaderSet, ETags, Authorization, \\\n WWWAuthenticate, TypeConversionDict, IfRange, Range, ContentRange, \\\n RequestCacheControl\n\n\n# DEPRECATED\n# backwards compatible imports\nfrom werkzeug.datastructures import ( # noqa\n MIMEAccept, CharsetAccept, LanguageAccept, Headers\n)\nfrom werkzeug.urls import iri_to_uri\n", "path": "werkzeug/http.py" } ]
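The pr_diff that follows adds the 308 entry to HTTP_STATUS_CODES in the file above and rewrites the test Client's redirect handling so that 307 and 308 redirects keep the request method and body while other codes drop them. The sketch below is modelled on the tests added in that diff; the app, route name, and form field are illustrative and not taken from the patch.

from werkzeug.test import Client
from werkzeug.utils import redirect
from werkzeug.wrappers import BaseResponse, Request, Response


@Request.application
def app(request):
    # With the rewritten redirect handling, the 308 target still sees a POST
    # carrying the original form data when follow_redirects=True.
    if request.path == "/target/":
        assert request.method == "POST"
        assert request.form["foo"] == "bar"
        return Response("done")
    return redirect("http://localhost/target/", code=308)


client = Client(app, response_wrapper=BaseResponse)
response = client.post("/", data={"foo": "bar"}, follow_redirects=True)
assert response.status_code == 200
assert response.data == b"done"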
diff --git a/CHANGES.rst b/CHANGES.rst index a3753ff79..3d25a164b 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -116,6 +116,21 @@ Unreleased versions of Lighttpd. ``LighttpdCGIRootFix`` was renamed to ``CGIRootFix`` in 0.9. The old name emits a deprecation warning and will be removed in the next version. (`#1141`_) +- The test :class:`~test.Client` redirect handling is rewritten. + (`#1402`_) + + - The redirect environ is copied from the initial request environ. + - Script root and path are correctly distinguished when + redirecting to a path under the root. + - The HEAD method is not changed to GET. + - 307 and 308 codes preserve the method and body. All others + ignore the body and related headers. + - Headers are passed to the new request for all codes, following + what browsers do. + - :class:`~test.EnvironBuilder` sets the content type and length + headers in addition to the WSGI keys when detecting them from + the data. + .. _`#209`: https://github.com/pallets/werkzeug/pull/209 .. _`#609`: https://github.com/pallets/werkzeug/pull/609 @@ -164,6 +179,7 @@ Unreleased .. _`#1393`: https://github.com/pallets/werkzeug/pull/1393 .. _`#1395`: https://github.com/pallets/werkzeug/pull/1395 .. _`#1401`: https://github.com/pallets/werkzeug/pull/1401 +.. _`#1402`: https://github.com/pallets/werkzeug/pull/1402 Version 0.14.1 diff --git a/tests/test_test.py b/tests/test_test.py index 19f651873..0854f1002 100644 --- a/tests/test_test.py +++ b/tests/test_test.py @@ -56,17 +56,6 @@ def redirect_with_get_app(environ, start_response): return response(environ, start_response) -def redirect_with_post_app(environ, start_response): - req = Request(environ) - if req.url == 'http://localhost/some/redirect/': - assert req.method == 'GET', 'request should be GET' - assert not req.form, 'request should not have data' - response = Response('current url: %s' % req.url) - else: - response = redirect('http://localhost/some/redirect/') - return response(environ, start_response) - - def external_redirect_demo_app(environ, start_response): response = redirect('http://example.com/') return response(environ, start_response) @@ -333,6 +322,22 @@ def test_create_environ_query_string_error(): create_environ('/foo?bar=baz', query_string={'a': 'b'}) +def test_builder_from_environ(): + environ = create_environ( + "/foo", + base_url="https://example.com/base", + query_string={"name": "Werkzeug"}, + data={"foo": "bar"}, + headers={"X-Foo": "bar"} + ) + builder = EnvironBuilder.from_environ(environ) + try: + new_environ = builder.get_environ() + finally: + builder.close() + assert new_environ == environ + + def test_file_closing(): closed = [] @@ -390,21 +395,35 @@ def local_redirect_app(environ, start_response): strict_eq(resp.data, b'current path: /to/location') -def test_follow_redirect_with_post_307(): - def redirect_with_post_307_app(environ, start_response): - req = Request(environ) - if req.url == 'http://localhost/some/redirect/': - assert req.method == 'POST', 'request should be POST' - assert not req.form, 'request should not have data' - response = Response('current url: %s' % req.url) - else: - response = redirect('http://localhost/some/redirect/', code=307) - return response(environ, start_response) [email protected]( + ("code", "keep"), + ((302, False), (301, False), (307, True), (308, True)), +) +def test_follow_redirect_body(code, keep): + @Request.application + def app(request): + if request.url == "http://localhost/some/redirect/": + assert request.method == "POST" if keep else "GET" + assert 
request.headers["X-Foo"] == "bar" + + if keep: + assert request.form["foo"] == "bar" + else: + assert not request.form + + return Response("current url: %s" % request.url) + + return redirect("http://localhost/some/redirect/", code=code) - c = Client(redirect_with_post_307_app, response_wrapper=BaseResponse) - resp = c.post('/', follow_redirects=True, data='foo=blub+hehe&blah=42') - assert resp.status_code == 200 - assert resp.data == b'current url: http://localhost/some/redirect/' + c = Client(app, response_wrapper=BaseResponse) + response = c.post( + "/", + follow_redirects=True, + data={"foo": "bar"}, + headers={"X-Foo": "bar"}, + ) + assert response.status_code == 200 + assert response.data == b"current url: http://localhost/some/redirect/" def test_follow_external_redirect(): @@ -436,11 +455,17 @@ def test_follow_redirect_loop(): c.get('/', follow_redirects=True) -def test_follow_redirect_with_post(): - c = Client(redirect_with_post_app, response_wrapper=BaseResponse) - resp = c.post('/', follow_redirects=True, data='foo=blub+hehe&blah=42') - strict_eq(resp.status_code, 200) - strict_eq(resp.data, b'current url: http://localhost/some/redirect/') +def test_follow_redirect_non_root_base_url(): + @Request.application + def app(request): + if request.path == "/redirect": + return redirect("done") + + return Response(request.path) + + c = Client(app, response_wrapper=Response) + response = c.get("/redirect", base_url="http://localhost/other", follow_redirects=True) + assert response.data == b"/done" def test_path_info_script_name_unquoting(): diff --git a/werkzeug/http.py b/werkzeug/http.py index 2e3adff6d..0e96d6b60 100644 --- a/werkzeug/http.py +++ b/werkzeug/http.py @@ -121,6 +121,7 @@ 304: 'Not Modified', 305: 'Use Proxy', 307: 'Temporary Redirect', + 308: 'Permanent Redirect', 400: 'Bad Request', 401: 'Unauthorized', 402: 'Payment Required', # unused diff --git a/werkzeug/test.py b/werkzeug/test.py index 1e1301139..b5a93b73b 100644 --- a/werkzeug/test.py +++ b/werkzeug/test.py @@ -32,10 +32,17 @@ from werkzeug.wrappers import BaseRequest from werkzeug.urls import url_encode, url_fix, iri_to_uri, url_unquote, \ url_unparse, url_parse -from werkzeug.wsgi import get_host, get_current_url, ClosingIterator +from werkzeug.wsgi import get_current_url, ClosingIterator from werkzeug.utils import dump_cookie, get_content_type -from werkzeug.datastructures import FileMultiDict, MultiDict, \ - CombinedMultiDict, Headers, FileStorage, CallbackDict +from werkzeug.datastructures import ( + FileMultiDict, + MultiDict, + CombinedMultiDict, + Headers, + FileStorage, + CallbackDict, + EnvironHeaders, +) from werkzeug.http import dump_options_header, parse_options_header @@ -355,6 +362,35 @@ def __init__(self, path='/', base_url=None, query_string=None, if mimetype is not None: self.mimetype = mimetype + @classmethod + def from_environ(cls, environ, **kwargs): + """Turn an environ dict back into a builder. Any extra kwargs + override the args extracted from the environ. + + .. 
versionadded:: 0.15 + """ + headers = Headers(EnvironHeaders(environ)) + out = { + "path": environ["PATH_INFO"], + "base_url": cls._make_base_url( + environ["wsgi.url_scheme"], + headers.pop("Host"), + environ["SCRIPT_NAME"], + ), + "query_string": environ["QUERY_STRING"], + "method": environ["REQUEST_METHOD"], + "input_stream": environ["wsgi.input"], + "content_type": headers.pop("Content-Type", None), + "content_length": headers.pop("Content-Length", None), + "errors_stream": environ["wsgi.errors"], + "multithread": environ["wsgi.multithread"], + "multiprocess": environ["wsgi.multiprocess"], + "run_once": environ["wsgi.run_once"], + "headers": headers, + } + out.update(kwargs) + return cls(**out) + def _add_file_from_data(self, key, value): """Called in the EnvironBuilder to add files from the data dict.""" if isinstance(value, tuple): @@ -372,11 +408,19 @@ def _add_file_from_data(self, key, value): else: self.files.add_file(key, value) - def _get_base_url(self): - return url_unparse((self.url_scheme, self.host, - self.script_root, '', '')).rstrip('/') + '/' + @staticmethod + def _make_base_url(scheme, host, script_root): + return url_unparse((scheme, host, script_root, "", "")).rstrip("/") + "/" + + @property + def base_url(self): + """The base URL is used to extract the URL scheme, host name, + port, and root path. + """ + return self._make_base_url(self.url_scheme, self.host, self.script_root) - def _set_base_url(self, value): + @base_url.setter + def base_url(self, value): if value is None: scheme = 'http' netloc = 'localhost' @@ -390,12 +434,6 @@ def _set_base_url(self, value): self.host = netloc self.url_scheme = scheme - base_url = property(_get_base_url, _set_base_url, doc=''' - The base URL is a URL that is used to extract the WSGI - URL scheme, host (server name + server port) and the - script root (`SCRIPT_NAME`).''') - del _get_base_url, _set_base_url - def _get_content_type(self): ct = self.headers.get('Content-Type') if ct is None and not self._input_stream: @@ -571,7 +609,13 @@ def close(self): self.closed = True def get_environ(self): - """Return the built environ.""" + """Return the built environ. + + .. versionchanged:: 0.15 + The content type and length headers are set based on + input stream detection. Previously this only set the WSGI + keys. + """ input_stream = self.input_stream content_length = self.content_length @@ -624,15 +668,23 @@ def _path_encode(x): 'wsgi.multiprocess': self.multiprocess, 'wsgi.run_once': self.run_once }) + + headers = self.headers.copy() + if content_type is not None: result['CONTENT_TYPE'] = content_type + headers.set("Content-Type", content_type) + if content_length is not None: result['CONTENT_LENGTH'] = str(content_length) + headers.set("Content-Length", content_length) - for key, value in self.headers.to_wsgi_list(): + for key, value in headers.to_wsgi_list(): result['HTTP_%s' % key.upper().replace('-', '_')] = value + if self.environ_overrides: result.update(self.environ_overrides) + return result def get_request(self, cls=None): @@ -724,43 +776,65 @@ def run_wsgi_app(self, environ, buffered=False): return rv def resolve_redirect(self, response, new_location, environ, buffered=False): - """Resolves a single redirect and triggers the request again - directly on this redirect client. + """Perform a new request to the location given by the redirect + response to the previous request. 
""" - scheme, netloc, script_root, qs, anchor = url_parse(new_location) - base_url = url_unparse((scheme, netloc, '', '', '')).rstrip('/') + '/' - - cur_server_name = netloc.split(':', 1)[0].split('.') - real_server_name = get_host(environ).rsplit(':', 1)[0].split('.') - if cur_server_name == ['']: - # this is a local redirect having autocorrect_location_header=False - cur_server_name = real_server_name - base_url = EnvironBuilder(environ).base_url - - if self.allow_subdomain_redirects: - allowed = cur_server_name[-len(real_server_name):] == real_server_name + scheme, netloc, path, qs, anchor = url_parse(new_location) + builder = EnvironBuilder.from_environ(environ, query_string=qs) + + to_name_parts = netloc.split(':', 1)[0].split(".") + from_name_parts = builder.server_name.split(".") + + if to_name_parts != [""]: + # The new location has a host, use it for the base URL. + builder.url_scheme = scheme + builder.host = netloc else: - allowed = cur_server_name == real_server_name + # A local redirect with autocorrect_location_header=False + # doesn't have a host, so use the request's host. + to_name_parts = from_name_parts + + # Explain why a redirect to a different server name won't be followed. + if to_name_parts != from_name_parts: + if to_name_parts[-len(from_name_parts):] == from_name_parts: + if not self.allow_subdomain_redirects: + raise RuntimeError("Following subdomain redirects is not enabled.") + else: + raise RuntimeError("Following external redirects is not supported.") - if not allowed: - raise RuntimeError('%r does not support redirect to ' - 'external targets' % self.__class__) + path_parts = path.split("/") + root_parts = builder.script_root.split("/") - status_code = int(response[1].split(None, 1)[0]) - if status_code == 307: - method = environ['REQUEST_METHOD'] + if path_parts[:len(root_parts)] == root_parts: + # Strip the script root from the path. + builder.path = path[len(builder.script_root):] else: - method = 'GET' + # The new location is not under the script root, so use the + # whole path and clear the previous root. + builder.path = path + builder.script_root = "" - # For redirect handling we temporarily disable the response - # wrapper. This is not threadsafe but not a real concern - # since the test client must not be shared anyways. + status_code = int(response[1].split(None, 1)[0]) + + # Only 307 and 308 preserve all of the original request. + if status_code not in {307, 308}: + # HEAD is preserved, everything else becomes GET. + if builder.method != "HEAD": + builder.method = "GET" + + # Clear the body and the headers that describe it. + builder.input_stream = None + builder.content_type = None + builder.content_length = None + builder.headers.pop("Transfer-Encoding", None) + + # Disable the response wrapper while handling redirects. Not + # thread safe, but the client should not be shared anyway. 
old_response_wrapper = self.response_wrapper self.response_wrapper = None + try: - return self.open(path=script_root, base_url=base_url, - query_string=qs, as_tuple=True, - buffered=buffered, method=method) + return self.open(builder, as_tuple=True, buffered=buffered) finally: self.response_wrapper = old_response_wrapper @@ -811,8 +885,10 @@ def open(self, *args, **kwargs): redirect_chain = [] while 1: status_code = int(response[1].split(None, 1)[0]) - if status_code not in (301, 302, 303, 305, 307) \ - or not follow_redirects: + if ( + status_code not in {301, 302, 303, 305, 307, 308} + or not follow_redirects + ): break new_location = response[2]['location'] new_redirect_entry = (new_location, status_code)
biopython__biopython-877
Enforce basic PEP8 style with TravisCI See also #493 for using a git pre-commit hook. I would like to have the git pre-commit hook be quite strict, requiring explicit by-passing with `--no-verify` on a case by case basis, while the TravisCI check should be less onerous but mandatory. Right now something like this in `.travis.yml` could work: ``` - pip install pep8 - pep8 --ignore E123,E126,E128,E501 BioSQL/ - pep8 --ignore E123,E126,E127,E128,E402,E501 Scripts/ - pep8 --ignore E127,E128,E501 Doc/examples/ - pep8 --ignore E121,E122,E123,E124,E125,E126,E127,E128,E129,E131,E201,E202,E231,E241,E302,E402,E501,E731,W291,W293,W391,W503 Bio/ ``` I have no particular preference over using the command line tool `pep8` versus `flake8` or anything else - whatever is quickest would be practical if they check the same things. Medium term I'd like us to work towards removing as many of these PEP8 violations as possible and make the TravisCI checking stricter over time. This adds about 20 to 25s to the TravisCI run, but only really needs to be done once rather than for every version of Python (possibly twice for Python 2 and Python 3?). An alternative might be combining TravisCI with Tox along the lines used in https://github.com/galaxyproject/planemo/blob/master/.travis.yml and https://github.com/galaxyproject/planemo/blob/master/tox.ini
[ { "content": "# Copyright (C) 2002, Thomas Hamelryck ([email protected])\n# This code is part of the Biopython distribution and governed by its\n# license. Please see the LICENSE file that should have been included\n# as part of this package.\n\n\"\"\"Use the DSSP program to calculate secondary structure and accessibility.\n\nYou need to have a working version of DSSP (and a license, free for academic\nuse) in order to use this. For DSSP, see U{http://swift.cmbi.ru.nl/gv/dssp/}.\n\nThe DSSP codes for secondary structure used here are:\n\n H\n Alpha helix (4-12)\n B\n Isolated beta-bridge residue\n E\n Strand\n G\n 3-10 helix\n I\n pi helix\n T\n Turn\n S\n Bend\n \\-\n None\n\nThe following Accessible surface area (ASA) values can be used, defaulting\nto the Sander and Rost values:\n\n Miller\n Miller et al. 1987 http://dx.doi.org/10.1016/0022-2836(87)90038-6\n Sander\n Sander and Rost 1994 http://dx.doi.org/10.1002/prot.340200303\n Wilke\n Tien et al. 2013 http://dx.doi.org/10.1371/journal.pone.0080635\n\n\"\"\"\n\nfrom __future__ import print_function\n\nimport re\nfrom Bio._py3k import StringIO\nimport subprocess\nimport warnings\n\nfrom Bio.Data import SCOPData\n\nfrom Bio.PDB.AbstractPropertyMap import AbstractResiduePropertyMap\nfrom Bio.PDB.PDBExceptions import PDBException\nfrom Bio.PDB.PDBParser import PDBParser\n\n\n# Match C in DSSP\n_dssp_cys = re.compile('[a-z]')\n\n# Maximal ASA of amino acids\n# Used for relative accessibility\n\nresidue_max_acc = {\n # Miller max acc: Miller et al. 1987 http://dx.doi.org/10.1016/0022-2836(87)90038-6\n # Wilke: Tien et al. 2013 http://dx.doi.org/10.1371/journal.pone.0080635\n # Sander: Sander & Rost 1994 http://dx.doi.org/10.1002/prot.340200303\n 'Miller': {\n 'ALA': 113.0, 'ARG': 241.0, 'ASN': 158.0, 'ASP': 151.0,\n 'CYS': 140.0, 'GLN': 189.0, 'GLU': 183.0, 'GLY': 85.0,\n 'HIS': 194.0, 'ILE': 182.0, 'LEU': 180.0, 'LYS': 211.0,\n 'MET': 204.0, 'PHE': 218.0, 'PRO': 143.0, 'SER': 122.0,\n 'THR': 146.0, 'TRP': 259.0, 'TYR': 229.0, 'VAL': 160.0\n },\n 'Wilke': {\n 'ALA': 129.0, 'ARG': 274.0, 'ASN': 195.0, 'ASP': 193.0,\n 'CYS': 167.0, 'GLN': 225.0, 'GLU': 223.0, 'GLY': 104.0,\n 'HIS': 224.0, 'ILE': 197.0, 'LEU': 201.0, 'LYS': 236.0,\n 'MET': 224.0, 'PHE': 240.0, 'PRO': 159.0, 'SER': 155.0,\n 'THR': 172.0, 'TRP': 285.0, 'TYR': 263.0, 'VAL': 174.0\n },\n 'Sander': {\n 'ALA': 126.0, 'ARG': 248.0, 'ASN': 157.0, 'ASP': 163.0,\n 'CYS': 135.0, 'GLN': 198.0, 'GLU': 194.0, 'GLY': 84.0,\n 'HIS': 184.0, 'ILE': 169.0, 'LEU': 164.0, 'LYS': 205.0,\n 'MET': 188.0, 'PHE': 197.0, 'PRO': 136.0, 'SER': 130.0,\n 'THR': 142.0, 'TRP': 227.0, 'TYR': 222.0, 'VAL': 142.0\n }\n}\n\n\ndef ss_to_index(ss):\n \"\"\"Secondary structure symbol to index.\n\n H=0\n E=1\n C=2\n \"\"\"\n if ss == 'H':\n return 0\n if ss == 'E':\n return 1\n if ss == 'C':\n return 2\n assert 0\n\n\ndef dssp_dict_from_pdb_file(in_file, DSSP=\"dssp\"):\n \"\"\"Create a DSSP dictionary from a PDB file.\n\n Example:\n --------\n >>> dssp_dict=dssp_dict_from_pdb_file(\"1fat.pdb\")\n >>> aa, ss, acc=dssp_dict[('A', 1)]\n\n Parameters\n ----------\n in_file : string\n pdb file\n\n DSSP : string\n DSSP executable (argument to os.system)\n\n Returns\n -------\n (out_dict, keys) : tuple\n a dictionary that maps (chainid, resid) to\n amino acid type, secondary structure code and\n accessibility.\n \"\"\"\n # Using universal newlines is important on Python 3, this\n # gives unicode handles rather than bytes handles.\n p = subprocess.Popen([DSSP, in_file], universal_newlines=True,\n stdout=subprocess.PIPE, 
stderr=subprocess.PIPE)\n out, err = p.communicate()\n\n # Alert user for errors\n if err.strip():\n warnings.warn(err)\n if not out.strip():\n raise Exception('DSSP failed to produce an output')\n\n out_dict, keys = _make_dssp_dict(StringIO(out))\n return out_dict, keys\n\n\ndef make_dssp_dict(filename):\n \"\"\"DSSP dictionary mapping identifers to properties.\n\n Return a DSSP dictionary that maps (chainid, resid) to\n aa, ss and accessibility, from a DSSP file. ::\n\n Parameters\n ----------\n filename : string\n the DSSP output file\n \"\"\"\n with open(filename, \"r\") as handle:\n return _make_dssp_dict(handle)\n\n\ndef _make_dssp_dict(handle):\n \"\"\"Internal function used by mask_dssp_dict (PRIVATE).\n\n Return a DSSP dictionary that maps (chainid, resid) to an amino acid,\n secondary structure symbol, solvent accessibility value, and hydrogen bond\n information (relative dssp indices and hydrogen bond energies) from an open\n DSSP file object. ::\n\n Parameters\n ----------\n handle : file\n the open DSSP output file handle\n \"\"\"\n dssp = {}\n start = 0\n keys = []\n for l in handle.readlines():\n sl = l.split()\n if len(sl) < 2:\n continue\n if sl[1] == \"RESIDUE\":\n # Start parsing from here\n start = 1\n continue\n if not start:\n continue\n if l[9] == \" \":\n # Skip -- missing residue\n continue\n\n dssp_index = int(l[:5])\n resseq = int(l[5:10])\n icode = l[10]\n chainid = l[11]\n aa = l[13]\n ss = l[16]\n if ss == \" \":\n ss = \"-\"\n try:\n NH_O_1_relidx = int(l[38:45])\n NH_O_1_energy = float(l[46:50])\n O_NH_1_relidx = int(l[50:56])\n O_NH_1_energy = float(l[57:61])\n NH_O_2_relidx = int(l[61:67])\n NH_O_2_energy = float(l[68:72])\n O_NH_2_relidx = int(l[72:78])\n O_NH_2_energy = float(l[79:83])\n\n acc = int(l[34:38])\n phi = float(l[103:109])\n psi = float(l[109:115])\n except ValueError as exc:\n # DSSP output breaks its own format when there are >9999\n # residues, since only 4 digits are allocated to the seq num\n # field. 
See 3kic chain T res 321, 1vsy chain T res 6077.\n # Here, look for whitespace to figure out the number of extra\n # digits, and shift parsing the rest of the line by that amount.\n if l[34] != ' ':\n shift = l[34:].find(' ')\n\n NH_O_1_relidx = int(l[38 + shift:45 + shift])\n NH_O_1_energy = float(l[46 + shift:50 + shift])\n O_NH_1_relidx = int(l[50 + shift:56 + shift])\n O_NH_1_energy = float(l[57 + shift:61 + shift])\n NH_O_2_relidx = int(l[61 + shift:67 + shift])\n NH_O_2_energy = float(l[68 + shift:72 + shift])\n O_NH_2_relidx = int(l[72 + shift:78 + shift])\n O_NH_2_energy = float(l[79 + shift:83 + shift])\n\n acc = int((l[34 + shift:38 + shift]))\n phi = float(l[103 + shift:109 + shift])\n psi = float(l[109 + shift:115 + shift])\n else:\n raise ValueError(exc)\n res_id = (\" \", resseq, icode)\n dssp[(chainid, res_id)] = (aa, ss, acc, phi, psi, dssp_index,\n NH_O_1_relidx, NH_O_1_energy, O_NH_1_relidx, O_NH_1_energy,\n NH_O_2_relidx, NH_O_2_energy, O_NH_2_relidx, O_NH_2_energy)\n keys.append((chainid, res_id))\n return dssp, keys\n\n\nclass DSSP(AbstractResiduePropertyMap):\n \"\"\"Run DSSP and parse secondary structure and accessibility.\n\n Run DSSP on a pdb file, and provide a handle to the\n DSSP secondary structure and accessibility.\n\n **Note** that DSSP can only handle one model.\n\n Example:\n --------\n\n >>> p = PDBParser()\n >>> structure = p.get_structure(\"1MOT\", \"1MOT.pdb\")\n >>> model = structure[0]\n >>> dssp = DSSP(model, \"1MOT.pdb\")\n >>> # DSSP data is accessed by a tuple (chain_id, res_id)\n >>> a_key = list(dssp.keys())[2]\n >>> # residue object, secondary structure, solvent accessibility,\n >>> # relative accessiblity, phi, psi\n >>> dssp[a_key]\n (<Residue ALA het= resseq=251 icode= >,\n 'H',\n 72,\n 0.67924528301886788,\n -61.200000000000003,\n -42.399999999999999)\n \"\"\"\n\n def __init__(self, model, pdb_file, dssp=\"dssp\", acc_array=\"Sander\"):\n \"\"\"Create a DSSP object.\n\n Parameters\n ----------\n model : Model\n the first model of the structure\n pdb_file : string\n a PDB file\n dssp : string\n the dssp executable (ie. the argument to os.system)\n acc_array : string\n Accessible surface area (ASA) from either Miller et al. (1987),\n Sander & Rost (1994), or Wilke: Tien et al. 2013, as string\n Sander/Wilke/Miller. 
Defaults to Sander.\n \"\"\"\n\n self.residue_max_acc = residue_max_acc[acc_array]\n \n # create DSSP dictionary\n dssp_dict, dssp_keys = dssp_dict_from_pdb_file(pdb_file, dssp)\n dssp_map = {}\n dssp_list = []\n\n def resid2code(res_id):\n \"\"\"Serialize a residue's resseq and icode for easy comparison.\"\"\"\n return '%s%s' % (res_id[1], res_id[2])\n\n # Now create a dictionary that maps Residue objects to\n # secondary structure and accessibility, and a list of\n # (residue, (secondary structure, accessibility)) tuples\n for key in dssp_keys:\n chain_id, res_id = key\n chain = model[chain_id]\n try:\n res = chain[res_id]\n except KeyError:\n # In DSSP, HET field is not considered in residue identifier.\n # Thus HETATM records may cause unnecessary exceptions.\n # (See 3jui chain A res 593.)\n # Try the lookup again with all HETATM other than water\n res_seq_icode = resid2code(res_id)\n for r in chain:\n if r.id[0] not in (' ', 'W'):\n # Compare resseq + icode\n if resid2code(r.id) == res_seq_icode:\n # Found a matching residue\n res = r\n break\n else:\n raise KeyError(res_id)\n\n # For disordered residues of point mutations, BioPython uses the\n # last one as default, But DSSP takes the first one (alternative\n # location is blank, A or 1). See 1h9h chain E resi 22.\n # Here we select the res in which all atoms have altloc blank, A or\n # 1. If no such residues are found, simply use the first one appears\n # (as DSSP does).\n if res.is_disordered() == 2:\n for rk in res.disordered_get_id_list():\n # All atoms in the disordered residue should have the same\n # altloc, so it suffices to check the altloc of the first\n # atom.\n altloc = res.child_dict[rk].get_list()[0].get_altloc()\n if altloc in tuple('A1 '):\n res.disordered_select(rk)\n break\n else:\n # Simply select the first one\n res.disordered_select(res.disordered_get_id_list()[0])\n\n # Sometimes point mutations are put into HETATM and ATOM with altloc\n # 'A' and 'B'.\n # See 3piu chain A residue 273:\n # <Residue LLP het=H_LLP resseq=273 icode= >\n # <Residue LYS het= resseq=273 icode= >\n # DSSP uses the HETATM LLP as it has altloc 'A'\n # We check the altloc code here.\n elif res.is_disordered() == 1:\n # Check altloc of all atoms in the DisorderedResidue. If it\n # contains blank, A or 1, then use it. Otherwise, look for HET\n # residues of the same seq+icode. 
If not such HET residues are\n # found, just accept the current one.\n altlocs = set(a.get_altloc() for a in res.get_unpacked_list())\n if altlocs.isdisjoint('A1 '):\n # Try again with all HETATM other than water\n res_seq_icode = resid2code(res_id)\n for r in chain:\n if r.id[0] not in (' ', 'W'):\n if resid2code(r.id) == res_seq_icode and \\\n r.get_list()[0].get_altloc() in tuple('A1 '):\n res = r\n break\n\n (aa, ss, acc, phi, psi, dssp_index,\n NH_O_1_relidx, NH_O_1_energy,\n O_NH_1_relidx, O_NH_1_energy,\n NH_O_2_relidx, NH_O_2_energy,\n O_NH_2_relidx, O_NH_2_energy) = dssp_dict[key]\n\n res.xtra[\"SS_DSSP\"] = ss\n res.xtra[\"EXP_DSSP_ASA\"] = acc\n res.xtra[\"PHI_DSSP\"] = phi\n res.xtra[\"PSI_DSSP\"] = psi\n res.xtra[\"DSSP_INDEX\"] = dssp_index\n res.xtra[\"NH_O_1_RELIDX_DSSP\"] = NH_O_1_relidx\n res.xtra[\"NH_O_1_ENERGY_DSSP\"] = NH_O_1_energy\n res.xtra[\"O_NH_1_RELIDX_DSSP\"] = O_NH_1_relidx\n res.xtra[\"O_NH_1_ENERGY_DSSP\"] = O_NH_1_energy\n res.xtra[\"NH_O_2_RELIDX_DSSP\"] = NH_O_2_relidx\n res.xtra[\"NH_O_2_ENERGY_DSSP\"] = NH_O_2_energy\n res.xtra[\"O_NH_2_RELIDX_DSSP\"] = O_NH_2_relidx\n res.xtra[\"O_NH_2_ENERGY_DSSP\"] = O_NH_2_energy\n\n # Relative accessibility\n resname = res.get_resname()\n try:\n rel_acc = acc / self.residue_max_acc[resname]\n except KeyError:\n # Invalid value for resname\n rel_acc = 'NA'\n else:\n if rel_acc > 1.0:\n rel_acc = 1.0\n res.xtra[\"EXP_DSSP_RASA\"] = rel_acc\n # Verify if AA in DSSP == AA in Structure\n # Something went wrong if this is not true!\n # NB: DSSP uses X often\n resname = SCOPData.protein_letters_3to1.get(resname, 'X')\n if resname == \"C\":\n # DSSP renames C in C-bridges to a,b,c,d,...\n # - we rename it back to 'C'\n if _dssp_cys.match(aa):\n aa = 'C'\n # Take care of HETATM again\n if (resname != aa) and (res.id[0] == ' ' or aa != 'X'):\n raise PDBException(\"Structure/DSSP mismatch at %s\" % res)\n\n dssp_vals = (dssp_index, aa, ss, rel_acc, phi, psi,\n NH_O_1_relidx, NH_O_1_energy,\n O_NH_1_relidx, O_NH_1_energy,\n NH_O_2_relidx, NH_O_2_energy,\n O_NH_2_relidx, O_NH_2_energy)\n\n dssp_map[key] = dssp_vals\n dssp_list.append(dssp_vals)\n\n AbstractResiduePropertyMap.__init__(self, dssp_map, dssp_keys,\n dssp_list)\n\n\nif __name__ == \"__main__\":\n import sys\n\n p = PDBParser()\n s = p.get_structure('X', sys.argv[1])\n model = s[0]\n d = DSSP(model, sys.argv[1])\n\n for r in d:\n print(r)\n print(\"Handled %i residues\" % len(d))\n print(sorted(d))\n if ('A', 1) in d:\n print(d[('A', 1)])\n print(s[0]['A'][1].xtra)\n # Secondary structure\n print(''.join(item[1] for item in d))\n", "path": "Bio/PDB/DSSP.py" } ]
[ { "content": "# Copyright (C) 2002, Thomas Hamelryck ([email protected])\n# This code is part of the Biopython distribution and governed by its\n# license. Please see the LICENSE file that should have been included\n# as part of this package.\n\n\"\"\"Use the DSSP program to calculate secondary structure and accessibility.\n\nYou need to have a working version of DSSP (and a license, free for academic\nuse) in order to use this. For DSSP, see U{http://swift.cmbi.ru.nl/gv/dssp/}.\n\nThe DSSP codes for secondary structure used here are:\n\n H\n Alpha helix (4-12)\n B\n Isolated beta-bridge residue\n E\n Strand\n G\n 3-10 helix\n I\n pi helix\n T\n Turn\n S\n Bend\n \\-\n None\n\nThe following Accessible surface area (ASA) values can be used, defaulting\nto the Sander and Rost values:\n\n Miller\n Miller et al. 1987 http://dx.doi.org/10.1016/0022-2836(87)90038-6\n Sander\n Sander and Rost 1994 http://dx.doi.org/10.1002/prot.340200303\n Wilke\n Tien et al. 2013 http://dx.doi.org/10.1371/journal.pone.0080635\n\n\"\"\"\n\nfrom __future__ import print_function\n\nimport re\nfrom Bio._py3k import StringIO\nimport subprocess\nimport warnings\n\nfrom Bio.Data import SCOPData\n\nfrom Bio.PDB.AbstractPropertyMap import AbstractResiduePropertyMap\nfrom Bio.PDB.PDBExceptions import PDBException\nfrom Bio.PDB.PDBParser import PDBParser\n\n\n# Match C in DSSP\n_dssp_cys = re.compile('[a-z]')\n\n# Maximal ASA of amino acids\n# Used for relative accessibility\n\nresidue_max_acc = {\n # Miller max acc: Miller et al. 1987 http://dx.doi.org/10.1016/0022-2836(87)90038-6\n # Wilke: Tien et al. 2013 http://dx.doi.org/10.1371/journal.pone.0080635\n # Sander: Sander & Rost 1994 http://dx.doi.org/10.1002/prot.340200303\n 'Miller': {\n 'ALA': 113.0, 'ARG': 241.0, 'ASN': 158.0, 'ASP': 151.0,\n 'CYS': 140.0, 'GLN': 189.0, 'GLU': 183.0, 'GLY': 85.0,\n 'HIS': 194.0, 'ILE': 182.0, 'LEU': 180.0, 'LYS': 211.0,\n 'MET': 204.0, 'PHE': 218.0, 'PRO': 143.0, 'SER': 122.0,\n 'THR': 146.0, 'TRP': 259.0, 'TYR': 229.0, 'VAL': 160.0\n },\n 'Wilke': {\n 'ALA': 129.0, 'ARG': 274.0, 'ASN': 195.0, 'ASP': 193.0,\n 'CYS': 167.0, 'GLN': 225.0, 'GLU': 223.0, 'GLY': 104.0,\n 'HIS': 224.0, 'ILE': 197.0, 'LEU': 201.0, 'LYS': 236.0,\n 'MET': 224.0, 'PHE': 240.0, 'PRO': 159.0, 'SER': 155.0,\n 'THR': 172.0, 'TRP': 285.0, 'TYR': 263.0, 'VAL': 174.0\n },\n 'Sander': {\n 'ALA': 126.0, 'ARG': 248.0, 'ASN': 157.0, 'ASP': 163.0,\n 'CYS': 135.0, 'GLN': 198.0, 'GLU': 194.0, 'GLY': 84.0,\n 'HIS': 184.0, 'ILE': 169.0, 'LEU': 164.0, 'LYS': 205.0,\n 'MET': 188.0, 'PHE': 197.0, 'PRO': 136.0, 'SER': 130.0,\n 'THR': 142.0, 'TRP': 227.0, 'TYR': 222.0, 'VAL': 142.0\n }\n}\n\n\ndef ss_to_index(ss):\n \"\"\"Secondary structure symbol to index.\n\n H=0\n E=1\n C=2\n \"\"\"\n if ss == 'H':\n return 0\n if ss == 'E':\n return 1\n if ss == 'C':\n return 2\n assert 0\n\n\ndef dssp_dict_from_pdb_file(in_file, DSSP=\"dssp\"):\n \"\"\"Create a DSSP dictionary from a PDB file.\n\n Example:\n --------\n >>> dssp_dict=dssp_dict_from_pdb_file(\"1fat.pdb\")\n >>> aa, ss, acc=dssp_dict[('A', 1)]\n\n Parameters\n ----------\n in_file : string\n pdb file\n\n DSSP : string\n DSSP executable (argument to os.system)\n\n Returns\n -------\n (out_dict, keys) : tuple\n a dictionary that maps (chainid, resid) to\n amino acid type, secondary structure code and\n accessibility.\n \"\"\"\n # Using universal newlines is important on Python 3, this\n # gives unicode handles rather than bytes handles.\n p = subprocess.Popen([DSSP, in_file], universal_newlines=True,\n stdout=subprocess.PIPE, 
stderr=subprocess.PIPE)\n out, err = p.communicate()\n\n # Alert user for errors\n if err.strip():\n warnings.warn(err)\n if not out.strip():\n raise Exception('DSSP failed to produce an output')\n\n out_dict, keys = _make_dssp_dict(StringIO(out))\n return out_dict, keys\n\n\ndef make_dssp_dict(filename):\n \"\"\"DSSP dictionary mapping identifers to properties.\n\n Return a DSSP dictionary that maps (chainid, resid) to\n aa, ss and accessibility, from a DSSP file. ::\n\n Parameters\n ----------\n filename : string\n the DSSP output file\n \"\"\"\n with open(filename, \"r\") as handle:\n return _make_dssp_dict(handle)\n\n\ndef _make_dssp_dict(handle):\n \"\"\"Internal function used by mask_dssp_dict (PRIVATE).\n\n Return a DSSP dictionary that maps (chainid, resid) to an amino acid,\n secondary structure symbol, solvent accessibility value, and hydrogen bond\n information (relative dssp indices and hydrogen bond energies) from an open\n DSSP file object. ::\n\n Parameters\n ----------\n handle : file\n the open DSSP output file handle\n \"\"\"\n dssp = {}\n start = 0\n keys = []\n for l in handle.readlines():\n sl = l.split()\n if len(sl) < 2:\n continue\n if sl[1] == \"RESIDUE\":\n # Start parsing from here\n start = 1\n continue\n if not start:\n continue\n if l[9] == \" \":\n # Skip -- missing residue\n continue\n\n dssp_index = int(l[:5])\n resseq = int(l[5:10])\n icode = l[10]\n chainid = l[11]\n aa = l[13]\n ss = l[16]\n if ss == \" \":\n ss = \"-\"\n try:\n NH_O_1_relidx = int(l[38:45])\n NH_O_1_energy = float(l[46:50])\n O_NH_1_relidx = int(l[50:56])\n O_NH_1_energy = float(l[57:61])\n NH_O_2_relidx = int(l[61:67])\n NH_O_2_energy = float(l[68:72])\n O_NH_2_relidx = int(l[72:78])\n O_NH_2_energy = float(l[79:83])\n\n acc = int(l[34:38])\n phi = float(l[103:109])\n psi = float(l[109:115])\n except ValueError as exc:\n # DSSP output breaks its own format when there are >9999\n # residues, since only 4 digits are allocated to the seq num\n # field. 
See 3kic chain T res 321, 1vsy chain T res 6077.\n # Here, look for whitespace to figure out the number of extra\n # digits, and shift parsing the rest of the line by that amount.\n if l[34] != ' ':\n shift = l[34:].find(' ')\n\n NH_O_1_relidx = int(l[38 + shift:45 + shift])\n NH_O_1_energy = float(l[46 + shift:50 + shift])\n O_NH_1_relidx = int(l[50 + shift:56 + shift])\n O_NH_1_energy = float(l[57 + shift:61 + shift])\n NH_O_2_relidx = int(l[61 + shift:67 + shift])\n NH_O_2_energy = float(l[68 + shift:72 + shift])\n O_NH_2_relidx = int(l[72 + shift:78 + shift])\n O_NH_2_energy = float(l[79 + shift:83 + shift])\n\n acc = int((l[34 + shift:38 + shift]))\n phi = float(l[103 + shift:109 + shift])\n psi = float(l[109 + shift:115 + shift])\n else:\n raise ValueError(exc)\n res_id = (\" \", resseq, icode)\n dssp[(chainid, res_id)] = (aa, ss, acc, phi, psi, dssp_index,\n NH_O_1_relidx, NH_O_1_energy, O_NH_1_relidx, O_NH_1_energy,\n NH_O_2_relidx, NH_O_2_energy, O_NH_2_relidx, O_NH_2_energy)\n keys.append((chainid, res_id))\n return dssp, keys\n\n\nclass DSSP(AbstractResiduePropertyMap):\n \"\"\"Run DSSP and parse secondary structure and accessibility.\n\n Run DSSP on a pdb file, and provide a handle to the\n DSSP secondary structure and accessibility.\n\n **Note** that DSSP can only handle one model.\n\n Example:\n --------\n\n >>> p = PDBParser()\n >>> structure = p.get_structure(\"1MOT\", \"1MOT.pdb\")\n >>> model = structure[0]\n >>> dssp = DSSP(model, \"1MOT.pdb\")\n >>> # DSSP data is accessed by a tuple (chain_id, res_id)\n >>> a_key = list(dssp.keys())[2]\n >>> # residue object, secondary structure, solvent accessibility,\n >>> # relative accessiblity, phi, psi\n >>> dssp[a_key]\n (<Residue ALA het= resseq=251 icode= >,\n 'H',\n 72,\n 0.67924528301886788,\n -61.200000000000003,\n -42.399999999999999)\n \"\"\"\n\n def __init__(self, model, pdb_file, dssp=\"dssp\", acc_array=\"Sander\"):\n \"\"\"Create a DSSP object.\n\n Parameters\n ----------\n model : Model\n the first model of the structure\n pdb_file : string\n a PDB file\n dssp : string\n the dssp executable (ie. the argument to os.system)\n acc_array : string\n Accessible surface area (ASA) from either Miller et al. (1987),\n Sander & Rost (1994), or Wilke: Tien et al. 2013, as string\n Sander/Wilke/Miller. 
Defaults to Sander.\n \"\"\"\n\n self.residue_max_acc = residue_max_acc[acc_array]\n\n # create DSSP dictionary\n dssp_dict, dssp_keys = dssp_dict_from_pdb_file(pdb_file, dssp)\n dssp_map = {}\n dssp_list = []\n\n def resid2code(res_id):\n \"\"\"Serialize a residue's resseq and icode for easy comparison.\"\"\"\n return '%s%s' % (res_id[1], res_id[2])\n\n # Now create a dictionary that maps Residue objects to\n # secondary structure and accessibility, and a list of\n # (residue, (secondary structure, accessibility)) tuples\n for key in dssp_keys:\n chain_id, res_id = key\n chain = model[chain_id]\n try:\n res = chain[res_id]\n except KeyError:\n # In DSSP, HET field is not considered in residue identifier.\n # Thus HETATM records may cause unnecessary exceptions.\n # (See 3jui chain A res 593.)\n # Try the lookup again with all HETATM other than water\n res_seq_icode = resid2code(res_id)\n for r in chain:\n if r.id[0] not in (' ', 'W'):\n # Compare resseq + icode\n if resid2code(r.id) == res_seq_icode:\n # Found a matching residue\n res = r\n break\n else:\n raise KeyError(res_id)\n\n # For disordered residues of point mutations, BioPython uses the\n # last one as default, But DSSP takes the first one (alternative\n # location is blank, A or 1). See 1h9h chain E resi 22.\n # Here we select the res in which all atoms have altloc blank, A or\n # 1. If no such residues are found, simply use the first one appears\n # (as DSSP does).\n if res.is_disordered() == 2:\n for rk in res.disordered_get_id_list():\n # All atoms in the disordered residue should have the same\n # altloc, so it suffices to check the altloc of the first\n # atom.\n altloc = res.child_dict[rk].get_list()[0].get_altloc()\n if altloc in tuple('A1 '):\n res.disordered_select(rk)\n break\n else:\n # Simply select the first one\n res.disordered_select(res.disordered_get_id_list()[0])\n\n # Sometimes point mutations are put into HETATM and ATOM with altloc\n # 'A' and 'B'.\n # See 3piu chain A residue 273:\n # <Residue LLP het=H_LLP resseq=273 icode= >\n # <Residue LYS het= resseq=273 icode= >\n # DSSP uses the HETATM LLP as it has altloc 'A'\n # We check the altloc code here.\n elif res.is_disordered() == 1:\n # Check altloc of all atoms in the DisorderedResidue. If it\n # contains blank, A or 1, then use it. Otherwise, look for HET\n # residues of the same seq+icode. 
If not such HET residues are\n # found, just accept the current one.\n altlocs = set(a.get_altloc() for a in res.get_unpacked_list())\n if altlocs.isdisjoint('A1 '):\n # Try again with all HETATM other than water\n res_seq_icode = resid2code(res_id)\n for r in chain:\n if r.id[0] not in (' ', 'W'):\n if resid2code(r.id) == res_seq_icode and \\\n r.get_list()[0].get_altloc() in tuple('A1 '):\n res = r\n break\n\n (aa, ss, acc, phi, psi, dssp_index,\n NH_O_1_relidx, NH_O_1_energy,\n O_NH_1_relidx, O_NH_1_energy,\n NH_O_2_relidx, NH_O_2_energy,\n O_NH_2_relidx, O_NH_2_energy) = dssp_dict[key]\n\n res.xtra[\"SS_DSSP\"] = ss\n res.xtra[\"EXP_DSSP_ASA\"] = acc\n res.xtra[\"PHI_DSSP\"] = phi\n res.xtra[\"PSI_DSSP\"] = psi\n res.xtra[\"DSSP_INDEX\"] = dssp_index\n res.xtra[\"NH_O_1_RELIDX_DSSP\"] = NH_O_1_relidx\n res.xtra[\"NH_O_1_ENERGY_DSSP\"] = NH_O_1_energy\n res.xtra[\"O_NH_1_RELIDX_DSSP\"] = O_NH_1_relidx\n res.xtra[\"O_NH_1_ENERGY_DSSP\"] = O_NH_1_energy\n res.xtra[\"NH_O_2_RELIDX_DSSP\"] = NH_O_2_relidx\n res.xtra[\"NH_O_2_ENERGY_DSSP\"] = NH_O_2_energy\n res.xtra[\"O_NH_2_RELIDX_DSSP\"] = O_NH_2_relidx\n res.xtra[\"O_NH_2_ENERGY_DSSP\"] = O_NH_2_energy\n\n # Relative accessibility\n resname = res.get_resname()\n try:\n rel_acc = acc / self.residue_max_acc[resname]\n except KeyError:\n # Invalid value for resname\n rel_acc = 'NA'\n else:\n if rel_acc > 1.0:\n rel_acc = 1.0\n res.xtra[\"EXP_DSSP_RASA\"] = rel_acc\n # Verify if AA in DSSP == AA in Structure\n # Something went wrong if this is not true!\n # NB: DSSP uses X often\n resname = SCOPData.protein_letters_3to1.get(resname, 'X')\n if resname == \"C\":\n # DSSP renames C in C-bridges to a,b,c,d,...\n # - we rename it back to 'C'\n if _dssp_cys.match(aa):\n aa = 'C'\n # Take care of HETATM again\n if (resname != aa) and (res.id[0] == ' ' or aa != 'X'):\n raise PDBException(\"Structure/DSSP mismatch at %s\" % res)\n\n dssp_vals = (dssp_index, aa, ss, rel_acc, phi, psi,\n NH_O_1_relidx, NH_O_1_energy,\n O_NH_1_relidx, O_NH_1_energy,\n NH_O_2_relidx, NH_O_2_energy,\n O_NH_2_relidx, O_NH_2_energy)\n\n dssp_map[key] = dssp_vals\n dssp_list.append(dssp_vals)\n\n AbstractResiduePropertyMap.__init__(self, dssp_map, dssp_keys,\n dssp_list)\n\n\nif __name__ == \"__main__\":\n import sys\n\n p = PDBParser()\n s = p.get_structure('X', sys.argv[1])\n model = s[0]\n d = DSSP(model, sys.argv[1])\n\n for r in d:\n print(r)\n print(\"Handled %i residues\" % len(d))\n print(sorted(d))\n if ('A', 1) in d:\n print(d[('A', 1)])\n print(s[0]['A'][1].xtra)\n # Secondary structure\n print(''.join(item[1] for item in d))\n", "path": "Bio/PDB/DSSP.py" } ]
diff --git a/.travis-tox.ini b/.travis-tox.ini new file mode 100644 index 00000000000..0aa225d4ee9 --- /dev/null +++ b/.travis-tox.ini @@ -0,0 +1,103 @@ +# This is a configuration file for tox, used to test +# Biopython on various versions of Python etc under +# the Travis Continous Integration service which is +# configured in the file .travis.yml +# +# By default tox will look for tox.ini, so this file +# will not conflict with any personal tox setup. +# +# You can explicitly use this tox configuration on your +# own machine (via the -c setting), PROVIDED you have +# all the relevant versions of Python installed. e.g. +# specifying a specific target envronment (via -e): +# +# $ pip install tox +# $ tox -c .travis-tox.ini -e py35-nocov +# +# Or with test coverage: +# +# $ pip install tox coverage +# $ tox -c .travis-tox.ini -e py35-cover +# +# Or to run the pep8 Python coding style checks: +# +# $ pip install tox pep8 +# $ tox -c .travis-tox.ini -e pep8 +# +# See the envlist setting for other valid arguments. + +[tox] +minversion = 2.0 +skipsdist = True +envlist = + pep8 + sdist + bdist_wheel + {py26,py27,py33,py34,py35,pypy,pypy3}-cover + {py26,py27,py33,py34,py35,pypy,pypy3}-nocov + +[testenv] +# TODO: Try tox default sdist based install instead: +skip_install = True +sitepackages = True +whitelist_externals = + bash + echo +# Want to avoid overhead of compiling numpy or scipy: +install_command = pip install --only-binary=numpy,scipy {opts} {packages} +deps = + #Lines startings xxx: are filtered by the environment. + #Leaving py34 without any soft dependencies (just numpy) + cover: coverage + cover: codecov + py26: unittest2 + {py26,py27}: mysql-python + {py26,py27,py33,py35}: mmtf-python + {py26,py27,py33,py35}: reportlab + {py26,py27,py33,py35}: psycopg2 + {py26,py27,py33,py35,pypy}: mysql-connector-python-rf + {py26,py27,py33,py35,pypy}: rdflib + {py26,py27,py33,py34,py35}: numpy + {py26,py35}: scipy + py27: networkx + py35: matplotlib +commands = + #The bash call is a work around for the pipe character + #The yes is in case we get our prompt about missing NumPy + #The /dev/null is to hide the verbose output but leave warnings + bash -c \'/usr/bin/yes | python setup.py install > /dev/null\' + #The bash call is a work around for the cd command + nocov: bash -c \'cd Tests && python run_tests.py --offline\' + #See https://codecov.io/ and https://github.com/codecov/example-python + cover: bash -c \'cd Tests && coverage run --source=Bio,BioSQL run_tests.py --offline\' + cover: codecov + +[testenv:pep8] +# This does not need to install Biopython or any of its dependencies +skip_install = True +whitelist_externals = + pep8 +deps = + pep8 +commands = + pep8 --max-line-length 92 BioSQL/ + pep8 --ignore E402 --max-line-length 90 Scripts/ + pep8 --max-line-length 90 Doc/examples/ + pep8 --ignore E122,E123,E126,E127,E128,E129,E501,E731 Bio/ + pep8 --ignore E122,E123,E126,E127,E128,E241,E402,E501,E731 Tests/ + +[testenv:sdist] +# This does not need to install Biopython or any of its dependencies +skip_install = True +deps = +commands = + python setup.py sdist --manifest-only + python setup.py sdist --formats=gztar,zip + +[testenv:bdist_wheel] +# This should use NumPy while compiling our C code... 
+skip_install = True +deps = + numpy +commands = + python setup.py bdist_wheel diff --git a/.travis.yml b/.travis.yml index ab754028a98..b6373701acf 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,39 +1,35 @@ # Special configuration file to run tests on Travis-CI via GitHub notifications # See https://travis-ci.org/biopython/biopython/builds for results # -# The tests are run via the coverage script, and if the tests pass the coverage -# information is pushed to https://codecov.io/github/biopython/biopython +# Using TravisCI's configuration alone became too complicated once we wanted +# to cover more than just running the unit tests on different versions of +# Python, so this now does all the complicated test configuration via TOX +# See file .travis-tox.ini # -# Note when testing Python 3, the 'python' command will invoke Python 3 -# and similarly for PyPy too. - -# Environment variables setup via the matrix -# - DEP = list of python packages to install via default pip install -# - COV = yes/no; should tests be run with coverage metric collection -# - OPT = options to run_tests.py such as --offline +# Environment variables setup here: +# - TOXENV = environment used in Tox (conventionally uses py35 etc) # -# Note we're explicitly allowing the online test targets to fail without -# considering the whole test run to be a failure. language: python matrix: include: - - python: "2.6" - env: DEP="mmtf-python rdflib reportlab psycopg2 mysql-python mysql-connector-python-rf unittest2 scipy" COV="yes" OPT="--offline" - - python: "2.7" - env: DEP="mmtf-python rdflib reportlab psycopg2 mysql-python mysql-connector-python-rf networkx" COV="yes" OPT="--offline" - - python: "3.3" - env: DEP="mmtf-python psycopg2 mysql-connector-python-rf" COV="yes" OPT="" - - python: "3.4" - env: DEP="" COV="yes" OPT="--offline" - - python: "3.5" - env: DEP="mmtf-python rdflib reportlab psycopg2 mysql-connector-python-rf scipy matplotlib" COV="yes" OPT="--offline" - - python: "pypy" - env: DEP="rdflib mysql-connector-python-rf" COV="yes" OPT="--offline" - - python: "pypy3" - env: COV="no" OPT="--offline" - allow_failures: - - python: "3.3" + - env: TOXENV=pep8 + - env: TOXENV=sdist + - env: TOXENV=bdist_wheel + - python: 2.6 + env: TOXENV=py26-cover + - python: 2.7 + env: TOXENV=py27-cover + - python: 3.3 + env: TOXENV=py33-cover + - python: 3.4 + env: TOXENV=py34-cover + - python: 3.5 + env: TOXENV=py35-cover + - python: pypy + env: TOXENV=pypy-cover + - python: pypy3 + env: TOXENV=pypy3-nocov sudo: false addons: @@ -49,30 +45,14 @@ addons: - samtools - wise -before_install: - - "pip install --upgrade pip setuptools" - - "pip install --only-binary=numpy,scipy $DEP" - - "if [[ $COV == 'yes' ]]; then pip install coverage; fi" - - install: -#The yes is in case we get our prompt about missing NumPy - - "/usr/bin/yes | python setup.py install" - -before_script: - - cd Tests - - cp biosql.ini.sample biosql.ini + - "cp Tests/biosql.ini.sample Tests/biosql.ini" + - "pip install --upgrade pip setuptools" + - "pip install tox" + - "tox -c .travis-tox.ini -e $TOXENV --notest" script: -#Using just coverage should match up to the current Python version: - - "if [[ $COV == 'yes' ]]; then coverage run --source=Bio,BioSQL run_tests.py $OPT; fi" - - "if [[ $COV != 'yes' ]]; then python run_tests.py $OPT; fi" - -after_success: -#See https://codecov.io/ and https://github.com/codecov/example-python - - "if [[ $COV == 'yes' ]]; then pip install codecov; fi" - - "if [[ $COV == 'yes' ]]; then codecov; fi" + - "tox -c .travis-tox.ini -e 
$TOXENV" -#The email defaults are too talkative notifications: email: false diff --git a/Bio/PDB/DSSP.py b/Bio/PDB/DSSP.py index 2ff1e59af47..8fcc3da2e56 100644 --- a/Bio/PDB/DSSP.py +++ b/Bio/PDB/DSSP.py @@ -285,7 +285,7 @@ def __init__(self, model, pdb_file, dssp="dssp", acc_array="Sander"): """ self.residue_max_acc = residue_max_acc[acc_array] - + # create DSSP dictionary dssp_dict, dssp_keys = dssp_dict_from_pdb_file(pdb_file, dssp) dssp_map = {}
biopython__biopython-4683
two test failures affecting Bio.SeqIO.SeqXmlIO in Debian sid Greetings, This issue may not be an easy one, and even risks being Debian specific, so if you are busy somewhere else, then I won't mind. :) I'm having great difficulties pinpointing a change between Debian testing and sid which is causing [Debian bug #1064147], affecting both Biopython 1.81 and 1.83 in sid, but none of them in testing (as of today). The error output comes from the test suite, and gives in Debian sid: ====================================================================== ERROR: test_embl7 (test_SeqIO.TestSeqIO.test_embl7) ---------------------------------------------------------------------- Traceback (most recent call last): File "/tmp/autopkgtest.OslsYr/autopkgtest_tmp/Tests/test_SeqIO.py", line 3406, in test_embl7 self.perform_test( File "/tmp/autopkgtest.OslsYr/autopkgtest_tmp/Tests/test_SeqIO.py", line 625, in perform_test self.check_simple_write_read( File "/tmp/autopkgtest.OslsYr/autopkgtest_tmp/Tests/test_SeqIO.py", line 362, in check_simple_write_read records2 = list(SeqIO.parse(handle=handle, format=fmt)) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/usr/lib/python3/dist-packages/Bio/SeqIO/Interfaces.py", line 85, in __next__ return next(self.records) ^^^^^^^^^^^^^^^^^^ File "/usr/lib/python3/dist-packages/Bio/SeqIO/SeqXmlIO.py", line 482, in iterate parser.close() File "/usr/lib/python3.11/xml/sax/expatreader.py", line 240, in close self.feed(b"", isFinal=True) File "/usr/lib/python3.11/xml/sax/expatreader.py", line 217, in feed self._parser.Parse(data, isFinal) File "../Modules/pyexpat.c", line 416, in StartElement File "/usr/lib/python3.11/xml/sax/expatreader.py", line 369, in start_element_ns self._cont_handler.startElementNS(pair, None, File "/usr/lib/python3/dist-packages/Bio/SeqIO/SeqXmlIO.py", line 184, in startEntryFieldElement return self.startPropertyElement(attrs) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/usr/lib/python3/dist-packages/Bio/SeqIO/SeqXmlIO.py", line 374, in startPropertyElement record = self.records[-1] ~~~~~~~~~~~~^^^^ IndexError: list index out of range ====================================================================== ERROR: test_genbank8 (test_SeqIO.TestSeqIO.test_genbank8) ---------------------------------------------------------------------- Traceback (most recent call last): File "/tmp/autopkgtest.OslsYr/autopkgtest_tmp/Tests/test_SeqIO.py", line 2803, in test_genbank8 self.perform_test( File "/tmp/autopkgtest.OslsYr/autopkgtest_tmp/Tests/test_SeqIO.py", line 625, in perform_test self.check_simple_write_read( File "/tmp/autopkgtest.OslsYr/autopkgtest_tmp/Tests/test_SeqIO.py", line 362, in check_simple_write_read records2 = list(SeqIO.parse(handle=handle, format=fmt)) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/usr/lib/python3/dist-packages/Bio/SeqIO/Interfaces.py", line 85, in __next__ return next(self.records) ^^^^^^^^^^^^^^^^^^ File "/usr/lib/python3/dist-packages/Bio/SeqIO/SeqXmlIO.py", line 482, in iterate parser.close() File "/usr/lib/python3.11/xml/sax/expatreader.py", line 240, in close self.feed(b"", isFinal=True) File "/usr/lib/python3.11/xml/sax/expatreader.py", line 217, in feed self._parser.Parse(data, isFinal) File "../Modules/pyexpat.c", line 416, in StartElement File "/usr/lib/python3.11/xml/sax/expatreader.py", line 369, in start_element_ns self._cont_handler.startElementNS(pair, None, File "/usr/lib/python3/dist-packages/Bio/SeqIO/SeqXmlIO.py", line 184, in startEntryFieldElement return self.startPropertyElement(attrs) 
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/usr/lib/python3/dist-packages/Bio/SeqIO/SeqXmlIO.py", line 374, in startPropertyElement record = self.records[-1] ~~~~~~~~~~~~^^^^ IndexError: list index out of range Version comparison of Biopython direct dependencies suggest they are not involved in the test regression I observe: * python3-numpy version in sid: 1:1.24.2-2 * python3-numpy version in testing: 1:1.24.2-2 * python3 version in sid: 3.11.6-1 * python3 version in testing: 3.11.6-1 * libc6 version in sid: 2.37-15 * libc6 version in testing: 2.37-15 * python3-reportlab version in sid: 4.1.0-1 * python3-reportlab version in testing: 4.1.0-1 * w3c-sgml-lib version in sid: 1.3-3 * w3c-sgml-lib version in testing: 1.3-3 This means that the issue is caused by a transitive dependency (which I have not managed to identify yet), or something else entirely. I also reproduced the problem on [Debian Salsa CI], which strongly hints that I haven't mishandled my build environment. Besides, if one of the direct dependencies had triggered the bug, then I would have expected the test run triggered by the [package migration CI] to have tripped, which has not happened (which hints that the situation did not appear before 2024 February 3rd in sid). I wouldn't be surprised to learn that this isn't be a problem in Biopython per se, but I'm running out of options without your thoughts upstream (maybe the combination of these two particular tests failing and the others passing hints to something in particular?). Do you per chance have an idea of what I am missing that could cause the SeqXmlIO records to be empty and cause these test failures? Thank you for your time, Étienne. PS: here below, the mandatory form for your convenience: ### Setup I am reporting a problem with Biopython version, Python version, and operating system as follows. In Debian sid: >>> import sys; print(sys.version) 3.11.8 (main, Feb 7 2024, 21:52:08) [GCC 13.2.0] >>> import platform; print(platform.python_implementation()); print(platform.platform()) CPython Linux-6.6.15-amd64-x86_64-with-glibc2.37 >>> import Bio; print(Bio.__version__) 1.83 In Debian testing (no changes visibly): >>> import sys; print(sys.version) 3.11.8 (main, Feb 7 2024, 21:52:08) [GCC 13.2.0] >>> import platform; print(platform.python_implementation()); print(platform.platform()) CPython Linux-6.6.15-amd64-x86_64-with-glibc2.37 >>> import Bio; print(Bio.__version__) 1.83 This is also valid with Biopython 1.81. ### Expected behaviour I would like to make all test items of the test suite pass on Debian sid. ### Actual behaviour All test items pass on Debian testing. 
However on Debian sid, I observe the following test items failing: ====================================================================== ERROR: test_embl7 (test_SeqIO.TestSeqIO.test_embl7) ---------------------------------------------------------------------- Traceback (most recent call last): File "/tmp/autopkgtest.OslsYr/autopkgtest_tmp/Tests/test_SeqIO.py", line 3406, in test_embl7 self.perform_test( File "/tmp/autopkgtest.OslsYr/autopkgtest_tmp/Tests/test_SeqIO.py", line 625, in perform_test self.check_simple_write_read( File "/tmp/autopkgtest.OslsYr/autopkgtest_tmp/Tests/test_SeqIO.py", line 362, in check_simple_write_read records2 = list(SeqIO.parse(handle=handle, format=fmt)) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/usr/lib/python3/dist-packages/Bio/SeqIO/Interfaces.py", line 85, in __next__ return next(self.records) ^^^^^^^^^^^^^^^^^^ File "/usr/lib/python3/dist-packages/Bio/SeqIO/SeqXmlIO.py", line 482, in iterate parser.close() File "/usr/lib/python3.11/xml/sax/expatreader.py", line 240, in close self.feed(b"", isFinal=True) File "/usr/lib/python3.11/xml/sax/expatreader.py", line 217, in feed self._parser.Parse(data, isFinal) File "../Modules/pyexpat.c", line 416, in StartElement File "/usr/lib/python3.11/xml/sax/expatreader.py", line 369, in start_element_ns self._cont_handler.startElementNS(pair, None, File "/usr/lib/python3/dist-packages/Bio/SeqIO/SeqXmlIO.py", line 184, in startEntryFieldElement return self.startPropertyElement(attrs) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/usr/lib/python3/dist-packages/Bio/SeqIO/SeqXmlIO.py", line 374, in startPropertyElement record = self.records[-1] ~~~~~~~~~~~~^^^^ IndexError: list index out of range ====================================================================== ERROR: test_genbank8 (test_SeqIO.TestSeqIO.test_genbank8) ---------------------------------------------------------------------- Traceback (most recent call last): File "/tmp/autopkgtest.OslsYr/autopkgtest_tmp/Tests/test_SeqIO.py", line 2803, in test_genbank8 self.perform_test( File "/tmp/autopkgtest.OslsYr/autopkgtest_tmp/Tests/test_SeqIO.py", line 625, in perform_test self.check_simple_write_read( File "/tmp/autopkgtest.OslsYr/autopkgtest_tmp/Tests/test_SeqIO.py", line 362, in check_simple_write_read records2 = list(SeqIO.parse(handle=handle, format=fmt)) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/usr/lib/python3/dist-packages/Bio/SeqIO/Interfaces.py", line 85, in __next__ return next(self.records) ^^^^^^^^^^^^^^^^^^ File "/usr/lib/python3/dist-packages/Bio/SeqIO/SeqXmlIO.py", line 482, in iterate parser.close() File "/usr/lib/python3.11/xml/sax/expatreader.py", line 240, in close self.feed(b"", isFinal=True) File "/usr/lib/python3.11/xml/sax/expatreader.py", line 217, in feed self._parser.Parse(data, isFinal) File "../Modules/pyexpat.c", line 416, in StartElement File "/usr/lib/python3.11/xml/sax/expatreader.py", line 369, in start_element_ns self._cont_handler.startElementNS(pair, None, File "/usr/lib/python3/dist-packages/Bio/SeqIO/SeqXmlIO.py", line 184, in startEntryFieldElement return self.startPropertyElement(attrs) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/usr/lib/python3/dist-packages/Bio/SeqIO/SeqXmlIO.py", line 374, in startPropertyElement record = self.records[-1] ~~~~~~~~~~~~^^^^ IndexError: list index out of range ### Steps to reproduce Run the test suite. 
[Debian bug #1064147]: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=1064147 [Debian Salsa CI]: https://salsa.debian.org/med-team/python-biopython/-/jobs/5314383 [package migration CI]: https://ci.debian.net/packages/p/python-biopython/unstable/amd64/
[Debian bug #1064147]: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=1064147 [Debian Salsa CI]: https://salsa.debian.org/med-team/python-biopython/-/jobs/5314383 [package migration CI]: https://ci.debian.net/packages/p/python-biopython/unstable/amd64/
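For readers who want to see the failure mode outside the test suite, here is a minimal round-trip sketch in the spirit of check_simple_write_read(): write one record in the "seqxml" format, then parse it back. The record contents below are invented for illustration and are not taken from the Biopython tests; on an affected Debian sid system the final parse step is where the IndexError above surfaces.

```python
# Minimal seqxml write/read round trip, loosely mirroring what
# Tests/test_SeqIO.py's check_simple_write_read() does for the failing cases.
# The record details are made up for illustration only.
from io import BytesIO

from Bio.Seq import Seq
from Bio.SeqIO import parse, write
from Bio.SeqRecord import SeqRecord

record = SeqRecord(
    Seq("ACGT" * 300),
    id="example1",
    description="illustrative record",
    annotations={"molecule_type": "DNA"},  # required by the SeqXML writer
)

handle = BytesIO()                      # seqxml needs a binary handle
write([record], handle, "seqxml")
handle.seek(0)
records_back = list(parse(handle, "seqxml"))  # IndexError surfaces here on sid
print(records_back[0].id)
```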
[ { "content": "# Copyright 2010 by Thomas Schmitt.\n#\n# This file is part of the Biopython distribution and governed by your\n# choice of the \"Biopython License Agreement\" or the \"BSD 3-Clause License\".\n# Please see the LICENSE file that should have been included as part of this\n# package.\n\"\"\"Bio.SeqIO support for the \"seqxml\" file format, SeqXML.\n\nThis module is for reading and writing SeqXML format files as\nSeqRecord objects, and is expected to be used via the Bio.SeqIO API.\n\nSeqXML is a lightweight XML format which is supposed be an alternative for\nFASTA files. For more Information see http://www.seqXML.org and Schmitt et al\n(2011), https://doi.org/10.1093/bib/bbr025\n\"\"\"\n\nfrom xml import sax\nfrom xml.sax import handler\nfrom xml.sax.saxutils import XMLGenerator\nfrom xml.sax.xmlreader import AttributesImpl\n\nfrom Bio.Seq import Seq\nfrom Bio.SeqRecord import SeqRecord\n\nfrom .Interfaces import SequenceIterator\nfrom .Interfaces import SequenceWriter\n\n\nclass ContentHandler(handler.ContentHandler):\n \"\"\"Handles XML events generated by the parser (PRIVATE).\"\"\"\n\n def __init__(self):\n \"\"\"Create a handler to handle XML events.\"\"\"\n super().__init__()\n self.source = None\n self.sourceVersion = None\n self.seqXMLversion = None\n self.ncbiTaxID = None\n self.speciesName = None\n self.startElementNS = None\n self.data = None\n self.records = []\n\n def startDocument(self):\n \"\"\"Set XML handlers when an XML declaration is found.\"\"\"\n self.startElementNS = self.startSeqXMLElement\n\n def startSeqXMLElement(self, name, qname, attrs):\n \"\"\"Handle start of a seqXML element.\"\"\"\n if name != (None, \"seqXML\"):\n raise ValueError(\"Failed to find the start of seqXML element\")\n if qname is not None:\n raise RuntimeError(\"Unexpected qname for seqXML element\")\n schema = None\n for key, value in attrs.items():\n namespace, localname = key\n if namespace is None:\n if localname == \"source\":\n self.source = value\n elif localname == \"sourceVersion\":\n self.sourceVersion = value\n elif localname == \"seqXMLversion\":\n self.seqXMLversion = value\n elif localname == \"ncbiTaxID\":\n # check if it is an integer, but store as string\n number = int(value)\n self.ncbiTaxID = value\n elif localname == \"speciesName\":\n self.speciesName = value\n else:\n raise ValueError(\"Unexpected attribute for XML Schema\")\n elif namespace == \"http://www.w3.org/2001/XMLSchema-instance\":\n if localname == \"noNamespaceSchemaLocation\":\n schema = value\n else:\n raise ValueError(\"Unexpected attribute for XML Schema in namespace\")\n else:\n raise ValueError(\n f\"Unexpected namespace '{namespace}' for seqXML attribute\"\n )\n if self.seqXMLversion is None:\n raise ValueError(\"Failed to find seqXMLversion\")\n elif self.seqXMLversion not in (\"0.1\", \"0.2\", \"0.3\", \"0.4\"):\n raise ValueError(\"Unsupported seqXMLversion\")\n url = f\"http://www.seqxml.org/{self.seqXMLversion}/seqxml.xsd\"\n if schema is not None and schema != url:\n raise ValueError(\n \"XML Schema '%s' found not consistent with reported seqXML version %s\"\n % (schema, self.seqXMLversion)\n )\n # speciesName and ncbiTaxID attributes on the root are only supported\n # in 0.4\n if self.speciesName and self.seqXMLversion != \"0.4\":\n raise ValueError(\n \"Attribute 'speciesName' on root is only supported in version 0.4\"\n )\n if self.ncbiTaxID and self.seqXMLversion != \"0.4\":\n raise ValueError(\n \"Attribute 'ncbiTaxID' on root is only supported in version 0.4\"\n )\n 
self.endElementNS = self.endSeqXMLElement\n self.startElementNS = self.startEntryElement\n\n def endSeqXMLElement(self, name, qname):\n \"\"\"Handle end of the seqXML element.\"\"\"\n namespace, localname = name\n if namespace is not None:\n raise RuntimeError(f\"Unexpected namespace '{namespace}' for seqXML end\")\n if qname is not None:\n raise RuntimeError(f\"Unexpected qname '{qname}' for seqXML end\")\n if localname != \"seqXML\":\n raise RuntimeError(\"Failed to find end of seqXML element\")\n self.startElementNS = None\n self.endElementNS = None\n\n def startEntryElement(self, name, qname, attrs):\n \"\"\"Set new entry with id and the optional entry source (PRIVATE).\"\"\"\n if name != (None, \"entry\"):\n raise ValueError(\"Expected to find the start of an entry element\")\n if qname is not None:\n raise RuntimeError(\"Unexpected qname for entry element\")\n record = SeqRecord(None, id=None)\n if self.speciesName is not None:\n record.annotations[\"organism\"] = self.speciesName\n if self.ncbiTaxID is not None:\n record.annotations[\"ncbi_taxid\"] = self.ncbiTaxID\n record.annotations[\"source\"] = self.source\n for key, value in attrs.items():\n namespace, localname = key\n if namespace is None:\n if localname == \"id\":\n record.id = value\n elif localname == \"source\" and (\n self.seqXMLversion == \"0.3\" or self.seqXMLversion == \"0.4\"\n ):\n record.annotations[\"source\"] = value\n else:\n raise ValueError(\n f\"Unexpected attribute {localname} in entry element\"\n )\n else:\n raise ValueError(\n f\"Unexpected namespace '{namespace}' for entry attribute\"\n )\n if record.id is None:\n raise ValueError(\"Failed to find entry ID\")\n self.records.append(record)\n if self.seqXMLversion == \"0.1\":\n self.startElementNS = self.startEntryFieldElementVersion01\n else:\n self.startElementNS = self.startEntryFieldElement\n self.endElementNS = self.endEntryElement\n\n def endEntryElement(self, name, qname):\n \"\"\"Handle end of an entry element.\"\"\"\n if name != (None, \"entry\"):\n raise ValueError(\"Expected to find the end of an entry element\")\n if qname is not None:\n raise RuntimeError(\"Unexpected qname for entry element\")\n if self.records[-1].seq is None:\n raise ValueError(\"Failed to find a sequence for entry element\")\n self.startElementNS = self.startEntryElement\n self.endElementNS = self.endSeqXMLElement\n\n def startEntryFieldElementVersion01(self, name, qname, attrs):\n \"\"\"Receive a field of an entry element and forward it for version 0.1.\"\"\"\n namespace, localname = name\n if namespace is not None:\n raise ValueError(\n f\"Unexpected namespace '{namespace}' for {localname} element\"\n )\n if qname is not None:\n raise RuntimeError(f\"Unexpected qname '{qname}' for {localname} element\")\n if localname == \"species\":\n return self.startSpeciesElement(attrs)\n if localname == \"description\":\n return self.startDescriptionElement(attrs)\n if localname in (\"dnaSeq\", \"rnaSeq\", \"aaSeq\"):\n return self.startSequenceElement(attrs)\n if localname == \"alternativeID\":\n return self.startDBRefElement(attrs)\n if localname == \"property\":\n return self.startPropertyElement(attrs)\n raise ValueError(f\"Unexpected field {localname} in entry\")\n\n def startEntryFieldElement(self, name, qname, attrs):\n \"\"\"Receive a field of an entry element and forward it for versions >=0.2.\"\"\"\n namespace, localname = name\n if namespace is not None:\n raise ValueError(\n f\"Unexpected namespace '{namespace}' for {localname} element\"\n )\n if qname is not None:\n 
raise RuntimeError(f\"Unexpected qname '{qname}' for {localname} element\")\n if localname == \"species\":\n return self.startSpeciesElement(attrs)\n if localname == \"description\":\n return self.startDescriptionElement(attrs)\n if localname in (\"DNAseq\", \"RNAseq\", \"AAseq\"):\n return self.startSequenceElement(attrs)\n if localname == \"DBRef\":\n return self.startDBRefElement(attrs)\n if localname == \"property\":\n return self.startPropertyElement(attrs)\n raise ValueError(f\"Unexpected field {localname} in entry\")\n\n def startSpeciesElement(self, attrs):\n \"\"\"Parse the species information.\"\"\"\n name = None\n ncbiTaxID = None\n for key, value in attrs.items():\n namespace, localname = key\n if namespace is None:\n if localname == \"name\":\n name = value\n elif localname == \"ncbiTaxID\":\n # check if it is an integer, but store as string\n number = int(value)\n ncbiTaxID = value\n else:\n raise ValueError(\n f\"Unexpected attribute '{key}' found in species tag\"\n )\n else:\n raise ValueError(\n f\"Unexpected namespace '{namespace}' for species attribute\"\n )\n # The attributes \"name\" and \"ncbiTaxID\" are required:\n if name is None:\n raise ValueError(\"Failed to find species name\")\n if ncbiTaxID is None:\n raise ValueError(\"Failed to find ncbiTaxId\")\n record = self.records[-1]\n # The keywords for the species annotation are taken from SwissIO\n record.annotations[\"organism\"] = name\n # TODO - Should have been a list to match SwissProt parser:\n record.annotations[\"ncbi_taxid\"] = ncbiTaxID\n self.endElementNS = self.endSpeciesElement\n\n def endSpeciesElement(self, name, qname):\n \"\"\"Handle end of a species element.\"\"\"\n namespace, localname = name\n if namespace is not None:\n raise RuntimeError(f\"Unexpected namespace '{namespace}' for species end\")\n if qname is not None:\n raise RuntimeError(f\"Unexpected qname '{qname}' for species end\")\n if localname != \"species\":\n raise RuntimeError(\"Failed to find end of species element\")\n self.endElementNS = self.endEntryElement\n\n def startDescriptionElement(self, attrs):\n \"\"\"Parse the description.\"\"\"\n if attrs:\n raise ValueError(\"Unexpected attributes found in description element\")\n if self.data is not None:\n raise RuntimeError(f\"Unexpected data found: '{self.data}'\")\n self.data = \"\"\n self.endElementNS = self.endDescriptionElement\n\n def endDescriptionElement(self, name, qname):\n \"\"\"Handle the end of a description element.\"\"\"\n namespace, localname = name\n if namespace is not None:\n raise RuntimeError(\n f\"Unexpected namespace '{namespace}' for description end\"\n )\n if qname is not None:\n raise RuntimeError(f\"Unexpected qname '{qname}' for description end\")\n if localname != \"description\":\n raise RuntimeError(\"Failed to find end of description element\")\n record = self.records[-1]\n description = self.data\n if description: # ignore if empty string\n record.description = description\n self.data = None\n self.endElementNS = self.endEntryElement\n\n def startSequenceElement(self, attrs):\n \"\"\"Parse DNA, RNA, or protein sequence.\"\"\"\n if attrs:\n raise ValueError(\"Unexpected attributes found in sequence element\")\n if self.data is not None:\n raise RuntimeError(f\"Unexpected data found: '{self.data}'\")\n self.data = \"\"\n self.endElementNS = self.endSequenceElement\n\n def endSequenceElement(self, name, qname):\n \"\"\"Handle the end of a sequence element.\"\"\"\n namespace, localname = name\n if namespace is not None:\n raise RuntimeError(f\"Unexpected 
namespace '{namespace}' for sequence end\")\n if qname is not None:\n raise RuntimeError(f\"Unexpected qname '{qname}' for sequence end\")\n record = self.records[-1]\n if (localname == \"DNAseq\" and self.seqXMLversion != \"0.1\") or (\n localname == \"dnaSeq\" and self.seqXMLversion == \"0.1\"\n ):\n record.annotations[\"molecule_type\"] = \"DNA\"\n elif (localname == \"RNAseq\" and self.seqXMLversion != \"0.1\") or (\n localname == \"rnaSeq\" and self.seqXMLversion == \"0.1\"\n ):\n record.annotations[\"molecule_type\"] = \"RNA\"\n elif (localname == \"AAseq\" and self.seqXMLversion >= \"0.1\") or (\n localname == \"aaSeq\" and self.seqXMLversion == \"0.1\"\n ):\n record.annotations[\"molecule_type\"] = \"protein\"\n else:\n raise RuntimeError(\n f\"Failed to find end of sequence (localname = {localname})\"\n )\n record.seq = Seq(self.data)\n self.data = None\n self.endElementNS = self.endEntryElement\n\n def startDBRefElement(self, attrs):\n \"\"\"Parse a database cross reference.\"\"\"\n TYPE = None\n source = None\n ID = None\n for key, value in attrs.items():\n namespace, localname = key\n if namespace is None:\n if localname == \"type\":\n TYPE = value\n elif localname == \"source\":\n source = value\n elif localname == \"id\":\n ID = value\n else:\n raise ValueError(\n f\"Unexpected attribute '{key}' found for DBRef element\"\n )\n else:\n raise ValueError(\n f\"Unexpected namespace '{namespace}' for DBRef attribute\"\n )\n # The attributes \"source\" and \"id\" are required, and \"type\" in versions\n # 0.2-0.3:\n if source is None:\n raise ValueError(\"Failed to find source for DBRef element\")\n if ID is None:\n raise ValueError(\"Failed to find id for DBRef element\")\n if TYPE is None and (\n self.seqXMLversion == \"0.2\" or self.seqXMLversion == \"0.3\"\n ):\n raise ValueError(\"Failed to find type for DBRef element\")\n if self.data is not None:\n raise RuntimeError(f\"Unexpected data found: '{self.data}'\")\n self.data = \"\"\n record = self.records[-1]\n dbxref = f\"{source}:{ID}\"\n if dbxref not in record.dbxrefs:\n record.dbxrefs.append(dbxref)\n self.endElementNS = self.endDBRefElement\n\n def endDBRefElement(self, name, qname):\n \"\"\"Handle the end of a DBRef element.\"\"\"\n namespace, localname = name\n if namespace is not None:\n raise RuntimeError(f\"Unexpected namespace '{namespace}' for DBRef element\")\n if qname is not None:\n raise RuntimeError(f\"Unexpected qname '{qname}' for DBRef element\")\n if (localname != \"DBRef\" and self.seqXMLversion != \"0.1\") or (\n localname != \"alternativeID\" and self.seqXMLversion == \"0.1\"\n ):\n raise RuntimeError(f\"Unexpected localname '{localname}' for DBRef element\")\n if self.data:\n raise RuntimeError(\n f\"Unexpected data received for DBRef element: '{self.data}'\"\n )\n self.data = None\n self.endElementNS = self.endEntryElement\n\n def startPropertyElement(self, attrs):\n \"\"\"Handle the start of a property element.\"\"\"\n property_name = None\n property_value = None\n for key, value in attrs.items():\n namespace, localname = key\n if namespace is None:\n if localname == \"name\":\n property_name = value\n elif localname == \"value\":\n property_value = value\n else:\n raise ValueError(\n \"Unexpected attribute '%s' found for property element\", key\n )\n else:\n raise ValueError(\n f\"Unexpected namespace '{namespace}' for property attribute\"\n )\n # The attribute \"name\" is required:\n if property_name is None:\n raise ValueError(\"Failed to find name for property element\")\n record = 
self.records[-1]\n if property_name == \"molecule_type\":\n # At this point, record.annotations[\"molecule_type\"] is either\n # \"DNA\", \"RNA\", or \"protein\"; property_value may be a more detailed\n # description such as \"mRNA\" or \"genomic DNA\".\n assert record.annotations[property_name] in property_value\n record.annotations[property_name] = property_value\n else:\n if property_name not in record.annotations:\n record.annotations[property_name] = []\n record.annotations[property_name].append(property_value)\n self.endElementNS = self.endPropertyElement\n\n def endPropertyElement(self, name, qname):\n \"\"\"Handle the end of a property element.\"\"\"\n namespace, localname = name\n if namespace is not None:\n raise RuntimeError(\n f\"Unexpected namespace '{namespace}' for property element\"\n )\n if qname is not None:\n raise RuntimeError(f\"Unexpected qname '{qname}' for property element\")\n if localname != \"property\":\n raise RuntimeError(\n f\"Unexpected localname '{localname}' for property element\"\n )\n self.endElementNS = self.endEntryElement\n\n def characters(self, data):\n \"\"\"Handle character data.\"\"\"\n if self.data is not None:\n self.data += data\n\n\nclass SeqXmlIterator(SequenceIterator):\n \"\"\"Parser for seqXML files.\n\n Parses seqXML files and creates SeqRecords.\n Assumes valid seqXML please validate beforehand.\n It is assumed that all information for one record can be found within a\n record element or above. Two types of methods are called when the start\n tag of an element is reached. To receive only the attributes of an\n element before its end tag is reached implement _attr_TAGNAME.\n To get an element and its children as a DOM tree implement _elem_TAGNAME.\n Everything that is part of the DOM tree will not trigger any further\n method calls.\n \"\"\"\n\n BLOCK = 1024\n\n def __init__(self, stream_or_path, namespace=None):\n \"\"\"Create the object and initialize the XML parser.\"\"\"\n # Make sure we got a binary handle. If we got a text handle, then\n # the parser will still run but unicode characters will be garbled\n # if the text handle was opened with a different encoding than the\n # one specified in the XML file. 
With a binary handle, the correct\n # encoding is picked up by the parser from the XML file.\n self.parser = sax.make_parser()\n content_handler = ContentHandler()\n self.parser.setContentHandler(content_handler)\n self.parser.setFeature(handler.feature_namespaces, True)\n super().__init__(stream_or_path, mode=\"b\", fmt=\"SeqXML\")\n\n def parse(self, handle):\n \"\"\"Start parsing the file, and return a SeqRecord generator.\"\"\"\n parser = self.parser\n content_handler = parser.getContentHandler()\n BLOCK = self.BLOCK\n while True:\n # Read in another block of the file...\n text = handle.read(BLOCK)\n if not text:\n if content_handler.startElementNS is None:\n raise ValueError(\"Empty file.\")\n else:\n raise ValueError(\"XML file contains no data.\")\n parser.feed(text)\n seqXMLversion = content_handler.seqXMLversion\n if seqXMLversion is not None:\n break\n self.seqXMLversion = seqXMLversion\n self.source = content_handler.source\n self.sourceVersion = content_handler.sourceVersion\n self.ncbiTaxID = content_handler.ncbiTaxID\n self.speciesName = content_handler.speciesName\n records = self.iterate(handle)\n return records\n\n def iterate(self, handle):\n \"\"\"Iterate over the records in the XML file.\"\"\"\n parser = self.parser\n content_handler = parser.getContentHandler()\n records = content_handler.records\n BLOCK = self.BLOCK\n while True:\n if len(records) > 1:\n # Then at least the first record is finished\n record = records.pop(0)\n yield record\n # Read in another block of the file...\n text = handle.read(BLOCK)\n if not text:\n break\n parser.feed(text)\n # We have reached the end of the XML file;\n # send out the remaining records\n yield from records\n records.clear()\n parser.close()\n\n\nclass SeqXmlWriter(SequenceWriter):\n \"\"\"Writes SeqRecords into seqXML file.\n\n SeqXML requires the SeqRecord annotations to specify the molecule_type;\n the molecule type is required to contain the term \"DNA\", \"RNA\", or\n \"protein\".\n \"\"\"\n\n def __init__(\n self, target, source=None, source_version=None, species=None, ncbiTaxId=None\n ):\n \"\"\"Create Object and start the xml generator.\n\n Arguments:\n - target - Output stream opened in binary mode, or a path to a file.\n - source - The source program/database of the file, for example\n UniProt.\n - source_version - The version or release number of the source\n program or database from which the data originated.\n - species - The scientific name of the species of origin of all\n entries in the file.\n - ncbiTaxId - The NCBI taxonomy identifier of the species of origin.\n\n \"\"\"\n super().__init__(target, \"wb\")\n handle = self.handle\n self.xml_generator = XMLGenerator(handle, \"utf-8\")\n self.xml_generator.startDocument()\n self.source = source\n self.source_version = source_version\n self.species = species\n self.ncbiTaxId = ncbiTaxId\n\n def write_header(self):\n \"\"\"Write root node with document metadata.\"\"\"\n attrs = {\n \"xmlns:xsi\": \"http://www.w3.org/2001/XMLSchema-instance\",\n \"xsi:noNamespaceSchemaLocation\": \"http://www.seqxml.org/0.4/seqxml.xsd\",\n \"seqXMLversion\": \"0.4\",\n }\n\n if self.source is not None:\n attrs[\"source\"] = self.source\n if self.source_version is not None:\n attrs[\"sourceVersion\"] = self.source_version\n if self.species is not None:\n if not isinstance(self.species, str):\n raise TypeError(\"species should be of type string\")\n attrs[\"speciesName\"] = self.species\n if self.ncbiTaxId is not None:\n if not isinstance(self.ncbiTaxId, (str, int)):\n raise 
TypeError(\"ncbiTaxID should be of type string or int\")\n attrs[\"ncbiTaxID\"] = self.ncbiTaxId\n\n self.xml_generator.startElement(\"seqXML\", AttributesImpl(attrs))\n\n def write_record(self, record):\n \"\"\"Write one record.\"\"\"\n if not record.id or record.id == \"<unknown id>\":\n raise ValueError(\"SeqXML requires identifier\")\n\n if not isinstance(record.id, str):\n raise TypeError(\"Identifier should be of type string\")\n\n attrb = {\"id\": record.id}\n\n if (\n \"source\" in record.annotations\n and self.source != record.annotations[\"source\"]\n ):\n if not isinstance(record.annotations[\"source\"], str):\n raise TypeError(\"source should be of type string\")\n attrb[\"source\"] = record.annotations[\"source\"]\n\n self.xml_generator.startElement(\"entry\", AttributesImpl(attrb))\n self._write_species(record)\n self._write_description(record)\n self._write_seq(record)\n self._write_dbxrefs(record)\n self._write_properties(record)\n self.xml_generator.endElement(\"entry\")\n\n def write_footer(self):\n \"\"\"Close the root node and finish the XML document.\"\"\"\n self.xml_generator.endElement(\"seqXML\")\n self.xml_generator.endDocument()\n\n def _write_species(self, record):\n \"\"\"Write the species if given (PRIVATE).\"\"\"\n local_ncbi_taxid = None\n if \"ncbi_taxid\" in record.annotations:\n local_ncbi_taxid = record.annotations[\"ncbi_taxid\"]\n if isinstance(local_ncbi_taxid, list):\n # SwissProt parser uses a list (which could cope with chimeras)\n if len(local_ncbi_taxid) == 1:\n local_ncbi_taxid = local_ncbi_taxid[0]\n elif len(local_ncbi_taxid) == 0:\n local_ncbi_taxid = None\n else:\n raise ValueError(\n \"Multiple entries for record.annotations['ncbi_taxid'], %r\"\n % local_ncbi_taxid\n )\n if \"organism\" in record.annotations and local_ncbi_taxid:\n local_org = record.annotations[\"organism\"]\n\n if not isinstance(local_org, str):\n raise TypeError(\"organism should be of type string\")\n\n if not isinstance(local_ncbi_taxid, (str, int)):\n raise TypeError(\"ncbiTaxID should be of type string or int\")\n\n # The local species definition is only written if it differs from the global species definition\n if local_org != self.species or local_ncbi_taxid != self.ncbiTaxId:\n attr = {\"name\": local_org, \"ncbiTaxID\": str(local_ncbi_taxid)}\n self.xml_generator.startElement(\"species\", AttributesImpl(attr))\n self.xml_generator.endElement(\"species\")\n\n def _write_description(self, record):\n \"\"\"Write the description if given (PRIVATE).\"\"\"\n if record.description:\n if not isinstance(record.description, str):\n raise TypeError(\"Description should be of type string\")\n\n description = record.description\n if description == \"<unknown description>\":\n description = \"\"\n\n if len(record.description) > 0:\n self.xml_generator.startElement(\"description\", AttributesImpl({}))\n self.xml_generator.characters(description)\n self.xml_generator.endElement(\"description\")\n\n def _write_seq(self, record):\n \"\"\"Write the sequence (PRIVATE).\n\n Note that SeqXML requires the molecule type to contain the term\n \"DNA\", \"RNA\", or \"protein\".\n \"\"\"\n seq = bytes(record.seq)\n\n if not len(seq) > 0:\n raise ValueError(\"The sequence length should be greater than 0\")\n\n molecule_type = record.annotations.get(\"molecule_type\")\n if molecule_type is None:\n raise ValueError(\"molecule_type is not defined\")\n elif \"DNA\" in molecule_type:\n seqElem = \"DNAseq\"\n elif \"RNA\" in molecule_type:\n seqElem = \"RNAseq\"\n elif \"protein\" in 
molecule_type:\n seqElem = \"AAseq\"\n else:\n raise ValueError(f\"unknown molecule_type '{molecule_type}'\")\n\n self.xml_generator.startElement(seqElem, AttributesImpl({}))\n self.xml_generator.characters(seq)\n self.xml_generator.endElement(seqElem)\n\n def _write_dbxrefs(self, record):\n \"\"\"Write all database cross references (PRIVATE).\"\"\"\n if record.dbxrefs is not None:\n for dbxref in record.dbxrefs:\n if not isinstance(dbxref, str):\n raise TypeError(\"dbxrefs should be of type list of string\")\n if dbxref.find(\":\") < 1:\n raise ValueError(\n \"dbxrefs should be in the form ['source:id', 'source:id' ]\"\n )\n\n dbsource, dbid = dbxref.split(\":\", 1)\n\n attr = {\"source\": dbsource, \"id\": dbid}\n self.xml_generator.startElement(\"DBRef\", AttributesImpl(attr))\n self.xml_generator.endElement(\"DBRef\")\n\n def _write_properties(self, record):\n \"\"\"Write all annotations that are key value pairs with values of a primitive type or list of primitive types (PRIVATE).\"\"\"\n for key, value in record.annotations.items():\n if key not in (\"organism\", \"ncbi_taxid\", \"source\"):\n if value is None:\n attr = {\"name\": key}\n self.xml_generator.startElement(\"property\", AttributesImpl(attr))\n self.xml_generator.endElement(\"property\")\n\n elif isinstance(value, list):\n for v in value:\n if v is None:\n attr = {\"name\": key}\n else:\n attr = {\"name\": key, \"value\": str(v)}\n self.xml_generator.startElement(\n \"property\", AttributesImpl(attr)\n )\n self.xml_generator.endElement(\"property\")\n\n elif isinstance(value, (int, float, str)):\n attr = {\"name\": key, \"value\": str(value)}\n self.xml_generator.startElement(\"property\", AttributesImpl(attr))\n self.xml_generator.endElement(\"property\")\n", "path": "Bio/SeqIO/SeqXmlIO.py" } ]
[ { "content": "# Copyright 2010 by Thomas Schmitt.\n#\n# This file is part of the Biopython distribution and governed by your\n# choice of the \"Biopython License Agreement\" or the \"BSD 3-Clause License\".\n# Please see the LICENSE file that should have been included as part of this\n# package.\n\"\"\"Bio.SeqIO support for the \"seqxml\" file format, SeqXML.\n\nThis module is for reading and writing SeqXML format files as\nSeqRecord objects, and is expected to be used via the Bio.SeqIO API.\n\nSeqXML is a lightweight XML format which is supposed be an alternative for\nFASTA files. For more Information see http://www.seqXML.org and Schmitt et al\n(2011), https://doi.org/10.1093/bib/bbr025\n\"\"\"\n\nfrom xml import sax\nfrom xml.sax import handler\nfrom xml.sax.saxutils import XMLGenerator\nfrom xml.sax.xmlreader import AttributesImpl\n\nfrom Bio.Seq import Seq\nfrom Bio.SeqRecord import SeqRecord\n\nfrom .Interfaces import SequenceIterator\nfrom .Interfaces import SequenceWriter\n\n\nclass ContentHandler(handler.ContentHandler):\n \"\"\"Handles XML events generated by the parser (PRIVATE).\"\"\"\n\n def __init__(self):\n \"\"\"Create a handler to handle XML events.\"\"\"\n super().__init__()\n self.source = None\n self.sourceVersion = None\n self.seqXMLversion = None\n self.ncbiTaxID = None\n self.speciesName = None\n self.startElementNS = None\n self.data = None\n self.records = []\n\n def startDocument(self):\n \"\"\"Set XML handlers when an XML declaration is found.\"\"\"\n self.startElementNS = self.startSeqXMLElement\n\n def startSeqXMLElement(self, name, qname, attrs):\n \"\"\"Handle start of a seqXML element.\"\"\"\n if name != (None, \"seqXML\"):\n raise ValueError(\"Failed to find the start of seqXML element\")\n if qname is not None:\n raise RuntimeError(\"Unexpected qname for seqXML element\")\n schema = None\n for key, value in attrs.items():\n namespace, localname = key\n if namespace is None:\n if localname == \"source\":\n self.source = value\n elif localname == \"sourceVersion\":\n self.sourceVersion = value\n elif localname == \"seqXMLversion\":\n self.seqXMLversion = value\n elif localname == \"ncbiTaxID\":\n # check if it is an integer, but store as string\n number = int(value)\n self.ncbiTaxID = value\n elif localname == \"speciesName\":\n self.speciesName = value\n else:\n raise ValueError(\"Unexpected attribute for XML Schema\")\n elif namespace == \"http://www.w3.org/2001/XMLSchema-instance\":\n if localname == \"noNamespaceSchemaLocation\":\n schema = value\n else:\n raise ValueError(\"Unexpected attribute for XML Schema in namespace\")\n else:\n raise ValueError(\n f\"Unexpected namespace '{namespace}' for seqXML attribute\"\n )\n if self.seqXMLversion is None:\n raise ValueError(\"Failed to find seqXMLversion\")\n elif self.seqXMLversion not in (\"0.1\", \"0.2\", \"0.3\", \"0.4\"):\n raise ValueError(\"Unsupported seqXMLversion\")\n url = f\"http://www.seqxml.org/{self.seqXMLversion}/seqxml.xsd\"\n if schema is not None and schema != url:\n raise ValueError(\n \"XML Schema '%s' found not consistent with reported seqXML version %s\"\n % (schema, self.seqXMLversion)\n )\n # speciesName and ncbiTaxID attributes on the root are only supported\n # in 0.4\n if self.speciesName and self.seqXMLversion != \"0.4\":\n raise ValueError(\n \"Attribute 'speciesName' on root is only supported in version 0.4\"\n )\n if self.ncbiTaxID and self.seqXMLversion != \"0.4\":\n raise ValueError(\n \"Attribute 'ncbiTaxID' on root is only supported in version 0.4\"\n )\n 
self.endElementNS = self.endSeqXMLElement\n self.startElementNS = self.startEntryElement\n\n def endSeqXMLElement(self, name, qname):\n \"\"\"Handle end of the seqXML element.\"\"\"\n namespace, localname = name\n if namespace is not None:\n raise RuntimeError(f\"Unexpected namespace '{namespace}' for seqXML end\")\n if qname is not None:\n raise RuntimeError(f\"Unexpected qname '{qname}' for seqXML end\")\n if localname != \"seqXML\":\n raise RuntimeError(\"Failed to find end of seqXML element\")\n self.startElementNS = None\n self.endElementNS = None\n\n def startEntryElement(self, name, qname, attrs):\n \"\"\"Set new entry with id and the optional entry source (PRIVATE).\"\"\"\n if name != (None, \"entry\"):\n raise ValueError(\"Expected to find the start of an entry element\")\n if qname is not None:\n raise RuntimeError(\"Unexpected qname for entry element\")\n record = SeqRecord(None, id=None)\n if self.speciesName is not None:\n record.annotations[\"organism\"] = self.speciesName\n if self.ncbiTaxID is not None:\n record.annotations[\"ncbi_taxid\"] = self.ncbiTaxID\n record.annotations[\"source\"] = self.source\n for key, value in attrs.items():\n namespace, localname = key\n if namespace is None:\n if localname == \"id\":\n record.id = value\n elif localname == \"source\" and (\n self.seqXMLversion == \"0.3\" or self.seqXMLversion == \"0.4\"\n ):\n record.annotations[\"source\"] = value\n else:\n raise ValueError(\n f\"Unexpected attribute {localname} in entry element\"\n )\n else:\n raise ValueError(\n f\"Unexpected namespace '{namespace}' for entry attribute\"\n )\n if record.id is None:\n raise ValueError(\"Failed to find entry ID\")\n self.records.append(record)\n if self.seqXMLversion == \"0.1\":\n self.startElementNS = self.startEntryFieldElementVersion01\n else:\n self.startElementNS = self.startEntryFieldElement\n self.endElementNS = self.endEntryElement\n\n def endEntryElement(self, name, qname):\n \"\"\"Handle end of an entry element.\"\"\"\n if name != (None, \"entry\"):\n raise ValueError(\"Expected to find the end of an entry element\")\n if qname is not None:\n raise RuntimeError(\"Unexpected qname for entry element\")\n if self.records[-1].seq is None:\n raise ValueError(\"Failed to find a sequence for entry element\")\n self.startElementNS = self.startEntryElement\n self.endElementNS = self.endSeqXMLElement\n\n def startEntryFieldElementVersion01(self, name, qname, attrs):\n \"\"\"Receive a field of an entry element and forward it for version 0.1.\"\"\"\n namespace, localname = name\n if namespace is not None:\n raise ValueError(\n f\"Unexpected namespace '{namespace}' for {localname} element\"\n )\n if qname is not None:\n raise RuntimeError(f\"Unexpected qname '{qname}' for {localname} element\")\n if localname == \"species\":\n return self.startSpeciesElement(attrs)\n if localname == \"description\":\n return self.startDescriptionElement(attrs)\n if localname in (\"dnaSeq\", \"rnaSeq\", \"aaSeq\"):\n return self.startSequenceElement(attrs)\n if localname == \"alternativeID\":\n return self.startDBRefElement(attrs)\n if localname == \"property\":\n return self.startPropertyElement(attrs)\n raise ValueError(f\"Unexpected field {localname} in entry\")\n\n def startEntryFieldElement(self, name, qname, attrs):\n \"\"\"Receive a field of an entry element and forward it for versions >=0.2.\"\"\"\n namespace, localname = name\n if namespace is not None:\n raise ValueError(\n f\"Unexpected namespace '{namespace}' for {localname} element\"\n )\n if qname is not None:\n 
raise RuntimeError(f\"Unexpected qname '{qname}' for {localname} element\")\n if localname == \"species\":\n return self.startSpeciesElement(attrs)\n if localname == \"description\":\n return self.startDescriptionElement(attrs)\n if localname in (\"DNAseq\", \"RNAseq\", \"AAseq\"):\n return self.startSequenceElement(attrs)\n if localname == \"DBRef\":\n return self.startDBRefElement(attrs)\n if localname == \"property\":\n return self.startPropertyElement(attrs)\n raise ValueError(f\"Unexpected field {localname} in entry\")\n\n def startSpeciesElement(self, attrs):\n \"\"\"Parse the species information.\"\"\"\n name = None\n ncbiTaxID = None\n for key, value in attrs.items():\n namespace, localname = key\n if namespace is None:\n if localname == \"name\":\n name = value\n elif localname == \"ncbiTaxID\":\n # check if it is an integer, but store as string\n number = int(value)\n ncbiTaxID = value\n else:\n raise ValueError(\n f\"Unexpected attribute '{key}' found in species tag\"\n )\n else:\n raise ValueError(\n f\"Unexpected namespace '{namespace}' for species attribute\"\n )\n # The attributes \"name\" and \"ncbiTaxID\" are required:\n if name is None:\n raise ValueError(\"Failed to find species name\")\n if ncbiTaxID is None:\n raise ValueError(\"Failed to find ncbiTaxId\")\n record = self.records[-1]\n # The keywords for the species annotation are taken from SwissIO\n record.annotations[\"organism\"] = name\n # TODO - Should have been a list to match SwissProt parser:\n record.annotations[\"ncbi_taxid\"] = ncbiTaxID\n self.endElementNS = self.endSpeciesElement\n\n def endSpeciesElement(self, name, qname):\n \"\"\"Handle end of a species element.\"\"\"\n namespace, localname = name\n if namespace is not None:\n raise RuntimeError(f\"Unexpected namespace '{namespace}' for species end\")\n if qname is not None:\n raise RuntimeError(f\"Unexpected qname '{qname}' for species end\")\n if localname != \"species\":\n raise RuntimeError(\"Failed to find end of species element\")\n self.endElementNS = self.endEntryElement\n\n def startDescriptionElement(self, attrs):\n \"\"\"Parse the description.\"\"\"\n if attrs:\n raise ValueError(\"Unexpected attributes found in description element\")\n if self.data is not None:\n raise RuntimeError(f\"Unexpected data found: '{self.data}'\")\n self.data = \"\"\n self.endElementNS = self.endDescriptionElement\n\n def endDescriptionElement(self, name, qname):\n \"\"\"Handle the end of a description element.\"\"\"\n namespace, localname = name\n if namespace is not None:\n raise RuntimeError(\n f\"Unexpected namespace '{namespace}' for description end\"\n )\n if qname is not None:\n raise RuntimeError(f\"Unexpected qname '{qname}' for description end\")\n if localname != \"description\":\n raise RuntimeError(\"Failed to find end of description element\")\n record = self.records[-1]\n description = self.data\n if description: # ignore if empty string\n record.description = description\n self.data = None\n self.endElementNS = self.endEntryElement\n\n def startSequenceElement(self, attrs):\n \"\"\"Parse DNA, RNA, or protein sequence.\"\"\"\n if attrs:\n raise ValueError(\"Unexpected attributes found in sequence element\")\n if self.data is not None:\n raise RuntimeError(f\"Unexpected data found: '{self.data}'\")\n self.data = \"\"\n self.endElementNS = self.endSequenceElement\n\n def endSequenceElement(self, name, qname):\n \"\"\"Handle the end of a sequence element.\"\"\"\n namespace, localname = name\n if namespace is not None:\n raise RuntimeError(f\"Unexpected 
namespace '{namespace}' for sequence end\")\n if qname is not None:\n raise RuntimeError(f\"Unexpected qname '{qname}' for sequence end\")\n record = self.records[-1]\n if (localname == \"DNAseq\" and self.seqXMLversion != \"0.1\") or (\n localname == \"dnaSeq\" and self.seqXMLversion == \"0.1\"\n ):\n record.annotations[\"molecule_type\"] = \"DNA\"\n elif (localname == \"RNAseq\" and self.seqXMLversion != \"0.1\") or (\n localname == \"rnaSeq\" and self.seqXMLversion == \"0.1\"\n ):\n record.annotations[\"molecule_type\"] = \"RNA\"\n elif (localname == \"AAseq\" and self.seqXMLversion >= \"0.1\") or (\n localname == \"aaSeq\" and self.seqXMLversion == \"0.1\"\n ):\n record.annotations[\"molecule_type\"] = \"protein\"\n else:\n raise RuntimeError(\n f\"Failed to find end of sequence (localname = {localname})\"\n )\n record.seq = Seq(self.data)\n self.data = None\n self.endElementNS = self.endEntryElement\n\n def startDBRefElement(self, attrs):\n \"\"\"Parse a database cross reference.\"\"\"\n TYPE = None\n source = None\n ID = None\n for key, value in attrs.items():\n namespace, localname = key\n if namespace is None:\n if localname == \"type\":\n TYPE = value\n elif localname == \"source\":\n source = value\n elif localname == \"id\":\n ID = value\n else:\n raise ValueError(\n f\"Unexpected attribute '{key}' found for DBRef element\"\n )\n else:\n raise ValueError(\n f\"Unexpected namespace '{namespace}' for DBRef attribute\"\n )\n # The attributes \"source\" and \"id\" are required, and \"type\" in versions\n # 0.2-0.3:\n if source is None:\n raise ValueError(\"Failed to find source for DBRef element\")\n if ID is None:\n raise ValueError(\"Failed to find id for DBRef element\")\n if TYPE is None and (\n self.seqXMLversion == \"0.2\" or self.seqXMLversion == \"0.3\"\n ):\n raise ValueError(\"Failed to find type for DBRef element\")\n if self.data is not None:\n raise RuntimeError(f\"Unexpected data found: '{self.data}'\")\n self.data = \"\"\n record = self.records[-1]\n dbxref = f\"{source}:{ID}\"\n if dbxref not in record.dbxrefs:\n record.dbxrefs.append(dbxref)\n self.endElementNS = self.endDBRefElement\n\n def endDBRefElement(self, name, qname):\n \"\"\"Handle the end of a DBRef element.\"\"\"\n namespace, localname = name\n if namespace is not None:\n raise RuntimeError(f\"Unexpected namespace '{namespace}' for DBRef element\")\n if qname is not None:\n raise RuntimeError(f\"Unexpected qname '{qname}' for DBRef element\")\n if (localname != \"DBRef\" and self.seqXMLversion != \"0.1\") or (\n localname != \"alternativeID\" and self.seqXMLversion == \"0.1\"\n ):\n raise RuntimeError(f\"Unexpected localname '{localname}' for DBRef element\")\n if self.data:\n raise RuntimeError(\n f\"Unexpected data received for DBRef element: '{self.data}'\"\n )\n self.data = None\n self.endElementNS = self.endEntryElement\n\n def startPropertyElement(self, attrs):\n \"\"\"Handle the start of a property element.\"\"\"\n property_name = None\n property_value = None\n for key, value in attrs.items():\n namespace, localname = key\n if namespace is None:\n if localname == \"name\":\n property_name = value\n elif localname == \"value\":\n property_value = value\n else:\n raise ValueError(\n \"Unexpected attribute '%s' found for property element\", key\n )\n else:\n raise ValueError(\n f\"Unexpected namespace '{namespace}' for property attribute\"\n )\n # The attribute \"name\" is required:\n if property_name is None:\n raise ValueError(\"Failed to find name for property element\")\n record = 
self.records[-1]\n if property_name == \"molecule_type\":\n # At this point, record.annotations[\"molecule_type\"] is either\n # \"DNA\", \"RNA\", or \"protein\"; property_value may be a more detailed\n # description such as \"mRNA\" or \"genomic DNA\".\n assert record.annotations[property_name] in property_value\n record.annotations[property_name] = property_value\n else:\n if property_name not in record.annotations:\n record.annotations[property_name] = []\n record.annotations[property_name].append(property_value)\n self.endElementNS = self.endPropertyElement\n\n def endPropertyElement(self, name, qname):\n \"\"\"Handle the end of a property element.\"\"\"\n namespace, localname = name\n if namespace is not None:\n raise RuntimeError(\n f\"Unexpected namespace '{namespace}' for property element\"\n )\n if qname is not None:\n raise RuntimeError(f\"Unexpected qname '{qname}' for property element\")\n if localname != \"property\":\n raise RuntimeError(\n f\"Unexpected localname '{localname}' for property element\"\n )\n self.endElementNS = self.endEntryElement\n\n def characters(self, data):\n \"\"\"Handle character data.\"\"\"\n if self.data is not None:\n self.data += data\n\n\nclass SeqXmlIterator(SequenceIterator):\n \"\"\"Parser for seqXML files.\n\n Parses seqXML files and creates SeqRecords.\n Assumes valid seqXML please validate beforehand.\n It is assumed that all information for one record can be found within a\n record element or above. Two types of methods are called when the start\n tag of an element is reached. To receive only the attributes of an\n element before its end tag is reached implement _attr_TAGNAME.\n To get an element and its children as a DOM tree implement _elem_TAGNAME.\n Everything that is part of the DOM tree will not trigger any further\n method calls.\n \"\"\"\n\n # Small block size can be a problem with libexpat 2.6.0 onwards:\n BLOCK = 4096\n\n def __init__(self, stream_or_path, namespace=None):\n \"\"\"Create the object and initialize the XML parser.\"\"\"\n # Make sure we got a binary handle. If we got a text handle, then\n # the parser will still run but unicode characters will be garbled\n # if the text handle was opened with a different encoding than the\n # one specified in the XML file. 
With a binary handle, the correct\n # encoding is picked up by the parser from the XML file.\n self.parser = sax.make_parser()\n content_handler = ContentHandler()\n self.parser.setContentHandler(content_handler)\n self.parser.setFeature(handler.feature_namespaces, True)\n super().__init__(stream_or_path, mode=\"b\", fmt=\"SeqXML\")\n\n def parse(self, handle):\n \"\"\"Start parsing the file, and return a SeqRecord generator.\"\"\"\n parser = self.parser\n content_handler = parser.getContentHandler()\n BLOCK = self.BLOCK\n while True:\n # Read in another block of the file...\n text = handle.read(BLOCK)\n if not text:\n if content_handler.startElementNS is None:\n raise ValueError(\"Empty file.\")\n else:\n raise ValueError(\"XML file contains no data.\")\n parser.feed(text)\n seqXMLversion = content_handler.seqXMLversion\n if seqXMLversion is not None:\n break\n self.seqXMLversion = seqXMLversion\n self.source = content_handler.source\n self.sourceVersion = content_handler.sourceVersion\n self.ncbiTaxID = content_handler.ncbiTaxID\n self.speciesName = content_handler.speciesName\n records = self.iterate(handle)\n return records\n\n def iterate(self, handle):\n \"\"\"Iterate over the records in the XML file.\"\"\"\n parser = self.parser\n content_handler = parser.getContentHandler()\n records = content_handler.records\n BLOCK = self.BLOCK\n while True:\n if len(records) > 1:\n # Then at least the first record is finished\n record = records.pop(0)\n yield record\n # Read in another block of the file...\n text = handle.read(BLOCK)\n if not text:\n break\n parser.feed(text)\n # We have reached the end of the XML file;\n # send out the remaining records\n yield from records\n records.clear()\n parser.close()\n\n\nclass SeqXmlWriter(SequenceWriter):\n \"\"\"Writes SeqRecords into seqXML file.\n\n SeqXML requires the SeqRecord annotations to specify the molecule_type;\n the molecule type is required to contain the term \"DNA\", \"RNA\", or\n \"protein\".\n \"\"\"\n\n def __init__(\n self, target, source=None, source_version=None, species=None, ncbiTaxId=None\n ):\n \"\"\"Create Object and start the xml generator.\n\n Arguments:\n - target - Output stream opened in binary mode, or a path to a file.\n - source - The source program/database of the file, for example\n UniProt.\n - source_version - The version or release number of the source\n program or database from which the data originated.\n - species - The scientific name of the species of origin of all\n entries in the file.\n - ncbiTaxId - The NCBI taxonomy identifier of the species of origin.\n\n \"\"\"\n super().__init__(target, \"wb\")\n handle = self.handle\n self.xml_generator = XMLGenerator(handle, \"utf-8\")\n self.xml_generator.startDocument()\n self.source = source\n self.source_version = source_version\n self.species = species\n self.ncbiTaxId = ncbiTaxId\n\n def write_header(self):\n \"\"\"Write root node with document metadata.\"\"\"\n attrs = {\n \"xmlns:xsi\": \"http://www.w3.org/2001/XMLSchema-instance\",\n \"xsi:noNamespaceSchemaLocation\": \"http://www.seqxml.org/0.4/seqxml.xsd\",\n \"seqXMLversion\": \"0.4\",\n }\n\n if self.source is not None:\n attrs[\"source\"] = self.source\n if self.source_version is not None:\n attrs[\"sourceVersion\"] = self.source_version\n if self.species is not None:\n if not isinstance(self.species, str):\n raise TypeError(\"species should be of type string\")\n attrs[\"speciesName\"] = self.species\n if self.ncbiTaxId is not None:\n if not isinstance(self.ncbiTaxId, (str, int)):\n raise 
TypeError(\"ncbiTaxID should be of type string or int\")\n attrs[\"ncbiTaxID\"] = self.ncbiTaxId\n\n self.xml_generator.startElement(\"seqXML\", AttributesImpl(attrs))\n\n def write_record(self, record):\n \"\"\"Write one record.\"\"\"\n if not record.id or record.id == \"<unknown id>\":\n raise ValueError(\"SeqXML requires identifier\")\n\n if not isinstance(record.id, str):\n raise TypeError(\"Identifier should be of type string\")\n\n attrb = {\"id\": record.id}\n\n if (\n \"source\" in record.annotations\n and self.source != record.annotations[\"source\"]\n ):\n if not isinstance(record.annotations[\"source\"], str):\n raise TypeError(\"source should be of type string\")\n attrb[\"source\"] = record.annotations[\"source\"]\n\n self.xml_generator.startElement(\"entry\", AttributesImpl(attrb))\n self._write_species(record)\n self._write_description(record)\n self._write_seq(record)\n self._write_dbxrefs(record)\n self._write_properties(record)\n self.xml_generator.endElement(\"entry\")\n\n def write_footer(self):\n \"\"\"Close the root node and finish the XML document.\"\"\"\n self.xml_generator.endElement(\"seqXML\")\n self.xml_generator.endDocument()\n\n def _write_species(self, record):\n \"\"\"Write the species if given (PRIVATE).\"\"\"\n local_ncbi_taxid = None\n if \"ncbi_taxid\" in record.annotations:\n local_ncbi_taxid = record.annotations[\"ncbi_taxid\"]\n if isinstance(local_ncbi_taxid, list):\n # SwissProt parser uses a list (which could cope with chimeras)\n if len(local_ncbi_taxid) == 1:\n local_ncbi_taxid = local_ncbi_taxid[0]\n elif len(local_ncbi_taxid) == 0:\n local_ncbi_taxid = None\n else:\n raise ValueError(\n \"Multiple entries for record.annotations['ncbi_taxid'], %r\"\n % local_ncbi_taxid\n )\n if \"organism\" in record.annotations and local_ncbi_taxid:\n local_org = record.annotations[\"organism\"]\n\n if not isinstance(local_org, str):\n raise TypeError(\"organism should be of type string\")\n\n if not isinstance(local_ncbi_taxid, (str, int)):\n raise TypeError(\"ncbiTaxID should be of type string or int\")\n\n # The local species definition is only written if it differs from the global species definition\n if local_org != self.species or local_ncbi_taxid != self.ncbiTaxId:\n attr = {\"name\": local_org, \"ncbiTaxID\": str(local_ncbi_taxid)}\n self.xml_generator.startElement(\"species\", AttributesImpl(attr))\n self.xml_generator.endElement(\"species\")\n\n def _write_description(self, record):\n \"\"\"Write the description if given (PRIVATE).\"\"\"\n if record.description:\n if not isinstance(record.description, str):\n raise TypeError(\"Description should be of type string\")\n\n description = record.description\n if description == \"<unknown description>\":\n description = \"\"\n\n if len(record.description) > 0:\n self.xml_generator.startElement(\"description\", AttributesImpl({}))\n self.xml_generator.characters(description)\n self.xml_generator.endElement(\"description\")\n\n def _write_seq(self, record):\n \"\"\"Write the sequence (PRIVATE).\n\n Note that SeqXML requires the molecule type to contain the term\n \"DNA\", \"RNA\", or \"protein\".\n \"\"\"\n seq = bytes(record.seq)\n\n if not len(seq) > 0:\n raise ValueError(\"The sequence length should be greater than 0\")\n\n molecule_type = record.annotations.get(\"molecule_type\")\n if molecule_type is None:\n raise ValueError(\"molecule_type is not defined\")\n elif \"DNA\" in molecule_type:\n seqElem = \"DNAseq\"\n elif \"RNA\" in molecule_type:\n seqElem = \"RNAseq\"\n elif \"protein\" in 
molecule_type:\n seqElem = \"AAseq\"\n else:\n raise ValueError(f\"unknown molecule_type '{molecule_type}'\")\n\n self.xml_generator.startElement(seqElem, AttributesImpl({}))\n self.xml_generator.characters(seq)\n self.xml_generator.endElement(seqElem)\n\n def _write_dbxrefs(self, record):\n \"\"\"Write all database cross references (PRIVATE).\"\"\"\n if record.dbxrefs is not None:\n for dbxref in record.dbxrefs:\n if not isinstance(dbxref, str):\n raise TypeError(\"dbxrefs should be of type list of string\")\n if dbxref.find(\":\") < 1:\n raise ValueError(\n \"dbxrefs should be in the form ['source:id', 'source:id' ]\"\n )\n\n dbsource, dbid = dbxref.split(\":\", 1)\n\n attr = {\"source\": dbsource, \"id\": dbid}\n self.xml_generator.startElement(\"DBRef\", AttributesImpl(attr))\n self.xml_generator.endElement(\"DBRef\")\n\n def _write_properties(self, record):\n \"\"\"Write all annotations that are key value pairs with values of a primitive type or list of primitive types (PRIVATE).\"\"\"\n for key, value in record.annotations.items():\n if key not in (\"organism\", \"ncbi_taxid\", \"source\"):\n if value is None:\n attr = {\"name\": key}\n self.xml_generator.startElement(\"property\", AttributesImpl(attr))\n self.xml_generator.endElement(\"property\")\n\n elif isinstance(value, list):\n for v in value:\n if v is None:\n attr = {\"name\": key}\n else:\n attr = {\"name\": key, \"value\": str(v)}\n self.xml_generator.startElement(\n \"property\", AttributesImpl(attr)\n )\n self.xml_generator.endElement(\"property\")\n\n elif isinstance(value, (int, float, str)):\n attr = {\"name\": key, \"value\": str(value)}\n self.xml_generator.startElement(\"property\", AttributesImpl(attr))\n self.xml_generator.endElement(\"property\")\n", "path": "Bio/SeqIO/SeqXmlIO.py" } ]
diff --git a/Bio/SeqIO/SeqXmlIO.py b/Bio/SeqIO/SeqXmlIO.py index 4b95491c4c8..8fe75ebb728 100644 --- a/Bio/SeqIO/SeqXmlIO.py +++ b/Bio/SeqIO/SeqXmlIO.py @@ -441,7 +441,8 @@ class SeqXmlIterator(SequenceIterator): method calls. """ - BLOCK = 1024 + # Small block size can be a problem with libexpat 2.6.0 onwards: + BLOCK = 4096 def __init__(self, stream_or_path, namespace=None): """Create the object and initialize the XML parser."""
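For context, the iterator this diff touches feeds the expat-based SAX parser in fixed-size chunks, and, as the new comment notes, small block sizes can be a problem with libexpat 2.6.0 onwards, which is why the chunk size is raised. A standalone sketch of the same incremental-feeding pattern using only the standard library (block size and XML payload are illustrative):

```python
# Hedged sketch of incremental SAX parsing with an explicit block size,
# mirroring the BLOCK constant changed in the diff above.
import xml.sax
from io import BytesIO

class EntryCounter(xml.sax.handler.ContentHandler):
    def __init__(self):
        super().__init__()
        self.entries = 0

    def startElement(self, name, attrs):
        if name == "entry":
            self.entries += 1

BLOCK = 4096  # larger feeds behave better with libexpat >= 2.6.0
stream = BytesIO(b'<seqXML><entry id="a"/><entry id="b"/></seqXML>')

parser = xml.sax.make_parser()
handler = EntryCounter()
parser.setContentHandler(handler)

while True:
    chunk = stream.read(BLOCK)
    if not chunk:
        break
    parser.feed(chunk)   # IncrementalParser API: feed() chunks, then close()
parser.close()

print(handler.entries)   # -> 2
```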
aio-libs__aiohttp-4415
Task was destroyed but it's pending
On master, I see output from `run_app()` like the following:

```
======== Running on http://0.0.0.0:8080 ========
(Press CTRL+C to quit)
Task was destroyed but it is pending!
task: <Task pending name='Task-4' coro=<RequestHandler.start() done, defined at /home/andrew/.virtualenvs/multidict/lib/python3.8/site-packages/aiohttp/web_protocol.py:437> wait_for=<Future pending cb=[<TaskWakeupMethWrapper object at 0x7f8822ffef10>()]>>
```

I suspect it happens on errors and on keep-alive closing in `web_protocol.py`.

The message should go.
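For context on where the message comes from: asyncio emits `Task was destroyed but it is pending!` from `Task.__del__` when a task object is garbage-collected while still pending, typically because it was neither awaited nor cancelled before the loop went away. A minimal standalone reproduction, independent of aiohttp:

```python
# Minimal sketch of how asyncio produces this warning: a task left pending when
# the loop is closed gets reported by Task.__del__ at garbage-collection time.
import asyncio

async def worker():
    await asyncio.sleep(3600)        # never completes during this run

async def main():
    asyncio.ensure_future(worker())  # pending task, never awaited or cancelled
    await asyncio.sleep(0)

loop = asyncio.new_event_loop()
loop.run_until_complete(main())
loop.close()
# On interpreter shutdown:
#   Task was destroyed but it is pending!
#   task: <Task pending coro=<worker() running at ...>>
```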
[ { "content": "import asyncio\nimport asyncio.streams\nimport traceback\nfrom collections import deque\nfrom contextlib import suppress\nfrom html import escape as html_escape\nfrom http import HTTPStatus\nfrom logging import Logger\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Awaitable,\n Callable,\n Optional,\n Tuple,\n Type,\n Union,\n cast,\n)\n\nimport yarl\n\nfrom .abc import (\n AbstractAccessLogger,\n AbstractAsyncAccessLogger,\n AbstractStreamWriter,\n)\nfrom .base_protocol import BaseProtocol\nfrom .helpers import ceil_timeout, current_task\nfrom .http import (\n HttpProcessingError,\n HttpRequestParser,\n HttpVersion10,\n RawRequestMessage,\n StreamWriter,\n)\nfrom .log import access_logger, server_logger\nfrom .streams import EMPTY_PAYLOAD, StreamReader\nfrom .tcp_helpers import tcp_keepalive\nfrom .web_exceptions import HTTPException\nfrom .web_log import AccessLogger\nfrom .web_request import BaseRequest\nfrom .web_response import Response, StreamResponse\n\n__all__ = ('RequestHandler', 'RequestPayloadError', 'PayloadAccessError')\n\nif TYPE_CHECKING: # pragma: no cover\n from .web_server import Server # noqa\n\n\n_RequestFactory = Callable[[RawRequestMessage,\n StreamReader,\n 'RequestHandler',\n AbstractStreamWriter,\n 'asyncio.Task[None]'],\n BaseRequest]\n\n_RequestHandler = Callable[[BaseRequest], Awaitable[StreamResponse]]\n_AnyAbstractAccessLogger = Union[\n Type[AbstractAsyncAccessLogger],\n Type[AbstractAccessLogger],\n]\n\n\nERROR = RawRequestMessage(\n 'UNKNOWN', '/', HttpVersion10, {},\n {}, True, False, False, False, yarl.URL('/'))\n\n\nclass RequestPayloadError(Exception):\n \"\"\"Payload parsing error.\"\"\"\n\n\nclass PayloadAccessError(Exception):\n \"\"\"Payload was accessed after response was sent.\"\"\"\n\n\nclass AccessLoggerWrapper(AbstractAsyncAccessLogger):\n \"\"\"\n Wraps an AbstractAccessLogger so it behaves\n like an AbstractAsyncAccessLogger.\n \"\"\"\n def __init__(self, access_logger: AbstractAccessLogger):\n self.access_logger = access_logger\n super().__init__()\n\n async def log(self,\n request: BaseRequest,\n response: StreamResponse,\n request_start: float) -> None:\n self.access_logger.log(request, response, request_start)\n\n\nclass RequestHandler(BaseProtocol):\n \"\"\"HTTP protocol implementation.\n\n RequestHandler handles incoming HTTP request. It reads request line,\n request headers and request payload and calls handle_request() method.\n By default it always returns with 404 response.\n\n RequestHandler handles errors in incoming request, like bad\n status line, bad headers or incomplete payload. 
If any error occurs,\n connection gets closed.\n\n :param keepalive_timeout: number of seconds before closing\n keep-alive connection\n :type keepalive_timeout: int or None\n\n :param bool tcp_keepalive: TCP keep-alive is on, default is on\n\n :param logger: custom logger object\n :type logger: aiohttp.log.server_logger\n\n :param access_log_class: custom class for access_logger\n :type access_log_class: aiohttp.abc.AbstractAccessLogger\n\n :param access_log: custom logging object\n :type access_log: aiohttp.log.server_logger\n\n :param str access_log_format: access log format string\n\n :param loop: Optional event loop\n\n :param int max_line_size: Optional maximum header line size\n\n :param int max_field_size: Optional maximum header field size\n\n :param int max_headers: Optional maximum header size\n\n \"\"\"\n KEEPALIVE_RESCHEDULE_DELAY = 1\n\n __slots__ = ('_request_count', '_keepalive', '_manager',\n '_request_handler', '_request_factory', '_tcp_keepalive',\n '_keepalive_time', '_keepalive_handle', '_keepalive_timeout',\n '_lingering_time', '_messages', '_message_tail',\n '_waiter', '_error_handler', '_task_handler',\n '_upgrade', '_payload_parser', '_request_parser',\n '_reading_paused', 'logger', 'access_log',\n 'access_logger', '_close', '_force_close',\n '_current_request')\n\n def __init__(self, manager: 'Server', *,\n loop: asyncio.AbstractEventLoop,\n keepalive_timeout: float=75., # NGINX default is 75 secs\n tcp_keepalive: bool=True,\n logger: Logger=server_logger,\n access_log_class: _AnyAbstractAccessLogger=AccessLogger,\n access_log: Logger=access_logger,\n access_log_format: str=AccessLogger.LOG_FORMAT,\n max_line_size: int=8190,\n max_headers: int=32768,\n max_field_size: int=8190,\n lingering_time: float=10.0,\n read_bufsize: int=2 ** 16):\n\n super().__init__(loop)\n\n self._request_count = 0\n self._keepalive = False\n self._current_request = None # type: Optional[BaseRequest]\n self._manager = manager # type: Optional[Server]\n self._request_handler = manager.request_handler # type: Optional[_RequestHandler] # noqa\n self._request_factory = manager.request_factory # type: Optional[_RequestFactory] # noqa\n\n self._tcp_keepalive = tcp_keepalive\n # placeholder to be replaced on keepalive timeout setup\n self._keepalive_time = 0.0\n self._keepalive_handle = None # type: Optional[asyncio.Handle]\n self._keepalive_timeout = keepalive_timeout\n self._lingering_time = float(lingering_time)\n\n self._messages = deque() # type: Any # Python 3.5 has no typing.Deque\n self._message_tail = b''\n\n self._waiter = None # type: Optional[asyncio.Future[None]]\n self._error_handler = None # type: Optional[asyncio.Task[None]]\n self._task_handler = None # type: Optional[asyncio.Task[None]]\n\n self._upgrade = False\n self._payload_parser = None # type: Any\n self._request_parser = HttpRequestParser(\n self, loop, read_bufsize,\n max_line_size=max_line_size,\n max_field_size=max_field_size,\n max_headers=max_headers,\n payload_exception=RequestPayloadError) # type: Optional[HttpRequestParser] # noqa\n\n self.logger = logger\n self.access_log = access_log\n if access_log:\n if issubclass(access_log_class, AbstractAsyncAccessLogger):\n self.access_logger = access_log_class() # type: Optional[AbstractAsyncAccessLogger] # noqa\n else:\n access_logger = access_log_class(access_log, access_log_format)\n self.access_logger = AccessLoggerWrapper(access_logger)\n else:\n self.access_logger = None\n\n self._close = False\n self._force_close = False\n\n def __repr__(self) -> str:\n return 
\"<{} {}>\".format(\n self.__class__.__name__,\n 'connected' if self.transport is not None else 'disconnected')\n\n @property\n def keepalive_timeout(self) -> float:\n return self._keepalive_timeout\n\n async def shutdown(self, timeout: Optional[float]=15.0) -> None:\n \"\"\"Worker process is about to exit, we need cleanup everything and\n stop accepting requests. It is especially important for keep-alive\n connections.\"\"\"\n self._force_close = True\n\n if self._keepalive_handle is not None:\n self._keepalive_handle.cancel()\n\n if self._waiter:\n self._waiter.cancel()\n\n # wait for handlers\n with suppress(asyncio.CancelledError, asyncio.TimeoutError):\n async with ceil_timeout(timeout):\n if (self._error_handler is not None and\n not self._error_handler.done()):\n await self._error_handler\n\n if self._current_request is not None:\n self._current_request._cancel(asyncio.CancelledError())\n\n if (self._task_handler is not None and\n not self._task_handler.done()):\n await self._task_handler\n\n # force-close non-idle handler\n if self._task_handler is not None:\n self._task_handler.cancel()\n\n if self.transport is not None:\n self.transport.close()\n self.transport = None\n\n def connection_made(self, transport: asyncio.BaseTransport) -> None:\n super().connection_made(transport)\n\n real_transport = cast(asyncio.Transport, transport)\n if self._tcp_keepalive:\n tcp_keepalive(real_transport)\n\n self._task_handler = self._loop.create_task(self.start())\n assert self._manager is not None\n self._manager.connection_made(self, real_transport)\n\n def connection_lost(self, exc: Optional[BaseException]) -> None:\n if self._manager is None:\n return\n self._manager.connection_lost(self, exc)\n\n super().connection_lost(exc)\n\n self._manager = None\n self._force_close = True\n self._request_factory = None\n self._request_handler = None\n self._request_parser = None\n\n if self._keepalive_handle is not None:\n self._keepalive_handle.cancel()\n\n if self._current_request is not None:\n if exc is None:\n exc = ConnectionResetError(\"Connection lost\")\n self._current_request._cancel(exc)\n\n if self._error_handler is not None:\n self._error_handler.cancel()\n\n self._task_handler = None\n\n if self._payload_parser is not None:\n self._payload_parser.feed_eof()\n self._payload_parser = None\n\n def set_parser(self, parser: Any) -> None:\n # Actual type is WebReader\n assert self._payload_parser is None\n\n self._payload_parser = parser\n\n if self._message_tail:\n self._payload_parser.feed_data(self._message_tail)\n self._message_tail = b''\n\n def eof_received(self) -> None:\n pass\n\n def data_received(self, data: bytes) -> None:\n if self._force_close or self._close:\n return\n # parse http messages\n if self._payload_parser is None and not self._upgrade:\n assert self._request_parser is not None\n try:\n messages, upgraded, tail = self._request_parser.feed_data(data)\n except HttpProcessingError as exc:\n # something happened during parsing\n self._error_handler = self._loop.create_task(\n self.handle_parse_error(\n StreamWriter(self, self._loop),\n 400, exc, exc.message))\n self.close()\n except Exception as exc:\n # 500: internal error\n self._error_handler = self._loop.create_task(\n self.handle_parse_error(\n StreamWriter(self, self._loop),\n 500, exc))\n self.close()\n else:\n if messages:\n # sometimes the parser returns no messages\n for (msg, payload) in messages:\n self._request_count += 1\n self._messages.append((msg, payload))\n\n waiter = self._waiter\n if waiter is not None:\n 
if not waiter.done():\n # don't set result twice\n waiter.set_result(None)\n\n self._upgrade = upgraded\n if upgraded and tail:\n self._message_tail = tail\n\n # no parser, just store\n elif self._payload_parser is None and self._upgrade and data:\n self._message_tail += data\n\n # feed payload\n elif data:\n eof, tail = self._payload_parser.feed_data(data)\n if eof:\n self.close()\n\n def keep_alive(self, val: bool) -> None:\n \"\"\"Set keep-alive connection mode.\n\n :param bool val: new state.\n \"\"\"\n self._keepalive = val\n if self._keepalive_handle:\n self._keepalive_handle.cancel()\n self._keepalive_handle = None\n\n def close(self) -> None:\n \"\"\"Stop accepting new pipelinig messages and close\n connection when handlers done processing messages\"\"\"\n self._close = True\n if self._waiter:\n self._waiter.cancel()\n\n def force_close(self) -> None:\n \"\"\"Force close connection\"\"\"\n self._force_close = True\n if self._waiter:\n self._waiter.cancel()\n if self.transport is not None:\n self.transport.close()\n self.transport = None\n\n async def log_access(self,\n request: BaseRequest,\n response: StreamResponse,\n request_start: float) -> None:\n if self.access_logger is not None:\n await self.access_logger.log(request, response,\n self._loop.time() - request_start)\n\n def log_debug(self, *args: Any, **kw: Any) -> None:\n if self._loop.get_debug():\n self.logger.debug(*args, **kw)\n\n def log_exception(self, *args: Any, **kw: Any) -> None:\n self.logger.exception(*args, **kw)\n\n def _process_keepalive(self) -> None:\n if self._force_close or not self._keepalive:\n return\n\n next = self._keepalive_time + self._keepalive_timeout\n\n # handler in idle state\n if self._waiter:\n if self._loop.time() > next:\n self.force_close()\n return\n\n # not all request handlers are done,\n # reschedule itself to next second\n self._keepalive_handle = self._loop.call_later(\n self.KEEPALIVE_RESCHEDULE_DELAY, self._process_keepalive)\n\n async def _handle_request(self,\n request: BaseRequest,\n start_time: float,\n ) -> Tuple[StreamResponse, bool]:\n assert self._request_handler is not None\n try:\n try:\n self._current_request = request\n resp = await self._request_handler(request)\n finally:\n self._current_request = None\n except HTTPException as exc:\n resp = Response(status=exc.status,\n reason=exc.reason,\n text=exc.text,\n headers=exc.headers)\n reset = await self.finish_response(request, resp, start_time)\n except asyncio.CancelledError:\n raise\n except asyncio.TimeoutError as exc:\n self.log_debug('Request handler timed out.', exc_info=exc)\n resp = self.handle_error(request, 504)\n reset = await self.finish_response(request, resp, start_time)\n except Exception as exc:\n resp = self.handle_error(request, 500, exc)\n reset = await self.finish_response(request, resp, start_time)\n else:\n reset = await self.finish_response(request, resp, start_time)\n\n return resp, reset\n\n async def start(self) -> None:\n \"\"\"Process incoming request.\n\n It reads request line, request headers and request payload, then\n calls handle_request() method. Subclass has to override\n handle_request(). start() handles various exceptions in request\n or response handling. 
Connection is being closed always unless\n keep_alive(True) specified.\n \"\"\"\n loop = self._loop\n handler = self._task_handler\n assert handler is not None\n manager = self._manager\n assert manager is not None\n keepalive_timeout = self._keepalive_timeout\n resp = None\n assert self._request_factory is not None\n assert self._request_handler is not None\n\n while not self._force_close:\n if not self._messages:\n try:\n # wait for next request\n self._waiter = loop.create_future()\n await self._waiter\n except asyncio.CancelledError:\n break\n finally:\n self._waiter = None\n\n message, payload = self._messages.popleft()\n\n start = loop.time()\n\n manager.requests_count += 1\n writer = StreamWriter(self, loop)\n request = self._request_factory(\n message, payload, self, writer, handler)\n try:\n # a new task is used for copy context vars (#3406)\n task = self._loop.create_task(\n self._handle_request(request, start))\n try:\n resp, reset = await task\n except (asyncio.CancelledError, ConnectionError):\n self.log_debug('Ignored premature client disconnection')\n break\n\n # Drop the processed task from asyncio.Task.all_tasks() early\n del task\n if reset:\n self.log_debug('Ignored premature client disconnection 2')\n break\n\n # notify server about keep-alive\n self._keepalive = bool(resp.keep_alive)\n\n # check payload\n if not payload.is_eof():\n lingering_time = self._lingering_time\n if not self._force_close and lingering_time:\n self.log_debug(\n 'Start lingering close timer for %s sec.',\n lingering_time)\n\n now = loop.time()\n end_t = now + lingering_time\n\n with suppress(\n asyncio.TimeoutError, asyncio.CancelledError):\n while not payload.is_eof() and now < end_t:\n async with ceil_timeout(end_t - now):\n # read and ignore\n await payload.readany()\n now = loop.time()\n\n # if payload still uncompleted\n if not payload.is_eof() and not self._force_close:\n self.log_debug('Uncompleted request.')\n self.close()\n\n payload.set_exception(PayloadAccessError())\n\n except asyncio.CancelledError:\n self.log_debug('Ignored premature client disconnection ')\n break\n except RuntimeError as exc:\n if self._loop.get_debug():\n self.log_exception(\n 'Unhandled runtime exception', exc_info=exc)\n self.force_close()\n except Exception as exc:\n self.log_exception('Unhandled exception', exc_info=exc)\n self.force_close()\n finally:\n if self.transport is None and resp is not None:\n self.log_debug('Ignored premature client disconnection.')\n elif not self._force_close:\n if self._keepalive and not self._close:\n # start keep-alive timer\n if keepalive_timeout is not None:\n now = self._loop.time()\n self._keepalive_time = now\n if self._keepalive_handle is None:\n self._keepalive_handle = loop.call_at(\n now + keepalive_timeout,\n self._process_keepalive)\n else:\n break\n\n # remove handler, close transport if no handlers left\n if not self._force_close:\n self._task_handler = None\n if self.transport is not None and self._error_handler is None:\n self.transport.close()\n\n async def finish_response(self,\n request: BaseRequest,\n resp: StreamResponse,\n start_time: float) -> bool:\n \"\"\"\n Prepare the response and write_eof, then log access. This has to\n be called within the context of any exception so the access logger\n can get exception information. 
Returns True if the client disconnects\n prematurely.\n \"\"\"\n request._finish()\n if self._request_parser is not None:\n self._request_parser.set_upgraded(False)\n self._upgrade = False\n if self._message_tail:\n self._request_parser.feed_data(self._message_tail)\n self._message_tail = b''\n try:\n prepare_meth = resp.prepare\n except AttributeError:\n if resp is None:\n raise RuntimeError(\"Missing return \"\n \"statement on request handler\")\n else:\n raise RuntimeError(\"Web-handler should return \"\n \"a response instance, \"\n \"got {!r}\".format(resp))\n try:\n await prepare_meth(request)\n await resp.write_eof()\n except ConnectionError:\n await self.log_access(request, resp, start_time)\n return True\n else:\n await self.log_access(request, resp, start_time)\n return False\n\n def handle_error(self,\n request: BaseRequest,\n status: int=500,\n exc: Optional[BaseException]=None,\n message: Optional[str]=None) -> StreamResponse:\n \"\"\"Handle errors.\n\n Returns HTTP response with specific status code. Logs additional\n information. It always closes current connection.\"\"\"\n self.log_exception(\"Error handling request\", exc_info=exc)\n\n ct = 'text/plain'\n if status == HTTPStatus.INTERNAL_SERVER_ERROR:\n title = '{0.value} {0.phrase}'.format(\n HTTPStatus.INTERNAL_SERVER_ERROR\n )\n msg = HTTPStatus.INTERNAL_SERVER_ERROR.description\n tb = None\n if self._loop.get_debug():\n with suppress(Exception):\n tb = traceback.format_exc()\n\n if 'text/html' in request.headers.get('Accept', ''):\n if tb:\n tb = html_escape(tb)\n msg = '<h2>Traceback:</h2>\\n<pre>{}</pre>'.format(tb)\n message = (\n \"<html><head>\"\n \"<title>{title}</title>\"\n \"</head><body>\\n<h1>{title}</h1>\"\n \"\\n{msg}\\n</body></html>\\n\"\n ).format(title=title, msg=msg)\n ct = 'text/html'\n else:\n if tb:\n msg = tb\n message = title + '\\n\\n' + msg\n\n resp = Response(status=status, text=message, content_type=ct)\n resp.force_close()\n\n # some data already got sent, connection is broken\n if request.writer.output_size > 0 or self.transport is None:\n self.force_close()\n\n return resp\n\n async def handle_parse_error(self,\n writer: AbstractStreamWriter,\n status: int,\n exc: Optional[BaseException]=None,\n message: Optional[str]=None) -> None:\n task = current_task()\n assert task is not None\n request = BaseRequest(\n ERROR,\n EMPTY_PAYLOAD, # type: ignore\n self, writer,\n task,\n self._loop)\n\n resp = self.handle_error(request, status, exc, message)\n await resp.prepare(request)\n await resp.write_eof()\n\n if self.transport is not None:\n self.transport.close()\n\n self._error_handler = None\n", "path": "aiohttp/web_protocol.py" } ]
[ { "content": "import asyncio\nimport asyncio.streams\nimport traceback\nfrom collections import deque\nfrom contextlib import suppress\nfrom html import escape as html_escape\nfrom http import HTTPStatus\nfrom logging import Logger\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Awaitable,\n Callable,\n Optional,\n Tuple,\n Type,\n Union,\n cast,\n)\n\nimport yarl\n\nfrom .abc import (\n AbstractAccessLogger,\n AbstractAsyncAccessLogger,\n AbstractStreamWriter,\n)\nfrom .base_protocol import BaseProtocol\nfrom .helpers import ceil_timeout, current_task\nfrom .http import (\n HttpProcessingError,\n HttpRequestParser,\n HttpVersion10,\n RawRequestMessage,\n StreamWriter,\n)\nfrom .log import access_logger, server_logger\nfrom .streams import EMPTY_PAYLOAD, StreamReader\nfrom .tcp_helpers import tcp_keepalive\nfrom .web_exceptions import HTTPException\nfrom .web_log import AccessLogger\nfrom .web_request import BaseRequest\nfrom .web_response import Response, StreamResponse\n\n__all__ = ('RequestHandler', 'RequestPayloadError', 'PayloadAccessError')\n\nif TYPE_CHECKING: # pragma: no cover\n from .web_server import Server # noqa\n\n\n_RequestFactory = Callable[[RawRequestMessage,\n StreamReader,\n 'RequestHandler',\n AbstractStreamWriter,\n 'asyncio.Task[None]'],\n BaseRequest]\n\n_RequestHandler = Callable[[BaseRequest], Awaitable[StreamResponse]]\n_AnyAbstractAccessLogger = Union[\n Type[AbstractAsyncAccessLogger],\n Type[AbstractAccessLogger],\n]\n\n\nERROR = RawRequestMessage(\n 'UNKNOWN', '/', HttpVersion10, {},\n {}, True, False, False, False, yarl.URL('/'))\n\n\nclass RequestPayloadError(Exception):\n \"\"\"Payload parsing error.\"\"\"\n\n\nclass PayloadAccessError(Exception):\n \"\"\"Payload was accessed after response was sent.\"\"\"\n\n\nclass AccessLoggerWrapper(AbstractAsyncAccessLogger):\n \"\"\"\n Wraps an AbstractAccessLogger so it behaves\n like an AbstractAsyncAccessLogger.\n \"\"\"\n def __init__(self, access_logger: AbstractAccessLogger):\n self.access_logger = access_logger\n super().__init__()\n\n async def log(self,\n request: BaseRequest,\n response: StreamResponse,\n request_start: float) -> None:\n self.access_logger.log(request, response, request_start)\n\n\nclass RequestHandler(BaseProtocol):\n \"\"\"HTTP protocol implementation.\n\n RequestHandler handles incoming HTTP request. It reads request line,\n request headers and request payload and calls handle_request() method.\n By default it always returns with 404 response.\n\n RequestHandler handles errors in incoming request, like bad\n status line, bad headers or incomplete payload. 
If any error occurs,\n connection gets closed.\n\n :param keepalive_timeout: number of seconds before closing\n keep-alive connection\n :type keepalive_timeout: int or None\n\n :param bool tcp_keepalive: TCP keep-alive is on, default is on\n\n :param logger: custom logger object\n :type logger: aiohttp.log.server_logger\n\n :param access_log_class: custom class for access_logger\n :type access_log_class: aiohttp.abc.AbstractAccessLogger\n\n :param access_log: custom logging object\n :type access_log: aiohttp.log.server_logger\n\n :param str access_log_format: access log format string\n\n :param loop: Optional event loop\n\n :param int max_line_size: Optional maximum header line size\n\n :param int max_field_size: Optional maximum header field size\n\n :param int max_headers: Optional maximum header size\n\n \"\"\"\n KEEPALIVE_RESCHEDULE_DELAY = 1\n\n __slots__ = ('_request_count', '_keepalive', '_manager',\n '_request_handler', '_request_factory', '_tcp_keepalive',\n '_keepalive_time', '_keepalive_handle', '_keepalive_timeout',\n '_lingering_time', '_messages', '_message_tail',\n '_waiter', '_error_handler', '_task_handler',\n '_upgrade', '_payload_parser', '_request_parser',\n '_reading_paused', 'logger', 'access_log',\n 'access_logger', '_close', '_force_close',\n '_current_request')\n\n def __init__(self, manager: 'Server', *,\n loop: asyncio.AbstractEventLoop,\n keepalive_timeout: float=75., # NGINX default is 75 secs\n tcp_keepalive: bool=True,\n logger: Logger=server_logger,\n access_log_class: _AnyAbstractAccessLogger=AccessLogger,\n access_log: Logger=access_logger,\n access_log_format: str=AccessLogger.LOG_FORMAT,\n max_line_size: int=8190,\n max_headers: int=32768,\n max_field_size: int=8190,\n lingering_time: float=10.0,\n read_bufsize: int=2 ** 16):\n\n super().__init__(loop)\n\n self._request_count = 0\n self._keepalive = False\n self._current_request = None # type: Optional[BaseRequest]\n self._manager = manager # type: Optional[Server]\n self._request_handler = manager.request_handler # type: Optional[_RequestHandler] # noqa\n self._request_factory = manager.request_factory # type: Optional[_RequestFactory] # noqa\n\n self._tcp_keepalive = tcp_keepalive\n # placeholder to be replaced on keepalive timeout setup\n self._keepalive_time = 0.0\n self._keepalive_handle = None # type: Optional[asyncio.Handle]\n self._keepalive_timeout = keepalive_timeout\n self._lingering_time = float(lingering_time)\n\n self._messages = deque() # type: Any # Python 3.5 has no typing.Deque\n self._message_tail = b''\n\n self._waiter = None # type: Optional[asyncio.Future[None]]\n self._error_handler = None # type: Optional[asyncio.Task[None]]\n self._task_handler = None # type: Optional[asyncio.Task[None]]\n\n self._upgrade = False\n self._payload_parser = None # type: Any\n self._request_parser = HttpRequestParser(\n self, loop, read_bufsize,\n max_line_size=max_line_size,\n max_field_size=max_field_size,\n max_headers=max_headers,\n payload_exception=RequestPayloadError) # type: Optional[HttpRequestParser] # noqa\n\n self.logger = logger\n self.access_log = access_log\n if access_log:\n if issubclass(access_log_class, AbstractAsyncAccessLogger):\n self.access_logger = access_log_class() # type: Optional[AbstractAsyncAccessLogger] # noqa\n else:\n access_logger = access_log_class(access_log, access_log_format)\n self.access_logger = AccessLoggerWrapper(access_logger)\n else:\n self.access_logger = None\n\n self._close = False\n self._force_close = False\n\n def __repr__(self) -> str:\n return 
\"<{} {}>\".format(\n self.__class__.__name__,\n 'connected' if self.transport is not None else 'disconnected')\n\n @property\n def keepalive_timeout(self) -> float:\n return self._keepalive_timeout\n\n async def shutdown(self, timeout: Optional[float]=15.0) -> None:\n \"\"\"Worker process is about to exit, we need cleanup everything and\n stop accepting requests. It is especially important for keep-alive\n connections.\"\"\"\n self._force_close = True\n\n if self._keepalive_handle is not None:\n self._keepalive_handle.cancel()\n\n if self._waiter:\n self._waiter.cancel()\n\n # wait for handlers\n with suppress(asyncio.CancelledError, asyncio.TimeoutError):\n async with ceil_timeout(timeout):\n if (self._error_handler is not None and\n not self._error_handler.done()):\n await self._error_handler\n\n if self._current_request is not None:\n self._current_request._cancel(asyncio.CancelledError())\n\n if (self._task_handler is not None and\n not self._task_handler.done()):\n await self._task_handler\n\n # force-close non-idle handler\n if self._task_handler is not None:\n self._task_handler.cancel()\n\n if self.transport is not None:\n self.transport.close()\n self.transport = None\n\n def connection_made(self, transport: asyncio.BaseTransport) -> None:\n super().connection_made(transport)\n\n real_transport = cast(asyncio.Transport, transport)\n if self._tcp_keepalive:\n tcp_keepalive(real_transport)\n\n self._task_handler = self._loop.create_task(self.start())\n assert self._manager is not None\n self._manager.connection_made(self, real_transport)\n\n def connection_lost(self, exc: Optional[BaseException]) -> None:\n if self._manager is None:\n return\n self._manager.connection_lost(self, exc)\n\n super().connection_lost(exc)\n\n self._manager = None\n self._force_close = True\n self._request_factory = None\n self._request_handler = None\n self._request_parser = None\n\n if self._keepalive_handle is not None:\n self._keepalive_handle.cancel()\n\n if self._current_request is not None:\n if exc is None:\n exc = ConnectionResetError(\"Connection lost\")\n self._current_request._cancel(exc)\n\n if self._error_handler is not None:\n self._error_handler.cancel()\n if self._task_handler is not None:\n self._task_handler.cancel()\n if self._waiter is not None:\n self._waiter.cancel()\n\n self._task_handler = None\n\n if self._payload_parser is not None:\n self._payload_parser.feed_eof()\n self._payload_parser = None\n\n def set_parser(self, parser: Any) -> None:\n # Actual type is WebReader\n assert self._payload_parser is None\n\n self._payload_parser = parser\n\n if self._message_tail:\n self._payload_parser.feed_data(self._message_tail)\n self._message_tail = b''\n\n def eof_received(self) -> None:\n pass\n\n def data_received(self, data: bytes) -> None:\n if self._force_close or self._close:\n return\n # parse http messages\n if self._payload_parser is None and not self._upgrade:\n assert self._request_parser is not None\n try:\n messages, upgraded, tail = self._request_parser.feed_data(data)\n except HttpProcessingError as exc:\n # something happened during parsing\n self._error_handler = self._loop.create_task(\n self.handle_parse_error(\n StreamWriter(self, self._loop),\n 400, exc, exc.message))\n self.close()\n except Exception as exc:\n # 500: internal error\n self._error_handler = self._loop.create_task(\n self.handle_parse_error(\n StreamWriter(self, self._loop),\n 500, exc))\n self.close()\n else:\n if messages:\n # sometimes the parser returns no messages\n for (msg, payload) in 
messages:\n self._request_count += 1\n self._messages.append((msg, payload))\n\n waiter = self._waiter\n if waiter is not None:\n if not waiter.done():\n # don't set result twice\n waiter.set_result(None)\n\n self._upgrade = upgraded\n if upgraded and tail:\n self._message_tail = tail\n\n # no parser, just store\n elif self._payload_parser is None and self._upgrade and data:\n self._message_tail += data\n\n # feed payload\n elif data:\n eof, tail = self._payload_parser.feed_data(data)\n if eof:\n self.close()\n\n def keep_alive(self, val: bool) -> None:\n \"\"\"Set keep-alive connection mode.\n\n :param bool val: new state.\n \"\"\"\n self._keepalive = val\n if self._keepalive_handle:\n self._keepalive_handle.cancel()\n self._keepalive_handle = None\n\n def close(self) -> None:\n \"\"\"Stop accepting new pipelinig messages and close\n connection when handlers done processing messages\"\"\"\n self._close = True\n if self._waiter:\n self._waiter.cancel()\n\n def force_close(self) -> None:\n \"\"\"Force close connection\"\"\"\n self._force_close = True\n if self._waiter:\n self._waiter.cancel()\n if self.transport is not None:\n self.transport.close()\n self.transport = None\n\n async def log_access(self,\n request: BaseRequest,\n response: StreamResponse,\n request_start: float) -> None:\n if self.access_logger is not None:\n await self.access_logger.log(request, response,\n self._loop.time() - request_start)\n\n def log_debug(self, *args: Any, **kw: Any) -> None:\n if self._loop.get_debug():\n self.logger.debug(*args, **kw)\n\n def log_exception(self, *args: Any, **kw: Any) -> None:\n self.logger.exception(*args, **kw)\n\n def _process_keepalive(self) -> None:\n if self._force_close or not self._keepalive:\n return\n\n next = self._keepalive_time + self._keepalive_timeout\n\n # handler in idle state\n if self._waiter:\n if self._loop.time() > next:\n self.force_close()\n return\n\n # not all request handlers are done,\n # reschedule itself to next second\n self._keepalive_handle = self._loop.call_later(\n self.KEEPALIVE_RESCHEDULE_DELAY, self._process_keepalive)\n\n async def _handle_request(self,\n request: BaseRequest,\n start_time: float,\n ) -> Tuple[StreamResponse, bool]:\n assert self._request_handler is not None\n try:\n try:\n self._current_request = request\n resp = await self._request_handler(request)\n finally:\n self._current_request = None\n except HTTPException as exc:\n resp = Response(status=exc.status,\n reason=exc.reason,\n text=exc.text,\n headers=exc.headers)\n reset = await self.finish_response(request, resp, start_time)\n except asyncio.CancelledError:\n raise\n except asyncio.TimeoutError as exc:\n self.log_debug('Request handler timed out.', exc_info=exc)\n resp = self.handle_error(request, 504)\n reset = await self.finish_response(request, resp, start_time)\n except Exception as exc:\n resp = self.handle_error(request, 500, exc)\n reset = await self.finish_response(request, resp, start_time)\n else:\n reset = await self.finish_response(request, resp, start_time)\n\n return resp, reset\n\n async def start(self) -> None:\n \"\"\"Process incoming request.\n\n It reads request line, request headers and request payload, then\n calls handle_request() method. Subclass has to override\n handle_request(). start() handles various exceptions in request\n or response handling. 
Connection is being closed always unless\n keep_alive(True) specified.\n \"\"\"\n loop = self._loop\n handler = self._task_handler\n assert handler is not None\n manager = self._manager\n assert manager is not None\n keepalive_timeout = self._keepalive_timeout\n resp = None\n assert self._request_factory is not None\n assert self._request_handler is not None\n\n while not self._force_close:\n if not self._messages:\n try:\n # wait for next request\n self._waiter = loop.create_future()\n await self._waiter\n except asyncio.CancelledError:\n break\n finally:\n self._waiter = None\n\n message, payload = self._messages.popleft()\n\n start = loop.time()\n\n manager.requests_count += 1\n writer = StreamWriter(self, loop)\n request = self._request_factory(\n message, payload, self, writer, handler)\n try:\n # a new task is used for copy context vars (#3406)\n task = self._loop.create_task(\n self._handle_request(request, start))\n try:\n resp, reset = await task\n except (asyncio.CancelledError, ConnectionError):\n self.log_debug('Ignored premature client disconnection')\n break\n\n # Drop the processed task from asyncio.Task.all_tasks() early\n del task\n if reset:\n self.log_debug('Ignored premature client disconnection 2')\n break\n\n # notify server about keep-alive\n self._keepalive = bool(resp.keep_alive)\n\n # check payload\n if not payload.is_eof():\n lingering_time = self._lingering_time\n if not self._force_close and lingering_time:\n self.log_debug(\n 'Start lingering close timer for %s sec.',\n lingering_time)\n\n now = loop.time()\n end_t = now + lingering_time\n\n with suppress(\n asyncio.TimeoutError, asyncio.CancelledError):\n while not payload.is_eof() and now < end_t:\n async with ceil_timeout(end_t - now):\n # read and ignore\n await payload.readany()\n now = loop.time()\n\n # if payload still uncompleted\n if not payload.is_eof() and not self._force_close:\n self.log_debug('Uncompleted request.')\n self.close()\n\n payload.set_exception(PayloadAccessError())\n\n except asyncio.CancelledError:\n self.log_debug('Ignored premature client disconnection ')\n break\n except RuntimeError as exc:\n if self._loop.get_debug():\n self.log_exception(\n 'Unhandled runtime exception', exc_info=exc)\n self.force_close()\n except Exception as exc:\n self.log_exception('Unhandled exception', exc_info=exc)\n self.force_close()\n finally:\n if self.transport is None and resp is not None:\n self.log_debug('Ignored premature client disconnection.')\n elif not self._force_close:\n if self._keepalive and not self._close:\n # start keep-alive timer\n if keepalive_timeout is not None:\n now = self._loop.time()\n self._keepalive_time = now\n if self._keepalive_handle is None:\n self._keepalive_handle = loop.call_at(\n now + keepalive_timeout,\n self._process_keepalive)\n else:\n break\n\n # remove handler, close transport if no handlers left\n if not self._force_close:\n self._task_handler = None\n if self.transport is not None and self._error_handler is None:\n self.transport.close()\n\n async def finish_response(self,\n request: BaseRequest,\n resp: StreamResponse,\n start_time: float) -> bool:\n \"\"\"\n Prepare the response and write_eof, then log access. This has to\n be called within the context of any exception so the access logger\n can get exception information. 
Returns True if the client disconnects\n prematurely.\n \"\"\"\n request._finish()\n if self._request_parser is not None:\n self._request_parser.set_upgraded(False)\n self._upgrade = False\n if self._message_tail:\n self._request_parser.feed_data(self._message_tail)\n self._message_tail = b''\n try:\n prepare_meth = resp.prepare\n except AttributeError:\n if resp is None:\n raise RuntimeError(\"Missing return \"\n \"statement on request handler\")\n else:\n raise RuntimeError(\"Web-handler should return \"\n \"a response instance, \"\n \"got {!r}\".format(resp))\n try:\n await prepare_meth(request)\n await resp.write_eof()\n except ConnectionError:\n await self.log_access(request, resp, start_time)\n return True\n else:\n await self.log_access(request, resp, start_time)\n return False\n\n def handle_error(self,\n request: BaseRequest,\n status: int=500,\n exc: Optional[BaseException]=None,\n message: Optional[str]=None) -> StreamResponse:\n \"\"\"Handle errors.\n\n Returns HTTP response with specific status code. Logs additional\n information. It always closes current connection.\"\"\"\n self.log_exception(\"Error handling request\", exc_info=exc)\n\n ct = 'text/plain'\n if status == HTTPStatus.INTERNAL_SERVER_ERROR:\n title = '{0.value} {0.phrase}'.format(\n HTTPStatus.INTERNAL_SERVER_ERROR\n )\n msg = HTTPStatus.INTERNAL_SERVER_ERROR.description\n tb = None\n if self._loop.get_debug():\n with suppress(Exception):\n tb = traceback.format_exc()\n\n if 'text/html' in request.headers.get('Accept', ''):\n if tb:\n tb = html_escape(tb)\n msg = '<h2>Traceback:</h2>\\n<pre>{}</pre>'.format(tb)\n message = (\n \"<html><head>\"\n \"<title>{title}</title>\"\n \"</head><body>\\n<h1>{title}</h1>\"\n \"\\n{msg}\\n</body></html>\\n\"\n ).format(title=title, msg=msg)\n ct = 'text/html'\n else:\n if tb:\n msg = tb\n message = title + '\\n\\n' + msg\n\n resp = Response(status=status, text=message, content_type=ct)\n resp.force_close()\n\n # some data already got sent, connection is broken\n if request.writer.output_size > 0 or self.transport is None:\n self.force_close()\n\n return resp\n\n async def handle_parse_error(self,\n writer: AbstractStreamWriter,\n status: int,\n exc: Optional[BaseException]=None,\n message: Optional[str]=None) -> None:\n task = current_task()\n assert task is not None\n request = BaseRequest(\n ERROR,\n EMPTY_PAYLOAD, # type: ignore\n self, writer,\n task,\n self._loop)\n\n resp = self.handle_error(request, status, exc, message)\n await resp.prepare(request)\n await resp.write_eof()\n\n if self.transport is not None:\n self.transport.close()\n\n self._error_handler = None\n", "path": "aiohttp/web_protocol.py" } ]
diff --git a/CHANGES/4408.bugfix b/CHANGES/4408.bugfix new file mode 100644 index 00000000000..9185aaab042 --- /dev/null +++ b/CHANGES/4408.bugfix @@ -0,0 +1 @@ +Fix a warning about unfinished task in `web_protocol.py` diff --git a/aiohttp/web_protocol.py b/aiohttp/web_protocol.py index 30e9c387e2a..2b7eef5757e 100644 --- a/aiohttp/web_protocol.py +++ b/aiohttp/web_protocol.py @@ -277,6 +277,10 @@ def connection_lost(self, exc: Optional[BaseException]) -> None: if self._error_handler is not None: self._error_handler.cancel() + if self._task_handler is not None: + self._task_handler.cancel() + if self._waiter is not None: + self._waiter.cancel() self._task_handler = None diff --git a/tests/test_web_protocol.py b/tests/test_web_protocol.py index 84d0f493378..4405f192c1c 100644 --- a/tests/test_web_protocol.py +++ b/tests/test_web_protocol.py @@ -821,7 +821,7 @@ async def disconn(): writer.write(b"x") writer.close() await asyncio.sleep(0.1) - logger.debug.assert_called_with('Ignored premature client disconnection.') + logger.debug.assert_called_with('Ignored premature client disconnection') assert disconnected_notified
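The change boils down to a teardown rule: every task or waiter the protocol created has to be explicitly cancelled in `connection_lost()`, so nothing is left pending for the garbage collector to report. A generic, hedged sketch of that pattern outside aiohttp (protocol and coroutine names are placeholders):

```python
# Hedged sketch of the teardown pattern the diff applies: cancel the handler
# task and the waiter future as soon as the transport is gone.
import asyncio
from typing import Optional

class HandlerProtocol(asyncio.Protocol):
    def __init__(self) -> None:
        self._task: Optional[asyncio.Task] = None
        self._waiter: Optional[asyncio.Future] = None

    def connection_made(self, transport: asyncio.BaseTransport) -> None:
        loop = asyncio.get_running_loop()
        self._waiter = loop.create_future()
        self._task = loop.create_task(self._serve())

    async def _serve(self) -> None:
        await self._waiter            # stands in for the request-handling loop

    def connection_lost(self, exc: Optional[BaseException]) -> None:
        # Mirrors the fix: cancel instead of merely dropping the references.
        if self._task is not None:
            self._task.cancel()
        if self._waiter is not None:
            self._waiter.cancel()
        self._task = None
        self._waiter = None
```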
AUTOMATIC1111__stable-diffusion-webui-12387
[Bug]: Lora cannot be loaded in API mode
### Is there an existing issue for this?

- [X] I have searched the existing issues and checked the recent builds/commits

### What happened?

When I run webui with `--api` to start a simple API-only server and post an HTTP request, the console shows **Skipping unknown extra network: lora**:

100%|████████████████████████████████████████████████████████████████████████████████████████████| 40/40 [00:11<00:00,  3.39it/s]
INFO: 127.0.0.1:38420 - "POST /sdapi/v1/txt2img HTTP/1.1" 200 OK
INFO: 127.0.0.1:38647 - "POST /sdapi/v1/png-info HTTP/1.1" 200 OK

I checked `webui.py`, and it seems that lora is not registered. For some reason I can't access the web page, so I don't know whether lora works.

### Steps to reproduce the problem

1. download the release package
2. download the diffusion and lora checkpoint files
3. run webui.sh to prepare the env
4. exec ./webui.sh --nowebui
5. the warning above appears

### What should have happened?

**Skipping unknown extra network: lora** shouldn't happen

### Commit where the problem happens

release v1.0.0-pre

### What platforms do you use to access the UI ?

_No response_

### What browsers do you use to access the UI ?

_No response_

### Command Line Arguments

```Shell
./webui.sh --nowebui
```

### List of extensions

No

### Console logs

```Shell
Skipping unknown extra network: lora
100%|████████████████████████████████████████████████████████████████████████████████████████████| 40/40 [00:10<00:00, 3.68it/s]
INFO: 127.0.0.1:39473 - "POST /sdapi/v1/txt2img HTTP/1.1" 200 OK
INFO: 127.0.0.1:39758 - "POST /sdapi/v1/png-info HTTP/1.1" 200 OK
```

### Additional information

_No response_
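Since the reporter cannot open the web page, the quickest check is to drive the same endpoints that appear in the log and watch whether the LoRA tag has any effect. A hypothetical smoke test (host, port and LoRA name are placeholders; 7861 is the default API-only port in `webui.py` when `--port` is not given):

```python
# Hypothetical smoke test against the API-only server from the report.
# The LoRA name in the prompt is a placeholder, not a value from the issue.
import requests

BASE = "http://127.0.0.1:7861"  # api_only() default when --port is unset

payload = {
    "prompt": "a photo of a cat <lora:myLora:0.8>",
    "steps": 20,
    "seed": 1234,
}
resp = requests.post(f"{BASE}/sdapi/v1/txt2img", json=payload, timeout=600)
resp.raise_for_status()

# The "info" field echoes the generation parameters. If the server console
# prints "Skipping unknown extra network: lora", the LoRA was never applied
# and the result matches what the same seed gives without the tag.
print(resp.json()["info"])
```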
[ { "content": "from __future__ import annotations\r\n\r\nimport os\r\nimport sys\r\nimport time\r\nimport importlib\r\nimport signal\r\nimport re\r\nimport warnings\r\nimport json\r\nfrom threading import Thread\r\nfrom typing import Iterable\r\n\r\nfrom fastapi import FastAPI\r\nfrom fastapi.middleware.cors import CORSMiddleware\r\nfrom fastapi.middleware.gzip import GZipMiddleware\r\n\r\nimport logging\r\n\r\n# We can't use cmd_opts for this because it will not have been initialized at this point.\r\nlog_level = os.environ.get(\"SD_WEBUI_LOG_LEVEL\")\r\nif log_level:\r\n log_level = getattr(logging, log_level.upper(), None) or logging.INFO\r\n logging.basicConfig(\r\n level=log_level,\r\n format='%(asctime)s %(levelname)s [%(name)s] %(message)s',\r\n datefmt='%Y-%m-%d %H:%M:%S',\r\n )\r\n\r\nlogging.getLogger(\"torch.distributed.nn\").setLevel(logging.ERROR) # sshh...\r\nlogging.getLogger(\"xformers\").addFilter(lambda record: 'A matching Triton is not available' not in record.getMessage())\r\n\r\nfrom modules import timer\r\nstartup_timer = timer.startup_timer\r\nstartup_timer.record(\"launcher\")\r\n\r\nimport torch\r\nimport pytorch_lightning # noqa: F401 # pytorch_lightning should be imported after torch, but it re-enables warnings on import so import once to disable them\r\nwarnings.filterwarnings(action=\"ignore\", category=DeprecationWarning, module=\"pytorch_lightning\")\r\nwarnings.filterwarnings(action=\"ignore\", category=UserWarning, module=\"torchvision\")\r\nstartup_timer.record(\"import torch\")\r\n\r\nimport gradio # noqa: F401\r\nstartup_timer.record(\"import gradio\")\r\n\r\nfrom modules import paths, timer, import_hook, errors, devices # noqa: F401\r\nstartup_timer.record(\"setup paths\")\r\n\r\nimport ldm.modules.encoders.modules # noqa: F401\r\nstartup_timer.record(\"import ldm\")\r\n\r\n\r\nfrom modules import extra_networks\r\nfrom modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, queue_lock # noqa: F401\r\n\r\n# Truncate version number of nightly/local build of PyTorch to not cause exceptions with CodeFormer or Safetensors\r\nif \".dev\" in torch.__version__ or \"+git\" in torch.__version__:\r\n torch.__long_version__ = torch.__version__\r\n torch.__version__ = re.search(r'[\\d.]+[\\d]', torch.__version__).group(0)\r\n\r\nfrom modules import shared\r\n\r\nif not shared.cmd_opts.skip_version_check:\r\n errors.check_versions()\r\n\r\nimport modules.codeformer_model as codeformer\r\nimport modules.gfpgan_model as gfpgan\r\nfrom modules import sd_samplers, upscaler, extensions, localization, ui_tempdir, ui_extra_networks, config_states\r\nimport modules.face_restoration\r\nimport modules.img2img\r\n\r\nimport modules.lowvram\r\nimport modules.scripts\r\nimport modules.sd_hijack\r\nimport modules.sd_hijack_optimizations\r\nimport modules.sd_models\r\nimport modules.sd_vae\r\nimport modules.sd_unet\r\nimport modules.txt2img\r\nimport modules.script_callbacks\r\nimport modules.textual_inversion.textual_inversion\r\nimport modules.progress\r\n\r\nimport modules.ui\r\nfrom modules import modelloader\r\nfrom modules.shared import cmd_opts\r\nimport modules.hypernetworks.hypernetwork\r\n\r\nstartup_timer.record(\"other imports\")\r\n\r\n\r\nif cmd_opts.server_name:\r\n server_name = cmd_opts.server_name\r\nelse:\r\n server_name = \"0.0.0.0\" if cmd_opts.listen else None\r\n\r\n\r\ndef fix_asyncio_event_loop_policy():\r\n \"\"\"\r\n The default `asyncio` event loop policy only automatically creates\r\n event loops in the main threads. 
Other threads must create event\r\n loops explicitly or `asyncio.get_event_loop` (and therefore\r\n `.IOLoop.current`) will fail. Installing this policy allows event\r\n loops to be created automatically on any thread, matching the\r\n behavior of Tornado versions prior to 5.0 (or 5.0 on Python 2).\r\n \"\"\"\r\n\r\n import asyncio\r\n\r\n if sys.platform == \"win32\" and hasattr(asyncio, \"WindowsSelectorEventLoopPolicy\"):\r\n # \"Any thread\" and \"selector\" should be orthogonal, but there's not a clean\r\n # interface for composing policies so pick the right base.\r\n _BasePolicy = asyncio.WindowsSelectorEventLoopPolicy # type: ignore\r\n else:\r\n _BasePolicy = asyncio.DefaultEventLoopPolicy\r\n\r\n class AnyThreadEventLoopPolicy(_BasePolicy): # type: ignore\r\n \"\"\"Event loop policy that allows loop creation on any thread.\r\n Usage::\r\n\r\n asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())\r\n \"\"\"\r\n\r\n def get_event_loop(self) -> asyncio.AbstractEventLoop:\r\n try:\r\n return super().get_event_loop()\r\n except (RuntimeError, AssertionError):\r\n # This was an AssertionError in python 3.4.2 (which ships with debian jessie)\r\n # and changed to a RuntimeError in 3.4.3.\r\n # \"There is no current event loop in thread %r\"\r\n loop = self.new_event_loop()\r\n self.set_event_loop(loop)\r\n return loop\r\n\r\n asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())\r\n\r\n\r\ndef restore_config_state_file():\r\n config_state_file = shared.opts.restore_config_state_file\r\n if config_state_file == \"\":\r\n return\r\n\r\n shared.opts.restore_config_state_file = \"\"\r\n shared.opts.save(shared.config_filename)\r\n\r\n if os.path.isfile(config_state_file):\r\n print(f\"*** About to restore extension state from file: {config_state_file}\")\r\n with open(config_state_file, \"r\", encoding=\"utf-8\") as f:\r\n config_state = json.load(f)\r\n config_states.restore_extension_config(config_state)\r\n startup_timer.record(\"restore extension config\")\r\n elif config_state_file:\r\n print(f\"!!! Config state backup not found: {config_state_file}\")\r\n\r\n\r\ndef validate_tls_options():\r\n if not (cmd_opts.tls_keyfile and cmd_opts.tls_certfile):\r\n return\r\n\r\n try:\r\n if not os.path.exists(cmd_opts.tls_keyfile):\r\n print(\"Invalid path to TLS keyfile given\")\r\n if not os.path.exists(cmd_opts.tls_certfile):\r\n print(f\"Invalid path to TLS certfile: '{cmd_opts.tls_certfile}'\")\r\n except TypeError:\r\n cmd_opts.tls_keyfile = cmd_opts.tls_certfile = None\r\n print(\"TLS setup invalid, running webui without TLS\")\r\n else:\r\n print(\"Running with TLS\")\r\n startup_timer.record(\"TLS\")\r\n\r\n\r\ndef get_gradio_auth_creds() -> Iterable[tuple[str, ...]]:\r\n \"\"\"\r\n Convert the gradio_auth and gradio_auth_path commandline arguments into\r\n an iterable of (username, password) tuples.\r\n \"\"\"\r\n def process_credential_line(s) -> tuple[str, ...] 
| None:\r\n s = s.strip()\r\n if not s:\r\n return None\r\n return tuple(s.split(':', 1))\r\n\r\n if cmd_opts.gradio_auth:\r\n for cred in cmd_opts.gradio_auth.split(','):\r\n cred = process_credential_line(cred)\r\n if cred:\r\n yield cred\r\n\r\n if cmd_opts.gradio_auth_path:\r\n with open(cmd_opts.gradio_auth_path, 'r', encoding=\"utf8\") as file:\r\n for line in file.readlines():\r\n for cred in line.strip().split(','):\r\n cred = process_credential_line(cred)\r\n if cred:\r\n yield cred\r\n\r\n\r\ndef configure_sigint_handler():\r\n # make the program just exit at ctrl+c without waiting for anything\r\n def sigint_handler(sig, frame):\r\n print(f'Interrupted with signal {sig} in {frame}')\r\n os._exit(0)\r\n\r\n if not os.environ.get(\"COVERAGE_RUN\"):\r\n # Don't install the immediate-quit handler when running under coverage,\r\n # as then the coverage report won't be generated.\r\n signal.signal(signal.SIGINT, sigint_handler)\r\n\r\n\r\ndef configure_opts_onchange():\r\n shared.opts.onchange(\"sd_model_checkpoint\", wrap_queued_call(lambda: modules.sd_models.reload_model_weights()), call=False)\r\n shared.opts.onchange(\"sd_vae\", wrap_queued_call(lambda: modules.sd_vae.reload_vae_weights()), call=False)\r\n shared.opts.onchange(\"sd_vae_overrides_per_model_preferences\", wrap_queued_call(lambda: modules.sd_vae.reload_vae_weights()), call=False)\r\n shared.opts.onchange(\"temp_dir\", ui_tempdir.on_tmpdir_changed)\r\n shared.opts.onchange(\"gradio_theme\", shared.reload_gradio_theme)\r\n shared.opts.onchange(\"cross_attention_optimization\", wrap_queued_call(lambda: modules.sd_hijack.model_hijack.redo_hijack(shared.sd_model)), call=False)\r\n startup_timer.record(\"opts onchange\")\r\n\r\n\r\ndef initialize():\r\n fix_asyncio_event_loop_policy()\r\n validate_tls_options()\r\n configure_sigint_handler()\r\n modelloader.cleanup_models()\r\n configure_opts_onchange()\r\n\r\n modules.sd_models.setup_model()\r\n startup_timer.record(\"setup SD model\")\r\n\r\n codeformer.setup_model(cmd_opts.codeformer_models_path)\r\n startup_timer.record(\"setup codeformer\")\r\n\r\n gfpgan.setup_model(cmd_opts.gfpgan_models_path)\r\n startup_timer.record(\"setup gfpgan\")\r\n\r\n initialize_rest(reload_script_modules=False)\r\n\r\n\r\ndef initialize_rest(*, reload_script_modules=False):\r\n \"\"\"\r\n Called both from initialize() and when reloading the webui.\r\n \"\"\"\r\n sd_samplers.set_samplers()\r\n extensions.list_extensions()\r\n startup_timer.record(\"list extensions\")\r\n\r\n restore_config_state_file()\r\n\r\n if cmd_opts.ui_debug_mode:\r\n shared.sd_upscalers = upscaler.UpscalerLanczos().scalers\r\n modules.scripts.load_scripts()\r\n return\r\n\r\n modules.sd_models.list_models()\r\n startup_timer.record(\"list SD models\")\r\n\r\n localization.list_localizations(cmd_opts.localizations_dir)\r\n\r\n with startup_timer.subcategory(\"load scripts\"):\r\n modules.scripts.load_scripts()\r\n\r\n if reload_script_modules:\r\n for module in [module for name, module in sys.modules.items() if name.startswith(\"modules.ui\")]:\r\n importlib.reload(module)\r\n startup_timer.record(\"reload script modules\")\r\n\r\n modelloader.load_upscalers()\r\n startup_timer.record(\"load upscalers\")\r\n\r\n modules.sd_vae.refresh_vae_list()\r\n startup_timer.record(\"refresh VAE\")\r\n modules.textual_inversion.textual_inversion.list_textual_inversion_templates()\r\n startup_timer.record(\"refresh textual inversion templates\")\r\n\r\n 
modules.script_callbacks.on_list_optimizers(modules.sd_hijack_optimizations.list_optimizers)\r\n modules.sd_hijack.list_optimizers()\r\n startup_timer.record(\"scripts list_optimizers\")\r\n\r\n modules.sd_unet.list_unets()\r\n startup_timer.record(\"scripts list_unets\")\r\n\r\n def load_model():\r\n \"\"\"\r\n Accesses shared.sd_model property to load model.\r\n After it's available, if it has been loaded before this access by some extension,\r\n its optimization may be None because the list of optimizaers has neet been filled\r\n by that time, so we apply optimization again.\r\n \"\"\"\r\n\r\n shared.sd_model # noqa: B018\r\n\r\n if modules.sd_hijack.current_optimizer is None:\r\n modules.sd_hijack.apply_optimizations()\r\n\r\n devices.first_time_calculation()\r\n\r\n Thread(target=load_model).start()\r\n\r\n shared.reload_hypernetworks()\r\n startup_timer.record(\"reload hypernetworks\")\r\n\r\n ui_extra_networks.initialize()\r\n ui_extra_networks.register_default_pages()\r\n\r\n extra_networks.initialize()\r\n extra_networks.register_default_extra_networks()\r\n startup_timer.record(\"initialize extra networks\")\r\n\r\n\r\ndef setup_middleware(app):\r\n app.middleware_stack = None # reset current middleware to allow modifying user provided list\r\n app.add_middleware(GZipMiddleware, minimum_size=1000)\r\n configure_cors_middleware(app)\r\n app.build_middleware_stack() # rebuild middleware stack on-the-fly\r\n\r\n\r\ndef configure_cors_middleware(app):\r\n cors_options = {\r\n \"allow_methods\": [\"*\"],\r\n \"allow_headers\": [\"*\"],\r\n \"allow_credentials\": True,\r\n }\r\n if cmd_opts.cors_allow_origins:\r\n cors_options[\"allow_origins\"] = cmd_opts.cors_allow_origins.split(',')\r\n if cmd_opts.cors_allow_origins_regex:\r\n cors_options[\"allow_origin_regex\"] = cmd_opts.cors_allow_origins_regex\r\n app.add_middleware(CORSMiddleware, **cors_options)\r\n\r\n\r\ndef create_api(app):\r\n from modules.api.api import Api\r\n api = Api(app, queue_lock)\r\n return api\r\n\r\n\r\ndef api_only():\r\n initialize()\r\n\r\n app = FastAPI()\r\n setup_middleware(app)\r\n api = create_api(app)\r\n\r\n modules.script_callbacks.app_started_callback(None, app)\r\n\r\n print(f\"Startup time: {startup_timer.summary()}.\")\r\n api.launch(\r\n server_name=\"0.0.0.0\" if cmd_opts.listen else \"127.0.0.1\",\r\n port=cmd_opts.port if cmd_opts.port else 7861,\r\n root_path=f\"/{cmd_opts.subpath}\" if cmd_opts.subpath else \"\"\r\n )\r\n\r\n\r\ndef webui():\r\n launch_api = cmd_opts.api\r\n initialize()\r\n\r\n while 1:\r\n if shared.opts.clean_temp_dir_at_start:\r\n ui_tempdir.cleanup_tmpdr()\r\n startup_timer.record(\"cleanup temp dir\")\r\n\r\n modules.script_callbacks.before_ui_callback()\r\n startup_timer.record(\"scripts before_ui_callback\")\r\n\r\n shared.demo = modules.ui.create_ui()\r\n startup_timer.record(\"create ui\")\r\n\r\n if not cmd_opts.no_gradio_queue:\r\n shared.demo.queue(64)\r\n\r\n gradio_auth_creds = list(get_gradio_auth_creds()) or None\r\n\r\n app, local_url, share_url = shared.demo.launch(\r\n share=cmd_opts.share,\r\n server_name=server_name,\r\n server_port=cmd_opts.port,\r\n ssl_keyfile=cmd_opts.tls_keyfile,\r\n ssl_certfile=cmd_opts.tls_certfile,\r\n ssl_verify=cmd_opts.disable_tls_verify,\r\n debug=cmd_opts.gradio_debug,\r\n auth=gradio_auth_creds,\r\n inbrowser=cmd_opts.autolaunch and os.getenv('SD_WEBUI_RESTARTING') != '1',\r\n prevent_thread_lock=True,\r\n allowed_paths=cmd_opts.gradio_allowed_path,\r\n app_kwargs={\r\n \"docs_url\": \"/docs\",\r\n \"redoc_url\": 
\"/redoc\",\r\n },\r\n root_path=f\"/{cmd_opts.subpath}\" if cmd_opts.subpath else \"\",\r\n )\r\n\r\n # after initial launch, disable --autolaunch for subsequent restarts\r\n cmd_opts.autolaunch = False\r\n\r\n startup_timer.record(\"gradio launch\")\r\n\r\n # gradio uses a very open CORS policy via app.user_middleware, which makes it possible for\r\n # an attacker to trick the user into opening a malicious HTML page, which makes a request to the\r\n # running web ui and do whatever the attacker wants, including installing an extension and\r\n # running its code. We disable this here. Suggested by RyotaK.\r\n app.user_middleware = [x for x in app.user_middleware if x.cls.__name__ != 'CORSMiddleware']\r\n\r\n setup_middleware(app)\r\n\r\n modules.progress.setup_progress_api(app)\r\n modules.ui.setup_ui_api(app)\r\n\r\n if launch_api:\r\n create_api(app)\r\n\r\n ui_extra_networks.add_pages_to_demo(app)\r\n\r\n startup_timer.record(\"add APIs\")\r\n\r\n with startup_timer.subcategory(\"app_started_callback\"):\r\n modules.script_callbacks.app_started_callback(shared.demo, app)\r\n\r\n timer.startup_record = startup_timer.dump()\r\n print(f\"Startup time: {startup_timer.summary()}.\")\r\n\r\n try:\r\n while True:\r\n server_command = shared.state.wait_for_server_command(timeout=5)\r\n if server_command:\r\n if server_command in (\"stop\", \"restart\"):\r\n break\r\n else:\r\n print(f\"Unknown server command: {server_command}\")\r\n except KeyboardInterrupt:\r\n print('Caught KeyboardInterrupt, stopping...')\r\n server_command = \"stop\"\r\n\r\n if server_command == \"stop\":\r\n print(\"Stopping server...\")\r\n # If we catch a keyboard interrupt, we want to stop the server and exit.\r\n shared.demo.close()\r\n break\r\n\r\n print('Restarting UI...')\r\n shared.demo.close()\r\n time.sleep(0.5)\r\n startup_timer.reset()\r\n modules.script_callbacks.app_reload_callback()\r\n startup_timer.record(\"app reload callback\")\r\n modules.script_callbacks.script_unloaded_callback()\r\n startup_timer.record(\"scripts unloaded callback\")\r\n initialize_rest(reload_script_modules=True)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n if cmd_opts.nowebui:\r\n api_only()\r\n else:\r\n webui()\r\n", "path": "webui.py" } ]
[ { "content": "from __future__ import annotations\r\n\r\nimport os\r\nimport sys\r\nimport time\r\nimport importlib\r\nimport signal\r\nimport re\r\nimport warnings\r\nimport json\r\nfrom threading import Thread\r\nfrom typing import Iterable\r\n\r\nfrom fastapi import FastAPI\r\nfrom fastapi.middleware.cors import CORSMiddleware\r\nfrom fastapi.middleware.gzip import GZipMiddleware\r\n\r\nimport logging\r\n\r\n# We can't use cmd_opts for this because it will not have been initialized at this point.\r\nlog_level = os.environ.get(\"SD_WEBUI_LOG_LEVEL\")\r\nif log_level:\r\n log_level = getattr(logging, log_level.upper(), None) or logging.INFO\r\n logging.basicConfig(\r\n level=log_level,\r\n format='%(asctime)s %(levelname)s [%(name)s] %(message)s',\r\n datefmt='%Y-%m-%d %H:%M:%S',\r\n )\r\n\r\nlogging.getLogger(\"torch.distributed.nn\").setLevel(logging.ERROR) # sshh...\r\nlogging.getLogger(\"xformers\").addFilter(lambda record: 'A matching Triton is not available' not in record.getMessage())\r\n\r\nfrom modules import timer\r\nstartup_timer = timer.startup_timer\r\nstartup_timer.record(\"launcher\")\r\n\r\nimport torch\r\nimport pytorch_lightning # noqa: F401 # pytorch_lightning should be imported after torch, but it re-enables warnings on import so import once to disable them\r\nwarnings.filterwarnings(action=\"ignore\", category=DeprecationWarning, module=\"pytorch_lightning\")\r\nwarnings.filterwarnings(action=\"ignore\", category=UserWarning, module=\"torchvision\")\r\nstartup_timer.record(\"import torch\")\r\n\r\nimport gradio # noqa: F401\r\nstartup_timer.record(\"import gradio\")\r\n\r\nfrom modules import paths, timer, import_hook, errors, devices # noqa: F401\r\nstartup_timer.record(\"setup paths\")\r\n\r\nimport ldm.modules.encoders.modules # noqa: F401\r\nstartup_timer.record(\"import ldm\")\r\n\r\n\r\nfrom modules import extra_networks\r\nfrom modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, queue_lock # noqa: F401\r\n\r\n# Truncate version number of nightly/local build of PyTorch to not cause exceptions with CodeFormer or Safetensors\r\nif \".dev\" in torch.__version__ or \"+git\" in torch.__version__:\r\n torch.__long_version__ = torch.__version__\r\n torch.__version__ = re.search(r'[\\d.]+[\\d]', torch.__version__).group(0)\r\n\r\nfrom modules import shared\r\n\r\nif not shared.cmd_opts.skip_version_check:\r\n errors.check_versions()\r\n\r\nimport modules.codeformer_model as codeformer\r\nimport modules.gfpgan_model as gfpgan\r\nfrom modules import sd_samplers, upscaler, extensions, localization, ui_tempdir, ui_extra_networks, config_states\r\nimport modules.face_restoration\r\nimport modules.img2img\r\n\r\nimport modules.lowvram\r\nimport modules.scripts\r\nimport modules.sd_hijack\r\nimport modules.sd_hijack_optimizations\r\nimport modules.sd_models\r\nimport modules.sd_vae\r\nimport modules.sd_unet\r\nimport modules.txt2img\r\nimport modules.script_callbacks\r\nimport modules.textual_inversion.textual_inversion\r\nimport modules.progress\r\n\r\nimport modules.ui\r\nfrom modules import modelloader\r\nfrom modules.shared import cmd_opts\r\nimport modules.hypernetworks.hypernetwork\r\n\r\nstartup_timer.record(\"other imports\")\r\n\r\n\r\nif cmd_opts.server_name:\r\n server_name = cmd_opts.server_name\r\nelse:\r\n server_name = \"0.0.0.0\" if cmd_opts.listen else None\r\n\r\n\r\ndef fix_asyncio_event_loop_policy():\r\n \"\"\"\r\n The default `asyncio` event loop policy only automatically creates\r\n event loops in the main threads. 
Other threads must create event\r\n loops explicitly or `asyncio.get_event_loop` (and therefore\r\n `.IOLoop.current`) will fail. Installing this policy allows event\r\n loops to be created automatically on any thread, matching the\r\n behavior of Tornado versions prior to 5.0 (or 5.0 on Python 2).\r\n \"\"\"\r\n\r\n import asyncio\r\n\r\n if sys.platform == \"win32\" and hasattr(asyncio, \"WindowsSelectorEventLoopPolicy\"):\r\n # \"Any thread\" and \"selector\" should be orthogonal, but there's not a clean\r\n # interface for composing policies so pick the right base.\r\n _BasePolicy = asyncio.WindowsSelectorEventLoopPolicy # type: ignore\r\n else:\r\n _BasePolicy = asyncio.DefaultEventLoopPolicy\r\n\r\n class AnyThreadEventLoopPolicy(_BasePolicy): # type: ignore\r\n \"\"\"Event loop policy that allows loop creation on any thread.\r\n Usage::\r\n\r\n asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())\r\n \"\"\"\r\n\r\n def get_event_loop(self) -> asyncio.AbstractEventLoop:\r\n try:\r\n return super().get_event_loop()\r\n except (RuntimeError, AssertionError):\r\n # This was an AssertionError in python 3.4.2 (which ships with debian jessie)\r\n # and changed to a RuntimeError in 3.4.3.\r\n # \"There is no current event loop in thread %r\"\r\n loop = self.new_event_loop()\r\n self.set_event_loop(loop)\r\n return loop\r\n\r\n asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())\r\n\r\n\r\ndef restore_config_state_file():\r\n config_state_file = shared.opts.restore_config_state_file\r\n if config_state_file == \"\":\r\n return\r\n\r\n shared.opts.restore_config_state_file = \"\"\r\n shared.opts.save(shared.config_filename)\r\n\r\n if os.path.isfile(config_state_file):\r\n print(f\"*** About to restore extension state from file: {config_state_file}\")\r\n with open(config_state_file, \"r\", encoding=\"utf-8\") as f:\r\n config_state = json.load(f)\r\n config_states.restore_extension_config(config_state)\r\n startup_timer.record(\"restore extension config\")\r\n elif config_state_file:\r\n print(f\"!!! Config state backup not found: {config_state_file}\")\r\n\r\n\r\ndef validate_tls_options():\r\n if not (cmd_opts.tls_keyfile and cmd_opts.tls_certfile):\r\n return\r\n\r\n try:\r\n if not os.path.exists(cmd_opts.tls_keyfile):\r\n print(\"Invalid path to TLS keyfile given\")\r\n if not os.path.exists(cmd_opts.tls_certfile):\r\n print(f\"Invalid path to TLS certfile: '{cmd_opts.tls_certfile}'\")\r\n except TypeError:\r\n cmd_opts.tls_keyfile = cmd_opts.tls_certfile = None\r\n print(\"TLS setup invalid, running webui without TLS\")\r\n else:\r\n print(\"Running with TLS\")\r\n startup_timer.record(\"TLS\")\r\n\r\n\r\ndef get_gradio_auth_creds() -> Iterable[tuple[str, ...]]:\r\n \"\"\"\r\n Convert the gradio_auth and gradio_auth_path commandline arguments into\r\n an iterable of (username, password) tuples.\r\n \"\"\"\r\n def process_credential_line(s) -> tuple[str, ...] 
| None:\r\n s = s.strip()\r\n if not s:\r\n return None\r\n return tuple(s.split(':', 1))\r\n\r\n if cmd_opts.gradio_auth:\r\n for cred in cmd_opts.gradio_auth.split(','):\r\n cred = process_credential_line(cred)\r\n if cred:\r\n yield cred\r\n\r\n if cmd_opts.gradio_auth_path:\r\n with open(cmd_opts.gradio_auth_path, 'r', encoding=\"utf8\") as file:\r\n for line in file.readlines():\r\n for cred in line.strip().split(','):\r\n cred = process_credential_line(cred)\r\n if cred:\r\n yield cred\r\n\r\n\r\ndef configure_sigint_handler():\r\n # make the program just exit at ctrl+c without waiting for anything\r\n def sigint_handler(sig, frame):\r\n print(f'Interrupted with signal {sig} in {frame}')\r\n os._exit(0)\r\n\r\n if not os.environ.get(\"COVERAGE_RUN\"):\r\n # Don't install the immediate-quit handler when running under coverage,\r\n # as then the coverage report won't be generated.\r\n signal.signal(signal.SIGINT, sigint_handler)\r\n\r\n\r\ndef configure_opts_onchange():\r\n shared.opts.onchange(\"sd_model_checkpoint\", wrap_queued_call(lambda: modules.sd_models.reload_model_weights()), call=False)\r\n shared.opts.onchange(\"sd_vae\", wrap_queued_call(lambda: modules.sd_vae.reload_vae_weights()), call=False)\r\n shared.opts.onchange(\"sd_vae_overrides_per_model_preferences\", wrap_queued_call(lambda: modules.sd_vae.reload_vae_weights()), call=False)\r\n shared.opts.onchange(\"temp_dir\", ui_tempdir.on_tmpdir_changed)\r\n shared.opts.onchange(\"gradio_theme\", shared.reload_gradio_theme)\r\n shared.opts.onchange(\"cross_attention_optimization\", wrap_queued_call(lambda: modules.sd_hijack.model_hijack.redo_hijack(shared.sd_model)), call=False)\r\n startup_timer.record(\"opts onchange\")\r\n\r\n\r\ndef initialize():\r\n fix_asyncio_event_loop_policy()\r\n validate_tls_options()\r\n configure_sigint_handler()\r\n modelloader.cleanup_models()\r\n configure_opts_onchange()\r\n\r\n modules.sd_models.setup_model()\r\n startup_timer.record(\"setup SD model\")\r\n\r\n codeformer.setup_model(cmd_opts.codeformer_models_path)\r\n startup_timer.record(\"setup codeformer\")\r\n\r\n gfpgan.setup_model(cmd_opts.gfpgan_models_path)\r\n startup_timer.record(\"setup gfpgan\")\r\n\r\n initialize_rest(reload_script_modules=False)\r\n\r\n\r\ndef initialize_rest(*, reload_script_modules=False):\r\n \"\"\"\r\n Called both from initialize() and when reloading the webui.\r\n \"\"\"\r\n sd_samplers.set_samplers()\r\n extensions.list_extensions()\r\n startup_timer.record(\"list extensions\")\r\n\r\n restore_config_state_file()\r\n\r\n if cmd_opts.ui_debug_mode:\r\n shared.sd_upscalers = upscaler.UpscalerLanczos().scalers\r\n modules.scripts.load_scripts()\r\n return\r\n\r\n modules.sd_models.list_models()\r\n startup_timer.record(\"list SD models\")\r\n\r\n localization.list_localizations(cmd_opts.localizations_dir)\r\n\r\n with startup_timer.subcategory(\"load scripts\"):\r\n modules.scripts.load_scripts()\r\n\r\n if reload_script_modules:\r\n for module in [module for name, module in sys.modules.items() if name.startswith(\"modules.ui\")]:\r\n importlib.reload(module)\r\n startup_timer.record(\"reload script modules\")\r\n\r\n modelloader.load_upscalers()\r\n startup_timer.record(\"load upscalers\")\r\n\r\n modules.sd_vae.refresh_vae_list()\r\n startup_timer.record(\"refresh VAE\")\r\n modules.textual_inversion.textual_inversion.list_textual_inversion_templates()\r\n startup_timer.record(\"refresh textual inversion templates\")\r\n\r\n 
modules.script_callbacks.on_list_optimizers(modules.sd_hijack_optimizations.list_optimizers)\r\n modules.sd_hijack.list_optimizers()\r\n startup_timer.record(\"scripts list_optimizers\")\r\n\r\n modules.sd_unet.list_unets()\r\n startup_timer.record(\"scripts list_unets\")\r\n\r\n def load_model():\r\n \"\"\"\r\n Accesses shared.sd_model property to load model.\r\n After it's available, if it has been loaded before this access by some extension,\r\n its optimization may be None because the list of optimizaers has neet been filled\r\n by that time, so we apply optimization again.\r\n \"\"\"\r\n\r\n shared.sd_model # noqa: B018\r\n\r\n if modules.sd_hijack.current_optimizer is None:\r\n modules.sd_hijack.apply_optimizations()\r\n\r\n devices.first_time_calculation()\r\n\r\n Thread(target=load_model).start()\r\n\r\n shared.reload_hypernetworks()\r\n startup_timer.record(\"reload hypernetworks\")\r\n\r\n ui_extra_networks.initialize()\r\n ui_extra_networks.register_default_pages()\r\n\r\n extra_networks.initialize()\r\n extra_networks.register_default_extra_networks()\r\n startup_timer.record(\"initialize extra networks\")\r\n\r\n\r\ndef setup_middleware(app):\r\n app.middleware_stack = None # reset current middleware to allow modifying user provided list\r\n app.add_middleware(GZipMiddleware, minimum_size=1000)\r\n configure_cors_middleware(app)\r\n app.build_middleware_stack() # rebuild middleware stack on-the-fly\r\n\r\n\r\ndef configure_cors_middleware(app):\r\n cors_options = {\r\n \"allow_methods\": [\"*\"],\r\n \"allow_headers\": [\"*\"],\r\n \"allow_credentials\": True,\r\n }\r\n if cmd_opts.cors_allow_origins:\r\n cors_options[\"allow_origins\"] = cmd_opts.cors_allow_origins.split(',')\r\n if cmd_opts.cors_allow_origins_regex:\r\n cors_options[\"allow_origin_regex\"] = cmd_opts.cors_allow_origins_regex\r\n app.add_middleware(CORSMiddleware, **cors_options)\r\n\r\n\r\ndef create_api(app):\r\n from modules.api.api import Api\r\n api = Api(app, queue_lock)\r\n return api\r\n\r\n\r\ndef api_only():\r\n initialize()\r\n\r\n app = FastAPI()\r\n setup_middleware(app)\r\n api = create_api(app)\r\n\r\n modules.script_callbacks.before_ui_callback()\r\n modules.script_callbacks.app_started_callback(None, app)\r\n\r\n print(f\"Startup time: {startup_timer.summary()}.\")\r\n api.launch(\r\n server_name=\"0.0.0.0\" if cmd_opts.listen else \"127.0.0.1\",\r\n port=cmd_opts.port if cmd_opts.port else 7861,\r\n root_path=f\"/{cmd_opts.subpath}\" if cmd_opts.subpath else \"\"\r\n )\r\n\r\n\r\ndef webui():\r\n launch_api = cmd_opts.api\r\n initialize()\r\n\r\n while 1:\r\n if shared.opts.clean_temp_dir_at_start:\r\n ui_tempdir.cleanup_tmpdr()\r\n startup_timer.record(\"cleanup temp dir\")\r\n\r\n modules.script_callbacks.before_ui_callback()\r\n startup_timer.record(\"scripts before_ui_callback\")\r\n\r\n shared.demo = modules.ui.create_ui()\r\n startup_timer.record(\"create ui\")\r\n\r\n if not cmd_opts.no_gradio_queue:\r\n shared.demo.queue(64)\r\n\r\n gradio_auth_creds = list(get_gradio_auth_creds()) or None\r\n\r\n app, local_url, share_url = shared.demo.launch(\r\n share=cmd_opts.share,\r\n server_name=server_name,\r\n server_port=cmd_opts.port,\r\n ssl_keyfile=cmd_opts.tls_keyfile,\r\n ssl_certfile=cmd_opts.tls_certfile,\r\n ssl_verify=cmd_opts.disable_tls_verify,\r\n debug=cmd_opts.gradio_debug,\r\n auth=gradio_auth_creds,\r\n inbrowser=cmd_opts.autolaunch and os.getenv('SD_WEBUI_RESTARTING') != '1',\r\n prevent_thread_lock=True,\r\n allowed_paths=cmd_opts.gradio_allowed_path,\r\n app_kwargs={\r\n 
\"docs_url\": \"/docs\",\r\n \"redoc_url\": \"/redoc\",\r\n },\r\n root_path=f\"/{cmd_opts.subpath}\" if cmd_opts.subpath else \"\",\r\n )\r\n\r\n # after initial launch, disable --autolaunch for subsequent restarts\r\n cmd_opts.autolaunch = False\r\n\r\n startup_timer.record(\"gradio launch\")\r\n\r\n # gradio uses a very open CORS policy via app.user_middleware, which makes it possible for\r\n # an attacker to trick the user into opening a malicious HTML page, which makes a request to the\r\n # running web ui and do whatever the attacker wants, including installing an extension and\r\n # running its code. We disable this here. Suggested by RyotaK.\r\n app.user_middleware = [x for x in app.user_middleware if x.cls.__name__ != 'CORSMiddleware']\r\n\r\n setup_middleware(app)\r\n\r\n modules.progress.setup_progress_api(app)\r\n modules.ui.setup_ui_api(app)\r\n\r\n if launch_api:\r\n create_api(app)\r\n\r\n ui_extra_networks.add_pages_to_demo(app)\r\n\r\n startup_timer.record(\"add APIs\")\r\n\r\n with startup_timer.subcategory(\"app_started_callback\"):\r\n modules.script_callbacks.app_started_callback(shared.demo, app)\r\n\r\n timer.startup_record = startup_timer.dump()\r\n print(f\"Startup time: {startup_timer.summary()}.\")\r\n\r\n try:\r\n while True:\r\n server_command = shared.state.wait_for_server_command(timeout=5)\r\n if server_command:\r\n if server_command in (\"stop\", \"restart\"):\r\n break\r\n else:\r\n print(f\"Unknown server command: {server_command}\")\r\n except KeyboardInterrupt:\r\n print('Caught KeyboardInterrupt, stopping...')\r\n server_command = \"stop\"\r\n\r\n if server_command == \"stop\":\r\n print(\"Stopping server...\")\r\n # If we catch a keyboard interrupt, we want to stop the server and exit.\r\n shared.demo.close()\r\n break\r\n\r\n print('Restarting UI...')\r\n shared.demo.close()\r\n time.sleep(0.5)\r\n startup_timer.reset()\r\n modules.script_callbacks.app_reload_callback()\r\n startup_timer.record(\"app reload callback\")\r\n modules.script_callbacks.script_unloaded_callback()\r\n startup_timer.record(\"scripts unloaded callback\")\r\n initialize_rest(reload_script_modules=True)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n if cmd_opts.nowebui:\r\n api_only()\r\n else:\r\n webui()\r\n", "path": "webui.py" } ]
diff --git a/webui.py b/webui.py
index a5b115759af..86a62a920ae 100644
--- a/webui.py
+++ b/webui.py
@@ -341,6 +341,7 @@ def api_only():
     setup_middleware(app)
     api = create_api(app)
 
+    modules.script_callbacks.before_ui_callback()
     modules.script_callbacks.app_started_callback(None, app)
 
     print(f"Startup time: {startup_timer.summary()}.")
ansible-collections__amazon.aws-337
ec2_eni idempotence bug
<!--- Verify first that your issue is not already reported on GitHub -->
<!--- Also test if the latest release and devel branch are affected too -->
<!--- Complete *all* sections as described, this form is processed automatically -->
##### SUMMARY
The encoding of groups causes a bytes type to compare unequal to a string type, so idempotence fails. This is the line at fault, I believe: https://github.com/ansible-collections/amazon.aws/blame/ac6b2cd478773befdde43bfadc0de40969ad4d0b/plugins/modules/ec2_eni.py#L772
##### ISSUE TYPE
- Bug Report
##### COMPONENT NAME
ec2_eni
##### ANSIBLE VERSION
<!--- Paste verbatim output from "ansible --version" between quotes -->
```paste below
ansible 2.10.5
python version = 3.9.1 (default, Jan 8 2021, 17:17:17) [Clang 12.0.0 (clang-1200.0.32.28)]
```
##### STEPS TO REPRODUCE
Pass security group names to the ec2_eni module & it will always report a change.
##### EXPECTED RESULTS
It should not report a change.
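As a minimal sketch of the str/bytes mismatch described in the summary (illustrative only — this is not code from the module, and the security group ID below is a placeholder):

```python
# Sketch of the comparison failure: get_sec_group_list() encodes the current
# group IDs to bytes, while the desired IDs from the name-to-ID lookup are str,
# so the equality check in modify_eni() never matches and the module always
# reports "changed".
current_groups = ["sg-0123456789abcdef0".encode()]  # bytes, as built from the ENI's existing groups
desired_groups = ["sg-0123456789abcdef0"]           # str, as resolved from the requested group names

# b"sg-..." != "sg-..." in Python 3, so this is always True -> spurious change.
print(sorted(current_groups) != sorted(desired_groups))  # True

# Dropping the .encode() keeps both sides as str and restores idempotence.
current_groups = ["sg-0123456789abcdef0"]
print(sorted(current_groups) != sorted(desired_groups))  # False
```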
[ { "content": "#!/usr/bin/python\n#\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nDOCUMENTATION = '''\n---\nmodule: ec2_eni\nversion_added: 1.0.0\nshort_description: Create and optionally attach an Elastic Network Interface (ENI) to an instance\ndescription:\n - Create and optionally attach an Elastic Network Interface (ENI) to an instance. If an ENI ID or private_ip is\n provided, the existing ENI (if any) will be modified. The 'attached' parameter controls the attachment status\n of the network interface.\nauthor:\n - \"Rob White (@wimnat)\"\n - \"Mike Healey (@healem)\"\noptions:\n eni_id:\n description:\n - The ID of the ENI (to modify).\n - If I(eni_id=None) and I(state=present), a new eni will be created.\n type: str\n instance_id:\n description:\n - Instance ID that you wish to attach ENI to.\n - Since version 2.2, use the I(attached) parameter to attach or detach an ENI. Prior to 2.2, to detach an ENI from an instance, use C(None).\n type: str\n private_ip_address:\n description:\n - Private IP address.\n type: str\n subnet_id:\n description:\n - ID of subnet in which to create the ENI.\n type: str\n description:\n description:\n - Optional description of the ENI.\n type: str\n security_groups:\n description:\n - List of security groups associated with the interface. Only used when I(state=present).\n - Since version 2.2, you can specify security groups by ID or by name or a combination of both. Prior to 2.2, you can specify only by ID.\n type: list\n elements: str\n state:\n description:\n - Create or delete ENI.\n default: present\n choices: [ 'present', 'absent' ]\n type: str\n device_index:\n description:\n - The index of the device for the network interface attachment on the instance.\n default: 0\n type: int\n attached:\n description:\n - Specifies if network interface should be attached or detached from instance. If omitted, attachment status\n won't change\n type: bool\n force_detach:\n description:\n - Force detachment of the interface. This applies either when explicitly detaching the interface by setting I(instance_id=None)\n or when deleting an interface with I(state=absent).\n default: false\n type: bool\n delete_on_termination:\n description:\n - Delete the interface when the instance it is attached to is terminated. You can only specify this flag when the\n interface is being modified, not on creation.\n required: false\n type: bool\n source_dest_check:\n description:\n - By default, interfaces perform source/destination checks. NAT instances however need this check to be disabled.\n You can only specify this flag when the interface is being modified, not on creation.\n required: false\n type: bool\n secondary_private_ip_addresses:\n description:\n - A list of IP addresses to assign as secondary IP addresses to the network interface.\n This option is mutually exclusive of I(secondary_private_ip_address_count)\n required: false\n type: list\n elements: str\n purge_secondary_private_ip_addresses:\n description:\n - To be used with I(secondary_private_ip_addresses) to determine whether or not to remove any secondary IP addresses other than those specified.\n - Set I(secondary_private_ip_addresses=[]) to purge all secondary addresses.\n default: false\n type: bool\n secondary_private_ip_address_count:\n description:\n - The number of secondary IP addresses to assign to the network interface. 
This option is mutually exclusive of I(secondary_private_ip_addresses)\n required: false\n type: int\n allow_reassignment:\n description:\n - Indicates whether to allow an IP address that is already assigned to another network interface or instance\n to be reassigned to the specified network interface.\n required: false\n default: false\n type: bool\n name:\n description:\n - Name for the ENI. This will create a tag called \"Name\" with the value assigned here.\n - This can be used in conjunction with I(subnet_id) as another means of identifiying a network interface.\n - AWS does not enforce unique Name tags, so duplicate names are possible if you configure it that way.\n If that is the case, you will need to provide other identifying information such as I(private_ip_address) or I(eni_id).\n required: false\n type: str\n tags:\n description:\n - A hash/dictionary of tags to add to the new ENI or to add/remove from an existing one. Please note that\n the name field sets the \"Name\" tag.\n - To clear all tags, set this option to an empty dictionary to use in conjunction with I(purge_tags).\n If you provide I(name), that tag will not be removed.\n - To prevent removing any tags set I(purge_tags) to false.\n type: dict\n required: false\n version_added: 1.3.0\n purge_tags:\n description:\n - Indicates whether to remove tags not specified in I(tags) or I(name). This means you have to specify all\n the desired tags on each task affecting a network interface.\n - If I(tags) is omitted or None this option is disregarded.\n default: true\n type: bool\n version_added: 1.3.0\nextends_documentation_fragment:\n- amazon.aws.aws\n- amazon.aws.ec2\n\nnotes:\n - This module identifies and ENI based on either the I(eni_id), a combination of I(private_ip_address) and I(subnet_id),\n or a combination of I(instance_id) and I(device_id). Any of these options will let you specify a particular ENI.\n'''\n\nEXAMPLES = '''\n# Note: These examples do not set authentication details, see the AWS Guide for details.\n\n# Create an ENI. 
As no security group is defined, ENI will be created in default security group\n- amazon.aws.ec2_eni:\n private_ip_address: 172.31.0.20\n subnet_id: subnet-xxxxxxxx\n state: present\n\n# Create an ENI and attach it to an instance\n- amazon.aws.ec2_eni:\n instance_id: i-xxxxxxx\n device_index: 1\n private_ip_address: 172.31.0.20\n subnet_id: subnet-xxxxxxxx\n state: present\n\n# Create an ENI with two secondary addresses\n- amazon.aws.ec2_eni:\n subnet_id: subnet-xxxxxxxx\n state: present\n secondary_private_ip_address_count: 2\n\n# Assign a secondary IP address to an existing ENI\n# This will purge any existing IPs\n- amazon.aws.ec2_eni:\n subnet_id: subnet-xxxxxxxx\n eni_id: eni-yyyyyyyy\n state: present\n secondary_private_ip_addresses:\n - 172.16.1.1\n\n# Remove any secondary IP addresses from an existing ENI\n- amazon.aws.ec2_eni:\n subnet_id: subnet-xxxxxxxx\n eni_id: eni-yyyyyyyy\n state: present\n secondary_private_ip_address_count: 0\n\n# Destroy an ENI, detaching it from any instance if necessary\n- amazon.aws.ec2_eni:\n eni_id: eni-xxxxxxx\n force_detach: true\n state: absent\n\n# Update an ENI\n- amazon.aws.ec2_eni:\n eni_id: eni-xxxxxxx\n description: \"My new description\"\n state: present\n\n# Update an ENI using name and subnet_id\n- amazon.aws.ec2_eni:\n name: eni-20\n subnet_id: subnet-xxxxxxx\n description: \"My new description\"\n state: present\n\n# Update an ENI identifying it by private_ip_address and subnet_id\n- amazon.aws.ec2_eni:\n subnet_id: subnet-xxxxxxx\n private_ip_address: 172.16.1.1\n description: \"My new description\"\n\n# Detach an ENI from an instance\n- amazon.aws.ec2_eni:\n eni_id: eni-xxxxxxx\n instance_id: None\n state: present\n\n### Delete an interface on termination\n# First create the interface\n- amazon.aws.ec2_eni:\n instance_id: i-xxxxxxx\n device_index: 1\n private_ip_address: 172.31.0.20\n subnet_id: subnet-xxxxxxxx\n state: present\n register: eni\n\n# Modify the interface to enable the delete_on_terminaton flag\n- amazon.aws.ec2_eni:\n eni_id: \"{{ eni.interface.id }}\"\n delete_on_termination: true\n\n'''\n\n\nRETURN = '''\ninterface:\n description: Network interface attributes\n returned: when state != absent\n type: complex\n contains:\n description:\n description: interface description\n type: str\n sample: Firewall network interface\n groups:\n description: list of security groups\n type: list\n elements: dict\n sample: [ { \"sg-f8a8a9da\": \"default\" } ]\n id:\n description: network interface id\n type: str\n sample: \"eni-1d889198\"\n mac_address:\n description: interface's physical address\n type: str\n sample: \"00:00:5E:00:53:23\"\n name:\n description: The name of the ENI\n type: str\n sample: \"my-eni-20\"\n owner_id:\n description: aws account id\n type: str\n sample: 812381371\n private_ip_address:\n description: primary ip address of this interface\n type: str\n sample: 10.20.30.40\n private_ip_addresses:\n description: list of all private ip addresses associated to this interface\n type: list\n elements: dict\n sample: [ { \"primary_address\": true, \"private_ip_address\": \"10.20.30.40\" } ]\n source_dest_check:\n description: value of source/dest check flag\n type: bool\n sample: True\n status:\n description: network interface status\n type: str\n sample: \"pending\"\n subnet_id:\n description: which vpc subnet the interface is bound\n type: str\n sample: subnet-b0a0393c\n tags:\n description: The dictionary of tags associated with the ENI\n type: dict\n sample: { \"Name\": \"my-eni\", \"group\": \"Finance\" }\n 
vpc_id:\n description: which vpc this network interface is bound\n type: str\n sample: vpc-9a9a9da\n\n'''\n\nimport time\n\ntry:\n import botocore.exceptions\nexcept ImportError:\n pass # Handled by AnsibleAWSModule\n\nfrom ..module_utils.core import AnsibleAWSModule\nfrom ..module_utils.core import is_boto3_error_code\nfrom ..module_utils.ec2 import AWSRetry\nfrom ..module_utils.ec2 import ansible_dict_to_boto3_tag_list\nfrom ..module_utils.ec2 import get_ec2_security_group_ids_from_names\nfrom ..module_utils.ec2 import boto3_tag_list_to_ansible_dict\nfrom ..module_utils.ec2 import compare_aws_tags\nfrom ..module_utils.waiters import get_waiter\n\n\ndef get_eni_info(interface):\n\n # Private addresses\n private_addresses = []\n if \"PrivateIpAddresses\" in interface:\n for ip in interface[\"PrivateIpAddresses\"]:\n private_addresses.append({'private_ip_address': ip[\"PrivateIpAddress\"], 'primary_address': ip[\"Primary\"]})\n\n groups = {}\n if \"Groups\" in interface:\n for group in interface[\"Groups\"]:\n groups[group[\"GroupId\"]] = group[\"GroupName\"]\n\n interface_info = {'id': interface.get(\"NetworkInterfaceId\"),\n 'subnet_id': interface.get(\"SubnetId\"),\n 'vpc_id': interface.get(\"VpcId\"),\n 'description': interface.get(\"Description\"),\n 'owner_id': interface.get(\"OwnerId\"),\n 'status': interface.get(\"Status\"),\n 'mac_address': interface.get(\"MacAddress\"),\n 'private_ip_address': interface.get(\"PrivateIpAddress\"),\n 'source_dest_check': interface.get(\"SourceDestCheck\"),\n 'groups': groups,\n 'private_ip_addresses': private_addresses\n }\n\n if \"TagSet\" in interface:\n tags = {}\n name = None\n for tag in interface[\"TagSet\"]:\n tags[tag[\"Key\"]] = tag[\"Value\"]\n if tag[\"Key\"] == \"Name\":\n name = tag[\"Value\"]\n interface_info[\"tags\"] = tags\n\n if name is not None:\n interface_info[\"name\"] = name\n\n if \"Attachment\" in interface:\n interface_info['attachment'] = {\n 'attachment_id': interface[\"Attachment\"].get(\"AttachmentId\"),\n 'instance_id': interface[\"Attachment\"].get(\"InstanceId\"),\n 'device_index': interface[\"Attachment\"].get(\"DeviceIndex\"),\n 'status': interface[\"Attachment\"].get(\"Status\"),\n 'attach_time': interface[\"Attachment\"].get(\"AttachTime\"),\n 'delete_on_termination': interface[\"Attachment\"].get(\"DeleteOnTermination\"),\n }\n\n return interface_info\n\n\ndef correct_ips(connection, ip_list, module, eni_id):\n all_there = True\n eni = describe_eni(connection, module, eni_id)\n private_addresses = set()\n if \"PrivateIpAddresses\" in eni:\n for ip in eni[\"PrivateIpAddresses\"]:\n private_addresses.add(ip[\"PrivateIpAddress\"])\n\n ip_set = set(ip_list)\n\n return ip_set.issubset(private_addresses)\n\n\ndef absent_ips(connection, ip_list, module, eni_id):\n all_there = True\n eni = describe_eni(connection, module, eni_id)\n private_addresses = set()\n if \"PrivateIpAddresses\" in eni:\n for ip in eni[\"PrivateIpAddresses\"]:\n private_addresses.add(ip[\"PrivateIpAddress\"])\n\n ip_set = set(ip_list)\n\n return not ip_set.union(private_addresses)\n\n\ndef correct_ip_count(connection, ip_count, module, eni_id):\n eni = describe_eni(connection, module, eni_id)\n private_addresses = set()\n if \"PrivateIpAddresses\" in eni:\n for ip in eni[\"PrivateIpAddresses\"]:\n private_addresses.add(ip[\"PrivateIpAddress\"])\n\n if len(private_addresses) == ip_count:\n return True\n else:\n return False\n\n\ndef wait_for(function_pointer, *args):\n max_wait = 30\n interval_time = 3\n current_wait = 0\n while current_wait < 
max_wait:\n time.sleep(interval_time)\n current_wait += interval_time\n if function_pointer(*args):\n break\n\n\ndef create_eni(connection, vpc_id, module):\n\n instance_id = module.params.get(\"instance_id\")\n attached = module.params.get(\"attached\")\n if instance_id == 'None':\n instance_id = None\n device_index = module.params.get(\"device_index\")\n subnet_id = module.params.get('subnet_id')\n private_ip_address = module.params.get('private_ip_address')\n description = module.params.get('description')\n security_groups = get_ec2_security_group_ids_from_names(\n module.params.get('security_groups'),\n connection,\n vpc_id=vpc_id,\n boto3=True\n )\n secondary_private_ip_addresses = module.params.get(\"secondary_private_ip_addresses\")\n secondary_private_ip_address_count = module.params.get(\"secondary_private_ip_address_count\")\n changed = False\n tags = module.params.get(\"tags\")\n name = module.params.get(\"name\")\n purge_tags = module.params.get(\"purge_tags\")\n\n try:\n args = {\"SubnetId\": subnet_id}\n if private_ip_address:\n args[\"PrivateIpAddress\"] = private_ip_address\n if description:\n args[\"Description\"] = description\n if len(security_groups) > 0:\n args[\"Groups\"] = security_groups\n eni_dict = connection.create_network_interface(aws_retry=True, **args)\n eni = eni_dict[\"NetworkInterface\"]\n # Once we have an ID make sure we're always modifying the same object\n eni_id = eni[\"NetworkInterfaceId\"]\n get_waiter(connection, 'network_interface_available').wait(NetworkInterfaceIds=[eni_id])\n\n if attached and instance_id is not None:\n try:\n connection.attach_network_interface(\n aws_retry=True,\n InstanceId=instance_id,\n DeviceIndex=device_index,\n NetworkInterfaceId=eni[\"NetworkInterfaceId\"]\n )\n except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError):\n connection.delete_network_interface(aws_retry=True, NetworkInterfaceId=eni_id)\n raise\n get_waiter(connection, 'network_interface_attached').wait(NetworkInterfaceIds=[eni_id])\n\n if secondary_private_ip_address_count is not None:\n try:\n connection.assign_private_ip_addresses(\n aws_retry=True,\n NetworkInterfaceId=eni[\"NetworkInterfaceId\"],\n SecondaryPrivateIpAddressCount=secondary_private_ip_address_count\n )\n wait_for(correct_ip_count, connection, secondary_private_ip_address_count, module, eni_id)\n except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError):\n connection.delete_network_interface(aws_retry=True, NetworkInterfaceId=eni_id)\n raise\n\n if secondary_private_ip_addresses is not None:\n try:\n connection.assign_private_ip_addresses(\n NetworkInterfaceId=eni[\"NetworkInterfaceId\"],\n PrivateIpAddresses=secondary_private_ip_addresses\n )\n wait_for(correct_ips, connection, secondary_private_ip_addresses, module, eni_id)\n except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError):\n connection.delete_network_interface(aws_retry=True, NetworkInterfaceId=eni_id)\n raise\n\n manage_tags(eni, name, tags, purge_tags, connection)\n\n # Refresh the eni data\n eni = describe_eni(connection, module, eni_id)\n changed = True\n\n except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:\n module.fail_json_aws(\n e,\n \"Failed to create eni {0} for {1} in {2} with {3}\".format(name, subnet_id, vpc_id, private_ip_address)\n )\n\n module.exit_json(changed=changed, interface=get_eni_info(eni))\n\n\ndef modify_eni(connection, module, eni):\n\n instance_id = module.params.get(\"instance_id\")\n attached = 
module.params.get(\"attached\")\n device_index = module.params.get(\"device_index\")\n description = module.params.get('description')\n security_groups = module.params.get('security_groups')\n force_detach = module.params.get(\"force_detach\")\n source_dest_check = module.params.get(\"source_dest_check\")\n delete_on_termination = module.params.get(\"delete_on_termination\")\n secondary_private_ip_addresses = module.params.get(\"secondary_private_ip_addresses\")\n purge_secondary_private_ip_addresses = module.params.get(\"purge_secondary_private_ip_addresses\")\n secondary_private_ip_address_count = module.params.get(\"secondary_private_ip_address_count\")\n allow_reassignment = module.params.get(\"allow_reassignment\")\n changed = False\n tags = module.params.get(\"tags\")\n name = module.params.get(\"name\")\n purge_tags = module.params.get(\"purge_tags\")\n\n eni = uniquely_find_eni(connection, module, eni)\n eni_id = eni[\"NetworkInterfaceId\"]\n\n try:\n if description is not None:\n if \"Description\" not in eni or eni[\"Description\"] != description:\n connection.modify_network_interface_attribute(\n aws_retry=True,\n NetworkInterfaceId=eni_id,\n Description={'Value': description}\n )\n changed = True\n if len(security_groups) > 0:\n groups = get_ec2_security_group_ids_from_names(security_groups, connection, vpc_id=eni[\"VpcId\"], boto3=True)\n if sorted(get_sec_group_list(eni[\"Groups\"])) != sorted(groups):\n connection.modify_network_interface_attribute(\n aws_retry=True,\n NetworkInterfaceId=eni_id,\n Groups=groups\n )\n changed = True\n if source_dest_check is not None:\n if \"SourceDestCheck\" not in eni or eni[\"SourceDestCheck\"] != source_dest_check:\n connection.modify_network_interface_attribute(\n aws_retry=True,\n NetworkInterfaceId=eni_id,\n SourceDestCheck={'Value': source_dest_check}\n )\n changed = True\n if delete_on_termination is not None and \"Attachment\" in eni:\n if eni[\"Attachment\"][\"DeleteOnTermination\"] is not delete_on_termination:\n connection.modify_network_interface_attribute(\n aws_retry=True,\n NetworkInterfaceId=eni_id,\n Attachment={'AttachmentId': eni[\"Attachment\"][\"AttachmentId\"],\n 'DeleteOnTermination': delete_on_termination}\n )\n changed = True\n if delete_on_termination:\n waiter = \"network_interface_delete_on_terminate\"\n else:\n waiter = \"network_interface_no_delete_on_terminate\"\n get_waiter(connection, waiter).wait(NetworkInterfaceIds=[eni_id])\n\n current_secondary_addresses = []\n if \"PrivateIpAddresses\" in eni:\n current_secondary_addresses = [i[\"PrivateIpAddress\"] for i in eni[\"PrivateIpAddresses\"] if not i[\"Primary\"]]\n\n if secondary_private_ip_addresses is not None:\n secondary_addresses_to_remove = list(set(current_secondary_addresses) - set(secondary_private_ip_addresses))\n if secondary_addresses_to_remove and purge_secondary_private_ip_addresses:\n connection.unassign_private_ip_addresses(\n aws_retry=True,\n NetworkInterfaceId=eni_id,\n PrivateIpAddresses=list(set(current_secondary_addresses) - set(secondary_private_ip_addresses)),\n )\n wait_for(absent_ips, connection, secondary_addresses_to_remove, module, eni_id)\n changed = True\n secondary_addresses_to_add = list(set(secondary_private_ip_addresses) - set(current_secondary_addresses))\n if secondary_addresses_to_add:\n connection.assign_private_ip_addresses(\n aws_retry=True,\n NetworkInterfaceId=eni_id,\n PrivateIpAddresses=secondary_addresses_to_add,\n AllowReassignment=allow_reassignment\n )\n wait_for(correct_ips, connection, 
secondary_addresses_to_add, module, eni_id)\n changed = True\n\n if secondary_private_ip_address_count is not None:\n current_secondary_address_count = len(current_secondary_addresses)\n if secondary_private_ip_address_count > current_secondary_address_count:\n connection.assign_private_ip_addresses(\n aws_retry=True,\n NetworkInterfaceId=eni_id,\n SecondaryPrivateIpAddressCount=(secondary_private_ip_address_count - current_secondary_address_count),\n AllowReassignment=allow_reassignment\n )\n wait_for(correct_ip_count, connection, secondary_private_ip_address_count, module, eni_id)\n changed = True\n elif secondary_private_ip_address_count < current_secondary_address_count:\n # How many of these addresses do we want to remove\n secondary_addresses_to_remove_count = current_secondary_address_count - secondary_private_ip_address_count\n connection.unassign_private_ip_addresses(\n aws_retry=True,\n NetworkInterfaceId=eni_id,\n PrivateIpAddresses=current_secondary_addresses[:secondary_addresses_to_remove_count]\n )\n wait_for(correct_ip_count, connection, secondary_private_ip_address_count, module, eni_id)\n changed = True\n\n if attached is True:\n if \"Attachment\" in eni and eni[\"Attachment\"][\"InstanceId\"] != instance_id:\n detach_eni(connection, eni, module)\n connection.attach_network_interface(\n aws_retry=True,\n InstanceId=instance_id,\n DeviceIndex=device_index,\n NetworkInterfaceId=eni_id,\n )\n get_waiter(connection, 'network_interface_attached').wait(NetworkInterfaceIds=[eni_id])\n changed = True\n if \"Attachment\" not in eni:\n connection.attach_network_interface(\n aws_retry=True,\n InstanceId=instance_id,\n DeviceIndex=device_index,\n NetworkInterfaceId=eni_id,\n )\n get_waiter(connection, 'network_interface_attached').wait(NetworkInterfaceIds=[eni_id])\n changed = True\n\n elif attached is False:\n changed |= detach_eni(connection, eni, module)\n get_waiter(connection, 'network_interface_available').wait(NetworkInterfaceIds=[eni_id])\n\n changed |= manage_tags(eni, name, tags, purge_tags, connection)\n\n except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:\n module.fail_json_aws(e, \"Failed to modify eni {0}\".format(eni_id))\n\n eni = describe_eni(connection, module, eni_id)\n module.exit_json(changed=changed, interface=get_eni_info(eni))\n\n\ndef delete_eni(connection, module):\n\n eni = uniquely_find_eni(connection, module)\n if not eni:\n module.exit_json(changed=False)\n\n eni_id = eni[\"NetworkInterfaceId\"]\n force_detach = module.params.get(\"force_detach\")\n\n try:\n if force_detach is True:\n if \"Attachment\" in eni:\n connection.detach_network_interface(\n aws_retry=True,\n AttachmentId=eni[\"Attachment\"][\"AttachmentId\"],\n Force=True\n )\n # Wait to allow detachment to finish\n get_waiter(connection, 'network_interface_available').wait(NetworkInterfaceIds=[eni_id])\n connection.delete_network_interface(aws_retry=True, NetworkInterfaceId=eni_id)\n changed = True\n else:\n connection.delete_network_interface(aws_retry=True, NetworkInterfaceId=eni_id)\n changed = True\n\n module.exit_json(changed=changed)\n except is_boto3_error_code('InvalidNetworkInterfaceID.NotFound'):\n module.exit_json(changed=False)\n except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except\n module.fail_json_aws(e, \"Failure during delete of {0}\".format(eni_id))\n\n\ndef detach_eni(connection, eni, module):\n\n attached = module.params.get(\"attached\")\n eni_id = eni[\"NetworkInterfaceId\"]\n\n 
force_detach = module.params.get(\"force_detach\")\n if \"Attachment\" in eni:\n connection.detach_network_interface(\n aws_retry=True,\n AttachmentId=eni[\"Attachment\"][\"AttachmentId\"],\n Force=force_detach\n )\n get_waiter(connection, 'network_interface_available').wait(NetworkInterfaceIds=[eni_id])\n return True\n\n return False\n\n\ndef describe_eni(connection, module, eni_id):\n try:\n eni_result = connection.describe_network_interfaces(aws_retry=True, NetworkInterfaceIds=[eni_id])\n if eni_result[\"NetworkInterfaces\"]:\n return eni_result[\"NetworkInterfaces\"][0]\n else:\n return None\n except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:\n module.fail_json_aws(e, \"Failed to describe eni with id: {0}\".format(eni_id))\n\n\ndef uniquely_find_eni(connection, module, eni=None):\n\n if eni:\n # In the case of create, eni_id will not be a param but we can still get the eni_id after creation\n if \"NetworkInterfaceId\" in eni:\n eni_id = eni[\"NetworkInterfaceId\"]\n else:\n eni_id = None\n else:\n eni_id = module.params.get(\"eni_id\")\n\n private_ip_address = module.params.get('private_ip_address')\n subnet_id = module.params.get('subnet_id')\n instance_id = module.params.get('instance_id')\n device_index = module.params.get('device_index')\n attached = module.params.get('attached')\n name = module.params.get(\"name\")\n\n filters = []\n\n # proceed only if we're unequivocally specifying an ENI\n if eni_id is None and private_ip_address is None and (instance_id is None and device_index is None):\n return None\n\n if eni_id:\n filters.append({'Name': 'network-interface-id',\n 'Values': [eni_id]})\n\n if private_ip_address and subnet_id and not filters:\n filters.append({'Name': 'private-ip-address',\n 'Values': [private_ip_address]})\n filters.append({'Name': 'subnet-id',\n 'Values': [subnet_id]})\n\n if not attached and instance_id and device_index and not filters:\n filters.append({'Name': 'attachment.instance-id',\n 'Values': [instance_id]})\n filters.append({'Name': 'attachment.device-index',\n 'Values': [device_index]})\n\n if name and subnet_id and not filters:\n filters.append({'Name': 'tag:Name',\n 'Values': [name]})\n filters.append({'Name': 'subnet-id',\n 'Values': [subnet_id]})\n\n if not filters:\n return None\n\n try:\n eni_result = connection.describe_network_interfaces(aws_retry=True, Filters=filters)[\"NetworkInterfaces\"]\n if len(eni_result) == 1:\n return eni_result[0]\n else:\n return None\n except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:\n module.fail_json_aws(e, \"Failed to find unique eni with filters: {0}\".format(filters))\n\n return None\n\n\ndef get_sec_group_list(groups):\n\n # Build list of remote security groups\n remote_security_groups = []\n for group in groups:\n remote_security_groups.append(group[\"GroupId\"].encode())\n\n return remote_security_groups\n\n\ndef _get_vpc_id(connection, module, subnet_id):\n\n try:\n subnets = connection.describe_subnets(aws_retry=True, SubnetIds=[subnet_id])\n return subnets[\"Subnets\"][0][\"VpcId\"]\n except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:\n module.fail_json_aws(e, \"Failed to get vpc_id for {0}\".format(subnet_id))\n\n\ndef manage_tags(eni, name, new_tags, purge_tags, connection):\n changed = False\n\n if \"TagSet\" in eni:\n old_tags = boto3_tag_list_to_ansible_dict(eni['TagSet'])\n elif new_tags:\n old_tags = {}\n else:\n # No new tags and nothing in TagSet\n return False\n\n # Do not purge tags unless tags 
is not None\n if new_tags is None:\n purge_tags = False\n new_tags = {}\n\n if name:\n new_tags['Name'] = name\n\n tags_to_set, tags_to_delete = compare_aws_tags(\n old_tags, new_tags,\n purge_tags=purge_tags,\n )\n if tags_to_set:\n connection.create_tags(\n aws_retry=True,\n Resources=[eni['NetworkInterfaceId']],\n Tags=ansible_dict_to_boto3_tag_list(tags_to_set))\n changed |= True\n if tags_to_delete:\n delete_with_current_values = dict((k, old_tags.get(k)) for k in tags_to_delete)\n connection.delete_tags(\n aws_retry=True,\n Resources=[eni['NetworkInterfaceId']],\n Tags=ansible_dict_to_boto3_tag_list(delete_with_current_values))\n changed |= True\n return changed\n\n\ndef main():\n argument_spec = dict(\n eni_id=dict(default=None, type='str'),\n instance_id=dict(default=None, type='str'),\n private_ip_address=dict(type='str'),\n subnet_id=dict(type='str'),\n description=dict(type='str'),\n security_groups=dict(default=[], type='list', elements='str'),\n device_index=dict(default=0, type='int'),\n state=dict(default='present', choices=['present', 'absent']),\n force_detach=dict(default='no', type='bool'),\n source_dest_check=dict(default=None, type='bool'),\n delete_on_termination=dict(default=None, type='bool'),\n secondary_private_ip_addresses=dict(default=None, type='list', elements='str'),\n purge_secondary_private_ip_addresses=dict(default=False, type='bool'),\n secondary_private_ip_address_count=dict(default=None, type='int'),\n allow_reassignment=dict(default=False, type='bool'),\n attached=dict(default=None, type='bool'),\n name=dict(default=None, type='str'),\n tags=dict(type='dict'),\n purge_tags=dict(default=True, type='bool')\n )\n\n module = AnsibleAWSModule(\n argument_spec=argument_spec,\n mutually_exclusive=[\n ['secondary_private_ip_addresses', 'secondary_private_ip_address_count']\n ],\n required_if=([\n ('attached', True, ['instance_id']),\n ('purge_secondary_private_ip_addresses', True, ['secondary_private_ip_addresses'])\n ])\n )\n\n retry_decorator = AWSRetry.jittered_backoff(\n catch_extra_error_codes=['IncorrectState'],\n )\n connection = module.client('ec2', retry_decorator=retry_decorator)\n state = module.params.get(\"state\")\n\n if state == 'present':\n eni = uniquely_find_eni(connection, module)\n if eni is None:\n subnet_id = module.params.get(\"subnet_id\")\n if subnet_id is None:\n module.fail_json(msg='subnet_id is required when creating a new ENI')\n\n vpc_id = _get_vpc_id(connection, module, subnet_id)\n create_eni(connection, vpc_id, module)\n else:\n modify_eni(connection, module, eni)\n\n elif state == 'absent':\n delete_eni(connection, module)\n\n\nif __name__ == '__main__':\n main()\n", "path": "plugins/modules/ec2_eni.py" } ]
[ { "content": "#!/usr/bin/python\n#\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nDOCUMENTATION = '''\n---\nmodule: ec2_eni\nversion_added: 1.0.0\nshort_description: Create and optionally attach an Elastic Network Interface (ENI) to an instance\ndescription:\n - Create and optionally attach an Elastic Network Interface (ENI) to an instance. If an ENI ID or private_ip is\n provided, the existing ENI (if any) will be modified. The 'attached' parameter controls the attachment status\n of the network interface.\nauthor:\n - \"Rob White (@wimnat)\"\n - \"Mike Healey (@healem)\"\noptions:\n eni_id:\n description:\n - The ID of the ENI (to modify).\n - If I(eni_id=None) and I(state=present), a new eni will be created.\n type: str\n instance_id:\n description:\n - Instance ID that you wish to attach ENI to.\n - Since version 2.2, use the I(attached) parameter to attach or detach an ENI. Prior to 2.2, to detach an ENI from an instance, use C(None).\n type: str\n private_ip_address:\n description:\n - Private IP address.\n type: str\n subnet_id:\n description:\n - ID of subnet in which to create the ENI.\n type: str\n description:\n description:\n - Optional description of the ENI.\n type: str\n security_groups:\n description:\n - List of security groups associated with the interface. Only used when I(state=present).\n - Since version 2.2, you can specify security groups by ID or by name or a combination of both. Prior to 2.2, you can specify only by ID.\n type: list\n elements: str\n state:\n description:\n - Create or delete ENI.\n default: present\n choices: [ 'present', 'absent' ]\n type: str\n device_index:\n description:\n - The index of the device for the network interface attachment on the instance.\n default: 0\n type: int\n attached:\n description:\n - Specifies if network interface should be attached or detached from instance. If omitted, attachment status\n won't change\n type: bool\n force_detach:\n description:\n - Force detachment of the interface. This applies either when explicitly detaching the interface by setting I(instance_id=None)\n or when deleting an interface with I(state=absent).\n default: false\n type: bool\n delete_on_termination:\n description:\n - Delete the interface when the instance it is attached to is terminated. You can only specify this flag when the\n interface is being modified, not on creation.\n required: false\n type: bool\n source_dest_check:\n description:\n - By default, interfaces perform source/destination checks. NAT instances however need this check to be disabled.\n You can only specify this flag when the interface is being modified, not on creation.\n required: false\n type: bool\n secondary_private_ip_addresses:\n description:\n - A list of IP addresses to assign as secondary IP addresses to the network interface.\n This option is mutually exclusive of I(secondary_private_ip_address_count)\n required: false\n type: list\n elements: str\n purge_secondary_private_ip_addresses:\n description:\n - To be used with I(secondary_private_ip_addresses) to determine whether or not to remove any secondary IP addresses other than those specified.\n - Set I(secondary_private_ip_addresses=[]) to purge all secondary addresses.\n default: false\n type: bool\n secondary_private_ip_address_count:\n description:\n - The number of secondary IP addresses to assign to the network interface. 
This option is mutually exclusive of I(secondary_private_ip_addresses)\n required: false\n type: int\n allow_reassignment:\n description:\n - Indicates whether to allow an IP address that is already assigned to another network interface or instance\n to be reassigned to the specified network interface.\n required: false\n default: false\n type: bool\n name:\n description:\n - Name for the ENI. This will create a tag called \"Name\" with the value assigned here.\n - This can be used in conjunction with I(subnet_id) as another means of identifiying a network interface.\n - AWS does not enforce unique Name tags, so duplicate names are possible if you configure it that way.\n If that is the case, you will need to provide other identifying information such as I(private_ip_address) or I(eni_id).\n required: false\n type: str\n tags:\n description:\n - A hash/dictionary of tags to add to the new ENI or to add/remove from an existing one. Please note that\n the name field sets the \"Name\" tag.\n - To clear all tags, set this option to an empty dictionary to use in conjunction with I(purge_tags).\n If you provide I(name), that tag will not be removed.\n - To prevent removing any tags set I(purge_tags) to false.\n type: dict\n required: false\n version_added: 1.3.0\n purge_tags:\n description:\n - Indicates whether to remove tags not specified in I(tags) or I(name). This means you have to specify all\n the desired tags on each task affecting a network interface.\n - If I(tags) is omitted or None this option is disregarded.\n default: true\n type: bool\n version_added: 1.3.0\nextends_documentation_fragment:\n- amazon.aws.aws\n- amazon.aws.ec2\n\nnotes:\n - This module identifies and ENI based on either the I(eni_id), a combination of I(private_ip_address) and I(subnet_id),\n or a combination of I(instance_id) and I(device_id). Any of these options will let you specify a particular ENI.\n'''\n\nEXAMPLES = '''\n# Note: These examples do not set authentication details, see the AWS Guide for details.\n\n# Create an ENI. 
As no security group is defined, ENI will be created in default security group\n- amazon.aws.ec2_eni:\n private_ip_address: 172.31.0.20\n subnet_id: subnet-xxxxxxxx\n state: present\n\n# Create an ENI and attach it to an instance\n- amazon.aws.ec2_eni:\n instance_id: i-xxxxxxx\n device_index: 1\n private_ip_address: 172.31.0.20\n subnet_id: subnet-xxxxxxxx\n state: present\n\n# Create an ENI with two secondary addresses\n- amazon.aws.ec2_eni:\n subnet_id: subnet-xxxxxxxx\n state: present\n secondary_private_ip_address_count: 2\n\n# Assign a secondary IP address to an existing ENI\n# This will purge any existing IPs\n- amazon.aws.ec2_eni:\n subnet_id: subnet-xxxxxxxx\n eni_id: eni-yyyyyyyy\n state: present\n secondary_private_ip_addresses:\n - 172.16.1.1\n\n# Remove any secondary IP addresses from an existing ENI\n- amazon.aws.ec2_eni:\n subnet_id: subnet-xxxxxxxx\n eni_id: eni-yyyyyyyy\n state: present\n secondary_private_ip_address_count: 0\n\n# Destroy an ENI, detaching it from any instance if necessary\n- amazon.aws.ec2_eni:\n eni_id: eni-xxxxxxx\n force_detach: true\n state: absent\n\n# Update an ENI\n- amazon.aws.ec2_eni:\n eni_id: eni-xxxxxxx\n description: \"My new description\"\n state: present\n\n# Update an ENI using name and subnet_id\n- amazon.aws.ec2_eni:\n name: eni-20\n subnet_id: subnet-xxxxxxx\n description: \"My new description\"\n state: present\n\n# Update an ENI identifying it by private_ip_address and subnet_id\n- amazon.aws.ec2_eni:\n subnet_id: subnet-xxxxxxx\n private_ip_address: 172.16.1.1\n description: \"My new description\"\n\n# Detach an ENI from an instance\n- amazon.aws.ec2_eni:\n eni_id: eni-xxxxxxx\n instance_id: None\n state: present\n\n### Delete an interface on termination\n# First create the interface\n- amazon.aws.ec2_eni:\n instance_id: i-xxxxxxx\n device_index: 1\n private_ip_address: 172.31.0.20\n subnet_id: subnet-xxxxxxxx\n state: present\n register: eni\n\n# Modify the interface to enable the delete_on_terminaton flag\n- amazon.aws.ec2_eni:\n eni_id: \"{{ eni.interface.id }}\"\n delete_on_termination: true\n\n'''\n\n\nRETURN = '''\ninterface:\n description: Network interface attributes\n returned: when state != absent\n type: complex\n contains:\n description:\n description: interface description\n type: str\n sample: Firewall network interface\n groups:\n description: list of security groups\n type: list\n elements: dict\n sample: [ { \"sg-f8a8a9da\": \"default\" } ]\n id:\n description: network interface id\n type: str\n sample: \"eni-1d889198\"\n mac_address:\n description: interface's physical address\n type: str\n sample: \"00:00:5E:00:53:23\"\n name:\n description: The name of the ENI\n type: str\n sample: \"my-eni-20\"\n owner_id:\n description: aws account id\n type: str\n sample: 812381371\n private_ip_address:\n description: primary ip address of this interface\n type: str\n sample: 10.20.30.40\n private_ip_addresses:\n description: list of all private ip addresses associated to this interface\n type: list\n elements: dict\n sample: [ { \"primary_address\": true, \"private_ip_address\": \"10.20.30.40\" } ]\n source_dest_check:\n description: value of source/dest check flag\n type: bool\n sample: True\n status:\n description: network interface status\n type: str\n sample: \"pending\"\n subnet_id:\n description: which vpc subnet the interface is bound\n type: str\n sample: subnet-b0a0393c\n tags:\n description: The dictionary of tags associated with the ENI\n type: dict\n sample: { \"Name\": \"my-eni\", \"group\": \"Finance\" }\n 
vpc_id:\n description: which vpc this network interface is bound\n type: str\n sample: vpc-9a9a9da\n\n'''\n\nimport time\n\ntry:\n import botocore.exceptions\nexcept ImportError:\n pass # Handled by AnsibleAWSModule\n\nfrom ..module_utils.core import AnsibleAWSModule\nfrom ..module_utils.core import is_boto3_error_code\nfrom ..module_utils.ec2 import AWSRetry\nfrom ..module_utils.ec2 import ansible_dict_to_boto3_tag_list\nfrom ..module_utils.ec2 import get_ec2_security_group_ids_from_names\nfrom ..module_utils.ec2 import boto3_tag_list_to_ansible_dict\nfrom ..module_utils.ec2 import compare_aws_tags\nfrom ..module_utils.waiters import get_waiter\n\n\ndef get_eni_info(interface):\n\n # Private addresses\n private_addresses = []\n if \"PrivateIpAddresses\" in interface:\n for ip in interface[\"PrivateIpAddresses\"]:\n private_addresses.append({'private_ip_address': ip[\"PrivateIpAddress\"], 'primary_address': ip[\"Primary\"]})\n\n groups = {}\n if \"Groups\" in interface:\n for group in interface[\"Groups\"]:\n groups[group[\"GroupId\"]] = group[\"GroupName\"]\n\n interface_info = {'id': interface.get(\"NetworkInterfaceId\"),\n 'subnet_id': interface.get(\"SubnetId\"),\n 'vpc_id': interface.get(\"VpcId\"),\n 'description': interface.get(\"Description\"),\n 'owner_id': interface.get(\"OwnerId\"),\n 'status': interface.get(\"Status\"),\n 'mac_address': interface.get(\"MacAddress\"),\n 'private_ip_address': interface.get(\"PrivateIpAddress\"),\n 'source_dest_check': interface.get(\"SourceDestCheck\"),\n 'groups': groups,\n 'private_ip_addresses': private_addresses\n }\n\n if \"TagSet\" in interface:\n tags = {}\n name = None\n for tag in interface[\"TagSet\"]:\n tags[tag[\"Key\"]] = tag[\"Value\"]\n if tag[\"Key\"] == \"Name\":\n name = tag[\"Value\"]\n interface_info[\"tags\"] = tags\n\n if name is not None:\n interface_info[\"name\"] = name\n\n if \"Attachment\" in interface:\n interface_info['attachment'] = {\n 'attachment_id': interface[\"Attachment\"].get(\"AttachmentId\"),\n 'instance_id': interface[\"Attachment\"].get(\"InstanceId\"),\n 'device_index': interface[\"Attachment\"].get(\"DeviceIndex\"),\n 'status': interface[\"Attachment\"].get(\"Status\"),\n 'attach_time': interface[\"Attachment\"].get(\"AttachTime\"),\n 'delete_on_termination': interface[\"Attachment\"].get(\"DeleteOnTermination\"),\n }\n\n return interface_info\n\n\ndef correct_ips(connection, ip_list, module, eni_id):\n all_there = True\n eni = describe_eni(connection, module, eni_id)\n private_addresses = set()\n if \"PrivateIpAddresses\" in eni:\n for ip in eni[\"PrivateIpAddresses\"]:\n private_addresses.add(ip[\"PrivateIpAddress\"])\n\n ip_set = set(ip_list)\n\n return ip_set.issubset(private_addresses)\n\n\ndef absent_ips(connection, ip_list, module, eni_id):\n all_there = True\n eni = describe_eni(connection, module, eni_id)\n private_addresses = set()\n if \"PrivateIpAddresses\" in eni:\n for ip in eni[\"PrivateIpAddresses\"]:\n private_addresses.add(ip[\"PrivateIpAddress\"])\n\n ip_set = set(ip_list)\n\n return not ip_set.union(private_addresses)\n\n\ndef correct_ip_count(connection, ip_count, module, eni_id):\n eni = describe_eni(connection, module, eni_id)\n private_addresses = set()\n if \"PrivateIpAddresses\" in eni:\n for ip in eni[\"PrivateIpAddresses\"]:\n private_addresses.add(ip[\"PrivateIpAddress\"])\n\n if len(private_addresses) == ip_count:\n return True\n else:\n return False\n\n\ndef wait_for(function_pointer, *args):\n max_wait = 30\n interval_time = 3\n current_wait = 0\n while current_wait < 
max_wait:\n time.sleep(interval_time)\n current_wait += interval_time\n if function_pointer(*args):\n break\n\n\ndef create_eni(connection, vpc_id, module):\n\n instance_id = module.params.get(\"instance_id\")\n attached = module.params.get(\"attached\")\n if instance_id == 'None':\n instance_id = None\n device_index = module.params.get(\"device_index\")\n subnet_id = module.params.get('subnet_id')\n private_ip_address = module.params.get('private_ip_address')\n description = module.params.get('description')\n security_groups = get_ec2_security_group_ids_from_names(\n module.params.get('security_groups'),\n connection,\n vpc_id=vpc_id,\n boto3=True\n )\n secondary_private_ip_addresses = module.params.get(\"secondary_private_ip_addresses\")\n secondary_private_ip_address_count = module.params.get(\"secondary_private_ip_address_count\")\n changed = False\n tags = module.params.get(\"tags\")\n name = module.params.get(\"name\")\n purge_tags = module.params.get(\"purge_tags\")\n\n try:\n args = {\"SubnetId\": subnet_id}\n if private_ip_address:\n args[\"PrivateIpAddress\"] = private_ip_address\n if description:\n args[\"Description\"] = description\n if len(security_groups) > 0:\n args[\"Groups\"] = security_groups\n eni_dict = connection.create_network_interface(aws_retry=True, **args)\n eni = eni_dict[\"NetworkInterface\"]\n # Once we have an ID make sure we're always modifying the same object\n eni_id = eni[\"NetworkInterfaceId\"]\n get_waiter(connection, 'network_interface_available').wait(NetworkInterfaceIds=[eni_id])\n\n if attached and instance_id is not None:\n try:\n connection.attach_network_interface(\n aws_retry=True,\n InstanceId=instance_id,\n DeviceIndex=device_index,\n NetworkInterfaceId=eni[\"NetworkInterfaceId\"]\n )\n except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError):\n connection.delete_network_interface(aws_retry=True, NetworkInterfaceId=eni_id)\n raise\n get_waiter(connection, 'network_interface_attached').wait(NetworkInterfaceIds=[eni_id])\n\n if secondary_private_ip_address_count is not None:\n try:\n connection.assign_private_ip_addresses(\n aws_retry=True,\n NetworkInterfaceId=eni[\"NetworkInterfaceId\"],\n SecondaryPrivateIpAddressCount=secondary_private_ip_address_count\n )\n wait_for(correct_ip_count, connection, secondary_private_ip_address_count, module, eni_id)\n except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError):\n connection.delete_network_interface(aws_retry=True, NetworkInterfaceId=eni_id)\n raise\n\n if secondary_private_ip_addresses is not None:\n try:\n connection.assign_private_ip_addresses(\n NetworkInterfaceId=eni[\"NetworkInterfaceId\"],\n PrivateIpAddresses=secondary_private_ip_addresses\n )\n wait_for(correct_ips, connection, secondary_private_ip_addresses, module, eni_id)\n except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError):\n connection.delete_network_interface(aws_retry=True, NetworkInterfaceId=eni_id)\n raise\n\n manage_tags(eni, name, tags, purge_tags, connection)\n\n # Refresh the eni data\n eni = describe_eni(connection, module, eni_id)\n changed = True\n\n except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:\n module.fail_json_aws(\n e,\n \"Failed to create eni {0} for {1} in {2} with {3}\".format(name, subnet_id, vpc_id, private_ip_address)\n )\n\n module.exit_json(changed=changed, interface=get_eni_info(eni))\n\n\ndef modify_eni(connection, module, eni):\n\n instance_id = module.params.get(\"instance_id\")\n attached = 
module.params.get(\"attached\")\n device_index = module.params.get(\"device_index\")\n description = module.params.get('description')\n security_groups = module.params.get('security_groups')\n force_detach = module.params.get(\"force_detach\")\n source_dest_check = module.params.get(\"source_dest_check\")\n delete_on_termination = module.params.get(\"delete_on_termination\")\n secondary_private_ip_addresses = module.params.get(\"secondary_private_ip_addresses\")\n purge_secondary_private_ip_addresses = module.params.get(\"purge_secondary_private_ip_addresses\")\n secondary_private_ip_address_count = module.params.get(\"secondary_private_ip_address_count\")\n allow_reassignment = module.params.get(\"allow_reassignment\")\n changed = False\n tags = module.params.get(\"tags\")\n name = module.params.get(\"name\")\n purge_tags = module.params.get(\"purge_tags\")\n\n eni = uniquely_find_eni(connection, module, eni)\n eni_id = eni[\"NetworkInterfaceId\"]\n\n try:\n if description is not None:\n if \"Description\" not in eni or eni[\"Description\"] != description:\n connection.modify_network_interface_attribute(\n aws_retry=True,\n NetworkInterfaceId=eni_id,\n Description={'Value': description}\n )\n changed = True\n if len(security_groups) > 0:\n groups = get_ec2_security_group_ids_from_names(security_groups, connection, vpc_id=eni[\"VpcId\"], boto3=True)\n if sorted(get_sec_group_list(eni[\"Groups\"])) != sorted(groups):\n connection.modify_network_interface_attribute(\n aws_retry=True,\n NetworkInterfaceId=eni_id,\n Groups=groups\n )\n changed = True\n if source_dest_check is not None:\n if \"SourceDestCheck\" not in eni or eni[\"SourceDestCheck\"] != source_dest_check:\n connection.modify_network_interface_attribute(\n aws_retry=True,\n NetworkInterfaceId=eni_id,\n SourceDestCheck={'Value': source_dest_check}\n )\n changed = True\n if delete_on_termination is not None and \"Attachment\" in eni:\n if eni[\"Attachment\"][\"DeleteOnTermination\"] is not delete_on_termination:\n connection.modify_network_interface_attribute(\n aws_retry=True,\n NetworkInterfaceId=eni_id,\n Attachment={'AttachmentId': eni[\"Attachment\"][\"AttachmentId\"],\n 'DeleteOnTermination': delete_on_termination}\n )\n changed = True\n if delete_on_termination:\n waiter = \"network_interface_delete_on_terminate\"\n else:\n waiter = \"network_interface_no_delete_on_terminate\"\n get_waiter(connection, waiter).wait(NetworkInterfaceIds=[eni_id])\n\n current_secondary_addresses = []\n if \"PrivateIpAddresses\" in eni:\n current_secondary_addresses = [i[\"PrivateIpAddress\"] for i in eni[\"PrivateIpAddresses\"] if not i[\"Primary\"]]\n\n if secondary_private_ip_addresses is not None:\n secondary_addresses_to_remove = list(set(current_secondary_addresses) - set(secondary_private_ip_addresses))\n if secondary_addresses_to_remove and purge_secondary_private_ip_addresses:\n connection.unassign_private_ip_addresses(\n aws_retry=True,\n NetworkInterfaceId=eni_id,\n PrivateIpAddresses=list(set(current_secondary_addresses) - set(secondary_private_ip_addresses)),\n )\n wait_for(absent_ips, connection, secondary_addresses_to_remove, module, eni_id)\n changed = True\n secondary_addresses_to_add = list(set(secondary_private_ip_addresses) - set(current_secondary_addresses))\n if secondary_addresses_to_add:\n connection.assign_private_ip_addresses(\n aws_retry=True,\n NetworkInterfaceId=eni_id,\n PrivateIpAddresses=secondary_addresses_to_add,\n AllowReassignment=allow_reassignment\n )\n wait_for(correct_ips, connection, 
secondary_addresses_to_add, module, eni_id)\n changed = True\n\n if secondary_private_ip_address_count is not None:\n current_secondary_address_count = len(current_secondary_addresses)\n if secondary_private_ip_address_count > current_secondary_address_count:\n connection.assign_private_ip_addresses(\n aws_retry=True,\n NetworkInterfaceId=eni_id,\n SecondaryPrivateIpAddressCount=(secondary_private_ip_address_count - current_secondary_address_count),\n AllowReassignment=allow_reassignment\n )\n wait_for(correct_ip_count, connection, secondary_private_ip_address_count, module, eni_id)\n changed = True\n elif secondary_private_ip_address_count < current_secondary_address_count:\n # How many of these addresses do we want to remove\n secondary_addresses_to_remove_count = current_secondary_address_count - secondary_private_ip_address_count\n connection.unassign_private_ip_addresses(\n aws_retry=True,\n NetworkInterfaceId=eni_id,\n PrivateIpAddresses=current_secondary_addresses[:secondary_addresses_to_remove_count]\n )\n wait_for(correct_ip_count, connection, secondary_private_ip_address_count, module, eni_id)\n changed = True\n\n if attached is True:\n if \"Attachment\" in eni and eni[\"Attachment\"][\"InstanceId\"] != instance_id:\n detach_eni(connection, eni, module)\n connection.attach_network_interface(\n aws_retry=True,\n InstanceId=instance_id,\n DeviceIndex=device_index,\n NetworkInterfaceId=eni_id,\n )\n get_waiter(connection, 'network_interface_attached').wait(NetworkInterfaceIds=[eni_id])\n changed = True\n if \"Attachment\" not in eni:\n connection.attach_network_interface(\n aws_retry=True,\n InstanceId=instance_id,\n DeviceIndex=device_index,\n NetworkInterfaceId=eni_id,\n )\n get_waiter(connection, 'network_interface_attached').wait(NetworkInterfaceIds=[eni_id])\n changed = True\n\n elif attached is False:\n changed |= detach_eni(connection, eni, module)\n get_waiter(connection, 'network_interface_available').wait(NetworkInterfaceIds=[eni_id])\n\n changed |= manage_tags(eni, name, tags, purge_tags, connection)\n\n except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:\n module.fail_json_aws(e, \"Failed to modify eni {0}\".format(eni_id))\n\n eni = describe_eni(connection, module, eni_id)\n module.exit_json(changed=changed, interface=get_eni_info(eni))\n\n\ndef delete_eni(connection, module):\n\n eni = uniquely_find_eni(connection, module)\n if not eni:\n module.exit_json(changed=False)\n\n eni_id = eni[\"NetworkInterfaceId\"]\n force_detach = module.params.get(\"force_detach\")\n\n try:\n if force_detach is True:\n if \"Attachment\" in eni:\n connection.detach_network_interface(\n aws_retry=True,\n AttachmentId=eni[\"Attachment\"][\"AttachmentId\"],\n Force=True\n )\n # Wait to allow detachment to finish\n get_waiter(connection, 'network_interface_available').wait(NetworkInterfaceIds=[eni_id])\n connection.delete_network_interface(aws_retry=True, NetworkInterfaceId=eni_id)\n changed = True\n else:\n connection.delete_network_interface(aws_retry=True, NetworkInterfaceId=eni_id)\n changed = True\n\n module.exit_json(changed=changed)\n except is_boto3_error_code('InvalidNetworkInterfaceID.NotFound'):\n module.exit_json(changed=False)\n except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except\n module.fail_json_aws(e, \"Failure during delete of {0}\".format(eni_id))\n\n\ndef detach_eni(connection, eni, module):\n\n attached = module.params.get(\"attached\")\n eni_id = eni[\"NetworkInterfaceId\"]\n\n 
force_detach = module.params.get(\"force_detach\")\n if \"Attachment\" in eni:\n connection.detach_network_interface(\n aws_retry=True,\n AttachmentId=eni[\"Attachment\"][\"AttachmentId\"],\n Force=force_detach\n )\n get_waiter(connection, 'network_interface_available').wait(NetworkInterfaceIds=[eni_id])\n return True\n\n return False\n\n\ndef describe_eni(connection, module, eni_id):\n try:\n eni_result = connection.describe_network_interfaces(aws_retry=True, NetworkInterfaceIds=[eni_id])\n if eni_result[\"NetworkInterfaces\"]:\n return eni_result[\"NetworkInterfaces\"][0]\n else:\n return None\n except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:\n module.fail_json_aws(e, \"Failed to describe eni with id: {0}\".format(eni_id))\n\n\ndef uniquely_find_eni(connection, module, eni=None):\n\n if eni:\n # In the case of create, eni_id will not be a param but we can still get the eni_id after creation\n if \"NetworkInterfaceId\" in eni:\n eni_id = eni[\"NetworkInterfaceId\"]\n else:\n eni_id = None\n else:\n eni_id = module.params.get(\"eni_id\")\n\n private_ip_address = module.params.get('private_ip_address')\n subnet_id = module.params.get('subnet_id')\n instance_id = module.params.get('instance_id')\n device_index = module.params.get('device_index')\n attached = module.params.get('attached')\n name = module.params.get(\"name\")\n\n filters = []\n\n # proceed only if we're unequivocally specifying an ENI\n if eni_id is None and private_ip_address is None and (instance_id is None and device_index is None):\n return None\n\n if eni_id:\n filters.append({'Name': 'network-interface-id',\n 'Values': [eni_id]})\n\n if private_ip_address and subnet_id and not filters:\n filters.append({'Name': 'private-ip-address',\n 'Values': [private_ip_address]})\n filters.append({'Name': 'subnet-id',\n 'Values': [subnet_id]})\n\n if not attached and instance_id and device_index and not filters:\n filters.append({'Name': 'attachment.instance-id',\n 'Values': [instance_id]})\n filters.append({'Name': 'attachment.device-index',\n 'Values': [device_index]})\n\n if name and subnet_id and not filters:\n filters.append({'Name': 'tag:Name',\n 'Values': [name]})\n filters.append({'Name': 'subnet-id',\n 'Values': [subnet_id]})\n\n if not filters:\n return None\n\n try:\n eni_result = connection.describe_network_interfaces(aws_retry=True, Filters=filters)[\"NetworkInterfaces\"]\n if len(eni_result) == 1:\n return eni_result[0]\n else:\n return None\n except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:\n module.fail_json_aws(e, \"Failed to find unique eni with filters: {0}\".format(filters))\n\n return None\n\n\ndef get_sec_group_list(groups):\n\n # Build list of remote security groups\n remote_security_groups = []\n for group in groups:\n remote_security_groups.append(group[\"GroupId\"])\n\n return remote_security_groups\n\n\ndef _get_vpc_id(connection, module, subnet_id):\n\n try:\n subnets = connection.describe_subnets(aws_retry=True, SubnetIds=[subnet_id])\n return subnets[\"Subnets\"][0][\"VpcId\"]\n except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:\n module.fail_json_aws(e, \"Failed to get vpc_id for {0}\".format(subnet_id))\n\n\ndef manage_tags(eni, name, new_tags, purge_tags, connection):\n changed = False\n\n if \"TagSet\" in eni:\n old_tags = boto3_tag_list_to_ansible_dict(eni['TagSet'])\n elif new_tags:\n old_tags = {}\n else:\n # No new tags and nothing in TagSet\n return False\n\n # Do not purge tags unless tags is not 
None\n if new_tags is None:\n purge_tags = False\n new_tags = {}\n\n if name:\n new_tags['Name'] = name\n\n tags_to_set, tags_to_delete = compare_aws_tags(\n old_tags, new_tags,\n purge_tags=purge_tags,\n )\n if tags_to_set:\n connection.create_tags(\n aws_retry=True,\n Resources=[eni['NetworkInterfaceId']],\n Tags=ansible_dict_to_boto3_tag_list(tags_to_set))\n changed |= True\n if tags_to_delete:\n delete_with_current_values = dict((k, old_tags.get(k)) for k in tags_to_delete)\n connection.delete_tags(\n aws_retry=True,\n Resources=[eni['NetworkInterfaceId']],\n Tags=ansible_dict_to_boto3_tag_list(delete_with_current_values))\n changed |= True\n return changed\n\n\ndef main():\n argument_spec = dict(\n eni_id=dict(default=None, type='str'),\n instance_id=dict(default=None, type='str'),\n private_ip_address=dict(type='str'),\n subnet_id=dict(type='str'),\n description=dict(type='str'),\n security_groups=dict(default=[], type='list', elements='str'),\n device_index=dict(default=0, type='int'),\n state=dict(default='present', choices=['present', 'absent']),\n force_detach=dict(default='no', type='bool'),\n source_dest_check=dict(default=None, type='bool'),\n delete_on_termination=dict(default=None, type='bool'),\n secondary_private_ip_addresses=dict(default=None, type='list', elements='str'),\n purge_secondary_private_ip_addresses=dict(default=False, type='bool'),\n secondary_private_ip_address_count=dict(default=None, type='int'),\n allow_reassignment=dict(default=False, type='bool'),\n attached=dict(default=None, type='bool'),\n name=dict(default=None, type='str'),\n tags=dict(type='dict'),\n purge_tags=dict(default=True, type='bool')\n )\n\n module = AnsibleAWSModule(\n argument_spec=argument_spec,\n mutually_exclusive=[\n ['secondary_private_ip_addresses', 'secondary_private_ip_address_count']\n ],\n required_if=([\n ('attached', True, ['instance_id']),\n ('purge_secondary_private_ip_addresses', True, ['secondary_private_ip_addresses'])\n ])\n )\n\n retry_decorator = AWSRetry.jittered_backoff(\n catch_extra_error_codes=['IncorrectState'],\n )\n connection = module.client('ec2', retry_decorator=retry_decorator)\n state = module.params.get(\"state\")\n\n if state == 'present':\n eni = uniquely_find_eni(connection, module)\n if eni is None:\n subnet_id = module.params.get(\"subnet_id\")\n if subnet_id is None:\n module.fail_json(msg='subnet_id is required when creating a new ENI')\n\n vpc_id = _get_vpc_id(connection, module, subnet_id)\n create_eni(connection, vpc_id, module)\n else:\n modify_eni(connection, module, eni)\n\n elif state == 'absent':\n delete_eni(connection, module)\n\n\nif __name__ == '__main__':\n main()\n", "path": "plugins/modules/ec2_eni.py" } ]
diff --git a/changelogs/fragments/337-ec2_eni-fix-idempotency-security-groups.yml b/changelogs/fragments/337-ec2_eni-fix-idempotency-security-groups.yml new file mode 100644 index 00000000000..0a04d1de173 --- /dev/null +++ b/changelogs/fragments/337-ec2_eni-fix-idempotency-security-groups.yml @@ -0,0 +1,3 @@ +--- +minor_changes: +- ec2_eni - fix idempotency when ``security_groups`` attribute is specified (https://github.com/ansible-collections/amazon.aws/pull/337). diff --git a/plugins/modules/ec2_eni.py b/plugins/modules/ec2_eni.py index 01a81f991d9..d9a82a73934 100644 --- a/plugins/modules/ec2_eni.py +++ b/plugins/modules/ec2_eni.py @@ -769,7 +769,7 @@ def get_sec_group_list(groups): # Build list of remote security groups remote_security_groups = [] for group in groups: - remote_security_groups.append(group["GroupId"].encode()) + remote_security_groups.append(group["GroupId"]) return remote_security_groups diff --git a/tests/integration/targets/ec2_eni/tasks/main.yaml b/tests/integration/targets/ec2_eni/tasks/main.yaml index 3a0996617bf..e02d641a1a1 100644 --- a/tests/integration/targets/ec2_eni/tasks/main.yaml +++ b/tests/integration/targets/ec2_eni/tasks/main.yaml @@ -7,6 +7,7 @@ region: "{{ aws_region }}" collections: + - ansible.netcommon - community.aws block: diff --git a/tests/integration/targets/ec2_eni/tasks/test_eni_basic_creation.yaml b/tests/integration/targets/ec2_eni/tasks/test_eni_basic_creation.yaml index b18af2dc9b3..49db43759fa 100644 --- a/tests/integration/targets/ec2_eni/tasks/test_eni_basic_creation.yaml +++ b/tests/integration/targets/ec2_eni/tasks/test_eni_basic_creation.yaml @@ -57,7 +57,7 @@ - _interface_0.private_dns_name is string - _interface_0.private_dns_name.endswith("ec2.internal") - '"private_ip_address" in _interface_0' - - _interface_0.private_ip_address | ipaddr() + - _interface_0.private_ip_address | ansible.netcommon.ipaddr - _interface_0.private_ip_address == ip_1 - '"private_ip_addresses" in _interface_0' - _interface_0.private_ip_addresses | length == 1 @@ -152,7 +152,7 @@ - _interface_0.private_dns_name is string - _interface_0.private_dns_name.endswith("ec2.internal") - '"private_ip_address" in _interface_0' - - _interface_0.private_ip_address | ipaddr() + - _interface_0.private_ip_address | ansible.netcommon.ipaddr - _interface_0.private_ip_address == ip_5 - '"private_ip_addresses" in _interface_0' - _interface_0.private_ip_addresses | length == 1
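The small change above is the whole fix: boto3 returns security group IDs as `str`, but the old `get_sec_group_list` encoded them to `bytes`, so the `sorted(...) != sorted(...)` comparison in `modify_eni` could never match and the module re-applied the same groups (and reported `changed`) on every run. A minimal illustration of that type mismatch, in plain Python with a made-up group ID and no AWS calls:

```python
# what the pre-fix get_sec_group_list() returned: bytes
current = [b"sg-0123456789abcdef0"]
# what get_ec2_security_group_ids_from_names() returns: str
desired = ["sg-0123456789abcdef0"]

# bytes never compare equal to str, so the module always saw a difference
print(sorted(current) != sorted(desired))                      # True -> spurious change

# with .encode() removed, both sides are str and the check is idempotent
print(sorted(s.decode() for s in current) != sorted(desired))  # False
```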
scikit-hep__awkward-1248
`NumpyLike.to_rectilinear` fails for NumPy arrays ### Version of Awkward Array 1.8.3rc0 ### Description and code to reproduce This bug is most easily triggered when invoking a NumPy function with a rectilinear Awkward argument and NumPy rectilinear argument, e.g. `np.isin`: ```pycon >>> reference = np.r_[1,2,3,4] >>> test = ak.Array([1,2,9,0]) >>> np.isin(test, reference) ``` which gives the following traceback: ```pytb Traceback (most recent call last): File "<stdin>", line 1, in <module> File "<__array_function__ internals>", line 5, in isin File "/home/angus/Git/awkward-1.0/awkward/highlevel.py", line 1440, in __array_function__ return ak._connect._numpy.array_function(func, types, args, kwargs) File "/home/angus/Git/awkward-1.0/awkward/_connect/_numpy.py", line 40, in array_function args = tuple(_to_rectilinear(x) for x in args) File "/home/angus/Git/awkward-1.0/awkward/_connect/_numpy.py", line 40, in <genexpr> args = tuple(_to_rectilinear(x) for x in args) File "/home/angus/Git/awkward-1.0/awkward/_connect/_numpy.py", line 32, in _to_rectilinear return nplike.to_rectilinear(arg, allow_missing=False) File "/home/angus/Git/awkward-1.0/awkward/nplike.py", line 422, in to_rectilinear return [self.to_rectilinear(x, *args, **kwargs) for x in array] File "/home/angus/Git/awkward-1.0/awkward/nplike.py", line 422, in <listcomp> return [self.to_rectilinear(x, *args, **kwargs) for x in array] File "/home/angus/Git/awkward-1.0/awkward/nplike.py", line 425, in to_rectilinear raise TypeError("to_rectilinear argument must be iterable") TypeError: to_rectilinear argument must be iterable ``` I think we need to handle the case where an array is `np.ndarray`.
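The traceback comes down to a missing case: `Numpy.to_rectilinear` handles Awkward types and generic iterables, but a plain `numpy.ndarray` only matches the `Iterable` branch, so the method recurses into the array and the integer scalars finally trip the `TypeError`. A stripped-down sketch of that dead end (the real method lives in `src/awkward/nplike.py`, shown in the files below):

```python
import numpy as np
from collections.abc import Iterable


def to_rectilinear(arg):
    # stripped-down pre-fix logic: there is no numpy.ndarray branch,
    # so an ndarray falls through to the generic Iterable recursion
    if isinstance(arg, Iterable):
        return [to_rectilinear(x) for x in arg]
    raise TypeError("to_rectilinear argument must be iterable")


try:
    to_rectilinear(np.r_[1, 2, 3, 4])
except TypeError as err:
    print(err)  # raised once the recursion reaches the integer scalars
```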
[ { "content": "# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE\n\n# v2: keep this file, but modernize the 'of' function; ptr_lib is gone.\n\n\nimport ctypes\n\nfrom collections.abc import Iterable\n\nimport numpy\n\nimport awkward as ak\n\n\ndef of(*arrays):\n libs = set()\n for array in arrays:\n nplike = getattr(array, \"nplike\", None)\n if nplike is not None:\n libs.add(nplike)\n\n if any(isinstance(x, ak._v2._typetracer.TypeTracer) for x in libs):\n return ak._v2._typetracer.TypeTracer.instance()\n\n if libs == set():\n return Numpy.instance()\n elif len(libs) == 1:\n return next(iter(libs))\n else:\n raise ValueError(\n \"\"\"attempting to use both a 'cpu' array and a 'cuda' array in the \"\"\"\n \"\"\"same operation; use one of\n\n ak.to_kernels(array, 'cpu')\n ak.to_kernels(array, 'cuda')\n\nto move one or the other to main memory or the GPU(s).\"\"\"\n + ak._util.exception_suffix(__file__)\n )\n\n\nclass Singleton:\n _instance = None\n\n @classmethod\n def instance(cls):\n if cls._instance is None:\n cls._instance = cls()\n return cls._instance\n\n\nclass NumpyMetadata(Singleton):\n bool_ = numpy.bool_\n int8 = numpy.int8\n int16 = numpy.int16\n int32 = numpy.int32\n int64 = numpy.int64\n uint8 = numpy.uint8\n uint16 = numpy.uint16\n uint32 = numpy.uint32\n uint64 = numpy.uint64\n float32 = numpy.float32\n float64 = numpy.float64\n complex64 = numpy.complex64\n complex128 = numpy.complex128\n str_ = numpy.str_\n bytes_ = numpy.bytes_\n\n intp = numpy.intp\n integer = numpy.integer\n signedinteger = numpy.signedinteger\n unsignedinteger = numpy.unsignedinteger\n floating = numpy.floating\n number = numpy.number\n object_ = numpy.object_\n generic = numpy.generic\n\n dtype = numpy.dtype\n ufunc = numpy.ufunc\n iinfo = numpy.iinfo\n errstate = numpy.errstate\n newaxis = numpy.newaxis\n\n ndarray = numpy.ndarray\n\n nan = numpy.nan\n inf = numpy.inf\n\n nat = numpy.datetime64(\"NaT\")\n datetime_data = numpy.datetime_data\n issubdtype = numpy.issubdtype\n\n AxisError = numpy.AxisError\n\n\nif hasattr(numpy, \"float16\"):\n NumpyMetadata.float16 = numpy.float16\n\nif hasattr(numpy, \"float128\"):\n NumpyMetadata.float128 = numpy.float128\n\nif hasattr(numpy, \"complex256\"):\n NumpyMetadata.complex256 = numpy.complex256\n\nif hasattr(numpy, \"datetime64\"):\n NumpyMetadata.datetime64 = numpy.datetime64\n\nif hasattr(numpy, \"timedelta64\"):\n NumpyMetadata.timedelta64 = numpy.timedelta64\n\nNumpyMetadata.all_complex = tuple(\n getattr(numpy, x) for x in dir(NumpyMetadata) if x.startswith(\"complex\")\n)\n\n\nclass NumpyLike(Singleton):\n known_data = True\n known_shape = True\n known_dtype = True\n\n ############################ array creation\n\n def array(self, *args, **kwargs):\n # data[, dtype=[, copy=]]\n return self._module.array(*args, **kwargs)\n\n def asarray(self, *args, **kwargs):\n # array[, dtype=][, order=]\n return self._module.asarray(*args, **kwargs)\n\n def ascontiguousarray(self, *args, **kwargs):\n # array[, dtype=]\n return self._module.ascontiguousarray(*args, **kwargs)\n\n def isscalar(self, *args, **kwargs):\n return self._module.isscalar(*args, **kwargs)\n\n def frombuffer(self, *args, **kwargs):\n # array[, dtype=]\n return self._module.frombuffer(*args, **kwargs)\n\n def zeros(self, *args, **kwargs):\n # shape/len[, dtype=]\n return self._module.zeros(*args, **kwargs)\n\n def ones(self, *args, **kwargs):\n # shape/len[, dtype=]\n return self._module.ones(*args, **kwargs)\n\n def empty(self, *args, **kwargs):\n # 
shape/len[, dtype=]\n return self._module.empty(*args, **kwargs)\n\n def full(self, *args, **kwargs):\n # shape/len, value[, dtype=]\n return self._module.full(*args, **kwargs)\n\n def zeros_like(self, *args, **kwargs):\n # array\n return self._module.zeros_like(*args, **kwargs)\n\n def ones_like(self, *args, **kwargs):\n # array\n return self._module.ones_like(*args, **kwargs)\n\n def full_like(self, *args, **kwargs):\n # array, fill_value\n return self._module.full_like(*args, **kwargs)\n\n def arange(self, *args, **kwargs):\n # stop[, dtype=]\n # start, stop[, dtype=]\n # start, stop, step[, dtype=]\n return self._module.arange(*args, **kwargs)\n\n def meshgrid(self, *args, **kwargs):\n # *arrays, indexing=\"ij\"\n return self._module.meshgrid(*args, **kwargs)\n\n ############################ testing\n\n def shape(self, *args, **kwargs):\n # array\n return self._module.shape(*args, **kwargs)\n\n def array_equal(self, *args, **kwargs):\n # array1, array2\n return self._module.array_equal(*args, **kwargs)\n\n def size(self, *args, **kwargs):\n # array\n return self._module.size(*args, **kwargs)\n\n def searchsorted(self, *args, **kwargs):\n # haystack, needle, side=\"right\"\n return self._module.searchsorted(*args, **kwargs)\n\n def argsort(self, *args, **kwargs):\n # array\n return self._module.argsort(*args, **kwargs)\n\n ############################ manipulation\n\n def broadcast_arrays(self, *args, **kwargs):\n # array1[, array2[, ...]]\n return self._module.broadcast_arrays(*args, **kwargs)\n\n def cumsum(self, *args, **kwargs):\n # arrays[, out=]\n return self._module.cumsum(*args, **kwargs)\n\n def cumprod(self, *args, **kwargs):\n # arrays[, out=]\n return self._module.cumprod(*args, **kwargs)\n\n def nonzero(self, *args, **kwargs):\n # array\n return self._module.nonzero(*args, **kwargs)\n\n def unique(self, *args, **kwargs):\n # array\n return self._module.unique(*args, **kwargs)\n\n def concatenate(self, *args, **kwargs):\n # arrays\n return self._module.concatenate(*args, **kwargs)\n\n def repeat(self, *args, **kwargs):\n # array, int\n # array1, array2\n return self._module.repeat(*args, **kwargs)\n\n def stack(self, *args, **kwargs):\n # arrays\n return self._module.stack(*args, **kwargs)\n\n def vstack(self, *args, **kwargs):\n # arrays\n return self._module.vstack(*args, **kwargs)\n\n def packbits(self, *args, **kwargs):\n # array\n return self._module.packbits(*args, **kwargs)\n\n def unpackbits(self, *args, **kwargs):\n # array\n return self._module.unpackbits(*args, **kwargs)\n\n def atleast_1d(self, *args, **kwargs):\n # *arrays\n return self._module.atleast_1d(*args, **kwargs)\n\n def broadcast_to(self, *args, **kwargs):\n # array, shape\n return self._module.broadcast_to(*args, **kwargs)\n\n def append(self, *args, **kwargs):\n # array, element\n return self._module.append(*args, **kwargs)\n\n def where(self, *args, **kwargs):\n # array, element\n return self._module.where(*args, **kwargs)\n\n ############################ ufuncs\n\n def add(self, *args, **kwargs):\n # array1, array2\n return self._module.add(*args, **kwargs)\n\n def multiply(self, *args, **kwargs):\n # array1, array2\n return self._module.multiply(*args, **kwargs)\n\n def logical_or(self, *args, **kwargs):\n # array1, array2\n return self._module.logical_or(*args, **kwargs)\n\n def logical_and(self, *args, **kwargs):\n # array1, array2\n return self._module.logical_and(*args, **kwargs)\n\n def sqrt(self, *args, **kwargs):\n # array\n return self._module.sqrt(*args, **kwargs)\n\n def exp(self, *args, 
**kwargs):\n # array\n return self._module.exp(*args, **kwargs)\n\n def true_divide(self, *args, **kwargs):\n # array1, array2\n return self._module.true_divide(*args, **kwargs)\n\n def bitwise_or(self, *args, **kwargs):\n # array1, array2[, out=output]\n return self._module.bitwise_or(*args, **kwargs)\n\n def equal(self, *args, **kwargs):\n # array1, array2\n return self._module.equal(*args, **kwargs)\n\n def ceil(self, *args, **kwargs):\n # array\n return self._module.ceil(*args, **kwargs)\n\n def minimum(self, *args, **kwargs):\n # array1, array2\n return self._module.minimum(*args, **kwargs)\n\n def maximum(self, *args, **kwargs):\n # array1, array2\n return self._module.maximum(*args, **kwargs)\n\n ############################ almost-ufuncs\n\n def nan_to_num(self, *args, **kwargs):\n # array, copy=True, nan=0.0, posinf=None, neginf=None\n return self._module.nan_to_num(*args, **kwargs)\n\n def isclose(self, *args, **kwargs):\n # a, b, rtol=1e-05, atol=1e-08, equal_nan=False\n return self._module.isclose(*args, **kwargs)\n\n ############################ reducers\n\n def all(self, *args, **kwargs):\n # array\n return self._module.all(*args, **kwargs)\n\n def any(self, *args, **kwargs):\n # array\n kwargs.pop(\"prefer\", None)\n return self._module.any(*args, **kwargs)\n\n def count_nonzero(self, *args, **kwargs):\n # array\n return self._module.count_nonzero(*args, **kwargs)\n\n def sum(self, *args, **kwargs):\n # array\n return self._module.sum(*args, **kwargs)\n\n def prod(self, *args, **kwargs):\n # array\n return self._module.prod(*args, **kwargs)\n\n def min(self, *args, **kwargs):\n # array\n return self._module.min(*args, **kwargs)\n\n def max(self, *args, **kwargs):\n # array\n return self._module.max(*args, **kwargs)\n\n def argmin(self, *args, **kwargs):\n # array[, axis=]\n return self._module.argmin(*args, **kwargs)\n\n def argmax(self, *args, **kwargs):\n # array[, axis=]\n return self._module.argmax(*args, **kwargs)\n\n def array_str(self, *args, **kwargs):\n # array, max_line_width, precision=None, suppress_small=None\n return self._module.array_str(*args, **kwargs)\n\n def datetime_as_string(self, *args, **kwargs):\n return self._module.datetime_as_string(*args, **kwargs)\n\n\nclass NumpyKernel:\n def __init__(self, kernel, name_and_types):\n self._kernel = kernel\n self._name_and_types = name_and_types\n\n def __repr__(self):\n return \"<{} {}{}>\".format(\n type(self).__name__,\n self._name_and_types[0],\n \"\".join(\", \" + str(numpy.dtype(x)) for x in self._name_and_types[1:]),\n )\n\n @staticmethod\n def _cast(x, t):\n if issubclass(t, ctypes._Pointer):\n if isinstance(x, numpy.ndarray):\n return ctypes.cast(x.ctypes.data, t)\n else:\n return ctypes.cast(x, t)\n else:\n return x\n\n def __call__(self, *args):\n assert len(args) == len(self._kernel.argtypes)\n return self._kernel(\n *(self._cast(x, t) for x, t in zip(args, self._kernel.argtypes))\n )\n\n\nclass Numpy(NumpyLike):\n def to_rectilinear(self, array, *args, **kwargs):\n if isinstance(\n array,\n (\n ak.Array,\n ak.Record,\n ak.ArrayBuilder,\n ak.layout.Content,\n ak.layout.Record,\n ak.layout.ArrayBuilder,\n ak.layout.LayoutBuilder32,\n ak.layout.LayoutBuilder64,\n ),\n ):\n return ak.operations.convert.to_numpy(array, *args, **kwargs)\n\n elif isinstance(array, Iterable):\n return [self.to_rectilinear(x, *args, **kwargs) for x in array]\n\n else:\n raise TypeError(\"to_rectilinear argument must be iterable\")\n\n def __getitem__(self, name_and_types):\n return 
NumpyKernel(ak._cpu_kernels.kernel[name_and_types], name_and_types)\n\n def __init__(self):\n self._module = numpy\n\n @property\n def ma(self):\n return self._module.ma\n\n @property\n def char(self):\n return self._module.char\n\n @property\n def ndarray(self):\n return self._module.ndarray\n\n\nclass Cupy(NumpyLike):\n def to_rectilinear(self, array, *args, **kwargs):\n return ak.operations.convert.to_cupy(array, *args, **kwargs)\n\n def __getitem__(self, name_and_types):\n raise NotImplementedError(\"no CUDA in v2 yet\")\n\n def __init__(self):\n try:\n import cupy\n except ModuleNotFoundError:\n raise ModuleNotFoundError(\n \"\"\"to use CUDA arrays in Python, install the 'cupy' package with:\n\n pip install cupy --upgrade\n\nor\n\n conda install cupy\"\"\"\n ) from None\n self._module = cupy\n\n @property\n def ma(self):\n raise ValueError(\n \"CUDA arrays cannot have missing values until CuPy implements \"\n \"numpy.ma.MaskedArray\" + ak._util.exception_suffix(__file__)\n )\n\n @property\n def char(self):\n raise ValueError(\n \"CUDA arrays cannot do string manipulations until CuPy implements \"\n \"numpy.char\" + ak._util.exception_suffix(__file__)\n )\n\n @property\n def ndarray(self):\n return self._module.ndarray\n\n def asarray(self, array, dtype=None):\n if isinstance(\n array,\n (\n ak.highlevel.Array,\n ak.highlevel.Record,\n ak.layout.Content,\n ak.layout.Record,\n ),\n ):\n out = ak.operations.convert.to_cupy(array)\n if dtype is not None and out.dtype != dtype:\n return self._module.asarray(out, dtype=dtype)\n else:\n return out\n else:\n return self._module.asarray(array, dtype=dtype)\n\n def ascontiguousarray(self, array, dtype=None):\n if isinstance(\n array,\n (\n ak.highlevel.Array,\n ak.highlevel.Record,\n ak.layout.Content,\n ak.layout.Record,\n ),\n ):\n out = ak.operations.convert.to_cupy(array)\n if dtype is not None and out.dtype != dtype:\n return self._module.ascontiguousarray(out, dtype=dtype)\n else:\n return out\n else:\n return self._module.ascontiguousarray(array, dtype=dtype)\n\n def zeros(self, *args, **kwargs):\n return self._module.zeros(*args, **kwargs)\n\n def frombuffer(self, *args, **kwargs):\n np_array = numpy.frombuffer(*args, **kwargs)\n return self._module.array(np_array)\n\n def array_equal(self, array1, array2):\n # CuPy issue?\n if array1.shape != array2.shape:\n return False\n else:\n return self._module.all(array1 - array2 == 0)\n\n def repeat(self, array, repeats):\n # https://github.com/cupy/cupy/issues/3849\n if isinstance(repeats, self._module.ndarray):\n all_stops = self._module.cumsum(repeats)\n parents = self._module.zeros(all_stops[-1].item(), dtype=int)\n stops, stop_counts = self._module.unique(all_stops[:-1], return_counts=True)\n parents[stops] = stop_counts\n self._module.cumsum(parents, out=parents)\n return array[parents]\n else:\n return self._module.repeat(array, repeats)\n\n def nan_to_num(self, array, copy=True, nan=0.0, posinf=None, neginf=None):\n # https://github.com/cupy/cupy/issues/4867\n if copy:\n array = self._module.copy(array)\n if posinf is None:\n if array.dtype.kind == \"f\":\n posinf = numpy.finfo(array.dtype.type).max\n else:\n posinf = numpy.iinfo(array.dtype.type).max\n if neginf is None:\n if array.dtype.kind == \"f\":\n neginf = numpy.finfo(array.dtype.type).min\n else:\n neginf = numpy.iinfo(array.dtype.type).min\n\n array[self._module.isnan(array)] = nan\n array[self._module.isinf(array) & (array > 0)] = posinf\n array[self._module.isinf(array) & (array < 0)] = neginf\n return array\n\n # For all 
reducers: https://github.com/cupy/cupy/issues/3819\n\n def all(self, array, axis=None, **kwargs):\n kwargs.pop(\"prefer\", None)\n out = self._module.all(array, axis=axis)\n if axis is None and isinstance(out, self._module.ndarray):\n return out.item()\n else:\n return out\n\n def any(self, array, axis=None, **kwargs):\n kwargs.pop(\"prefer\", None)\n out = self._module.any(array, axis=axis)\n if axis is None and isinstance(out, self._module.ndarray):\n return out.item()\n else:\n return out\n\n def count_nonzero(self, array, axis=None):\n out = self._module.count_nonzero(array, axis=axis)\n if axis is None and isinstance(out, self._module.ndarray):\n return out.item()\n else:\n return out\n\n def sum(self, array, axis=None):\n out = self._module.sum(array, axis=axis)\n if axis is None and isinstance(out, self._module.ndarray):\n return out.item()\n else:\n return out\n\n def prod(self, array, axis=None):\n out = self._module.prod(array, axis=axis)\n if axis is None and isinstance(out, self._module.ndarray):\n return out.item()\n else:\n return out\n\n def min(self, array, axis=None):\n out = self._module.min(array, axis=axis)\n if axis is None and isinstance(out, self._module.ndarray):\n return out.item()\n else:\n return out\n\n def max(self, array, axis=None):\n out = self._module.max(array, axis=axis)\n if axis is None and isinstance(out, self._module.ndarray):\n return out.item()\n else:\n return out\n\n def argmin(self, array, axis=None):\n out = self._module.argmin(array, axis=axis)\n if axis is None and isinstance(out, self._module.ndarray):\n return out.item()\n else:\n return out\n\n def argmax(self, array, axis=None):\n out = self._module.argmax(array, axis=axis)\n if axis is None and isinstance(out, self._module.ndarray):\n return out.item()\n else:\n return out\n\n def array_str(\n self, array, max_line_width=None, precision=None, suppress_small=None\n ):\n # array, max_line_width, precision=None, suppress_small=None\n return self._module.array_str(array, max_line_width, precision, suppress_small)\n", "path": "src/awkward/nplike.py" } ]
[ { "content": "# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE\n\n# v2: keep this file, but modernize the 'of' function; ptr_lib is gone.\n\n\nimport ctypes\n\nfrom collections.abc import Iterable\n\nimport numpy\n\nimport awkward as ak\n\n\ndef of(*arrays):\n libs = set()\n for array in arrays:\n nplike = getattr(array, \"nplike\", None)\n if nplike is not None:\n libs.add(nplike)\n\n if any(isinstance(x, ak._v2._typetracer.TypeTracer) for x in libs):\n return ak._v2._typetracer.TypeTracer.instance()\n\n if libs == set():\n return Numpy.instance()\n elif len(libs) == 1:\n return next(iter(libs))\n else:\n raise ValueError(\n \"\"\"attempting to use both a 'cpu' array and a 'cuda' array in the \"\"\"\n \"\"\"same operation; use one of\n\n ak.to_kernels(array, 'cpu')\n ak.to_kernels(array, 'cuda')\n\nto move one or the other to main memory or the GPU(s).\"\"\"\n + ak._util.exception_suffix(__file__)\n )\n\n\nclass Singleton:\n _instance = None\n\n @classmethod\n def instance(cls):\n if cls._instance is None:\n cls._instance = cls()\n return cls._instance\n\n\nclass NumpyMetadata(Singleton):\n bool_ = numpy.bool_\n int8 = numpy.int8\n int16 = numpy.int16\n int32 = numpy.int32\n int64 = numpy.int64\n uint8 = numpy.uint8\n uint16 = numpy.uint16\n uint32 = numpy.uint32\n uint64 = numpy.uint64\n float32 = numpy.float32\n float64 = numpy.float64\n complex64 = numpy.complex64\n complex128 = numpy.complex128\n str_ = numpy.str_\n bytes_ = numpy.bytes_\n\n intp = numpy.intp\n integer = numpy.integer\n signedinteger = numpy.signedinteger\n unsignedinteger = numpy.unsignedinteger\n floating = numpy.floating\n number = numpy.number\n object_ = numpy.object_\n generic = numpy.generic\n\n dtype = numpy.dtype\n ufunc = numpy.ufunc\n iinfo = numpy.iinfo\n errstate = numpy.errstate\n newaxis = numpy.newaxis\n\n ndarray = numpy.ndarray\n\n nan = numpy.nan\n inf = numpy.inf\n\n nat = numpy.datetime64(\"NaT\")\n datetime_data = numpy.datetime_data\n issubdtype = numpy.issubdtype\n\n AxisError = numpy.AxisError\n\n\nif hasattr(numpy, \"float16\"):\n NumpyMetadata.float16 = numpy.float16\n\nif hasattr(numpy, \"float128\"):\n NumpyMetadata.float128 = numpy.float128\n\nif hasattr(numpy, \"complex256\"):\n NumpyMetadata.complex256 = numpy.complex256\n\nif hasattr(numpy, \"datetime64\"):\n NumpyMetadata.datetime64 = numpy.datetime64\n\nif hasattr(numpy, \"timedelta64\"):\n NumpyMetadata.timedelta64 = numpy.timedelta64\n\nNumpyMetadata.all_complex = tuple(\n getattr(numpy, x) for x in dir(NumpyMetadata) if x.startswith(\"complex\")\n)\n\n\nclass NumpyLike(Singleton):\n known_data = True\n known_shape = True\n known_dtype = True\n\n ############################ array creation\n\n def array(self, *args, **kwargs):\n # data[, dtype=[, copy=]]\n return self._module.array(*args, **kwargs)\n\n def asarray(self, *args, **kwargs):\n # array[, dtype=][, order=]\n return self._module.asarray(*args, **kwargs)\n\n def ascontiguousarray(self, *args, **kwargs):\n # array[, dtype=]\n return self._module.ascontiguousarray(*args, **kwargs)\n\n def isscalar(self, *args, **kwargs):\n return self._module.isscalar(*args, **kwargs)\n\n def frombuffer(self, *args, **kwargs):\n # array[, dtype=]\n return self._module.frombuffer(*args, **kwargs)\n\n def zeros(self, *args, **kwargs):\n # shape/len[, dtype=]\n return self._module.zeros(*args, **kwargs)\n\n def ones(self, *args, **kwargs):\n # shape/len[, dtype=]\n return self._module.ones(*args, **kwargs)\n\n def empty(self, *args, **kwargs):\n # 
shape/len[, dtype=]\n return self._module.empty(*args, **kwargs)\n\n def full(self, *args, **kwargs):\n # shape/len, value[, dtype=]\n return self._module.full(*args, **kwargs)\n\n def zeros_like(self, *args, **kwargs):\n # array\n return self._module.zeros_like(*args, **kwargs)\n\n def ones_like(self, *args, **kwargs):\n # array\n return self._module.ones_like(*args, **kwargs)\n\n def full_like(self, *args, **kwargs):\n # array, fill_value\n return self._module.full_like(*args, **kwargs)\n\n def arange(self, *args, **kwargs):\n # stop[, dtype=]\n # start, stop[, dtype=]\n # start, stop, step[, dtype=]\n return self._module.arange(*args, **kwargs)\n\n def meshgrid(self, *args, **kwargs):\n # *arrays, indexing=\"ij\"\n return self._module.meshgrid(*args, **kwargs)\n\n ############################ testing\n\n def shape(self, *args, **kwargs):\n # array\n return self._module.shape(*args, **kwargs)\n\n def array_equal(self, *args, **kwargs):\n # array1, array2\n return self._module.array_equal(*args, **kwargs)\n\n def size(self, *args, **kwargs):\n # array\n return self._module.size(*args, **kwargs)\n\n def searchsorted(self, *args, **kwargs):\n # haystack, needle, side=\"right\"\n return self._module.searchsorted(*args, **kwargs)\n\n def argsort(self, *args, **kwargs):\n # array\n return self._module.argsort(*args, **kwargs)\n\n ############################ manipulation\n\n def broadcast_arrays(self, *args, **kwargs):\n # array1[, array2[, ...]]\n return self._module.broadcast_arrays(*args, **kwargs)\n\n def cumsum(self, *args, **kwargs):\n # arrays[, out=]\n return self._module.cumsum(*args, **kwargs)\n\n def cumprod(self, *args, **kwargs):\n # arrays[, out=]\n return self._module.cumprod(*args, **kwargs)\n\n def nonzero(self, *args, **kwargs):\n # array\n return self._module.nonzero(*args, **kwargs)\n\n def unique(self, *args, **kwargs):\n # array\n return self._module.unique(*args, **kwargs)\n\n def concatenate(self, *args, **kwargs):\n # arrays\n return self._module.concatenate(*args, **kwargs)\n\n def repeat(self, *args, **kwargs):\n # array, int\n # array1, array2\n return self._module.repeat(*args, **kwargs)\n\n def stack(self, *args, **kwargs):\n # arrays\n return self._module.stack(*args, **kwargs)\n\n def vstack(self, *args, **kwargs):\n # arrays\n return self._module.vstack(*args, **kwargs)\n\n def packbits(self, *args, **kwargs):\n # array\n return self._module.packbits(*args, **kwargs)\n\n def unpackbits(self, *args, **kwargs):\n # array\n return self._module.unpackbits(*args, **kwargs)\n\n def atleast_1d(self, *args, **kwargs):\n # *arrays\n return self._module.atleast_1d(*args, **kwargs)\n\n def broadcast_to(self, *args, **kwargs):\n # array, shape\n return self._module.broadcast_to(*args, **kwargs)\n\n def append(self, *args, **kwargs):\n # array, element\n return self._module.append(*args, **kwargs)\n\n def where(self, *args, **kwargs):\n # array, element\n return self._module.where(*args, **kwargs)\n\n ############################ ufuncs\n\n def add(self, *args, **kwargs):\n # array1, array2\n return self._module.add(*args, **kwargs)\n\n def multiply(self, *args, **kwargs):\n # array1, array2\n return self._module.multiply(*args, **kwargs)\n\n def logical_or(self, *args, **kwargs):\n # array1, array2\n return self._module.logical_or(*args, **kwargs)\n\n def logical_and(self, *args, **kwargs):\n # array1, array2\n return self._module.logical_and(*args, **kwargs)\n\n def sqrt(self, *args, **kwargs):\n # array\n return self._module.sqrt(*args, **kwargs)\n\n def exp(self, *args, 
**kwargs):\n # array\n return self._module.exp(*args, **kwargs)\n\n def true_divide(self, *args, **kwargs):\n # array1, array2\n return self._module.true_divide(*args, **kwargs)\n\n def bitwise_or(self, *args, **kwargs):\n # array1, array2[, out=output]\n return self._module.bitwise_or(*args, **kwargs)\n\n def equal(self, *args, **kwargs):\n # array1, array2\n return self._module.equal(*args, **kwargs)\n\n def ceil(self, *args, **kwargs):\n # array\n return self._module.ceil(*args, **kwargs)\n\n def minimum(self, *args, **kwargs):\n # array1, array2\n return self._module.minimum(*args, **kwargs)\n\n def maximum(self, *args, **kwargs):\n # array1, array2\n return self._module.maximum(*args, **kwargs)\n\n ############################ almost-ufuncs\n\n def nan_to_num(self, *args, **kwargs):\n # array, copy=True, nan=0.0, posinf=None, neginf=None\n return self._module.nan_to_num(*args, **kwargs)\n\n def isclose(self, *args, **kwargs):\n # a, b, rtol=1e-05, atol=1e-08, equal_nan=False\n return self._module.isclose(*args, **kwargs)\n\n ############################ reducers\n\n def all(self, *args, **kwargs):\n # array\n return self._module.all(*args, **kwargs)\n\n def any(self, *args, **kwargs):\n # array\n kwargs.pop(\"prefer\", None)\n return self._module.any(*args, **kwargs)\n\n def count_nonzero(self, *args, **kwargs):\n # array\n return self._module.count_nonzero(*args, **kwargs)\n\n def sum(self, *args, **kwargs):\n # array\n return self._module.sum(*args, **kwargs)\n\n def prod(self, *args, **kwargs):\n # array\n return self._module.prod(*args, **kwargs)\n\n def min(self, *args, **kwargs):\n # array\n return self._module.min(*args, **kwargs)\n\n def max(self, *args, **kwargs):\n # array\n return self._module.max(*args, **kwargs)\n\n def argmin(self, *args, **kwargs):\n # array[, axis=]\n return self._module.argmin(*args, **kwargs)\n\n def argmax(self, *args, **kwargs):\n # array[, axis=]\n return self._module.argmax(*args, **kwargs)\n\n def array_str(self, *args, **kwargs):\n # array, max_line_width, precision=None, suppress_small=None\n return self._module.array_str(*args, **kwargs)\n\n def datetime_as_string(self, *args, **kwargs):\n return self._module.datetime_as_string(*args, **kwargs)\n\n\nclass NumpyKernel:\n def __init__(self, kernel, name_and_types):\n self._kernel = kernel\n self._name_and_types = name_and_types\n\n def __repr__(self):\n return \"<{} {}{}>\".format(\n type(self).__name__,\n self._name_and_types[0],\n \"\".join(\", \" + str(numpy.dtype(x)) for x in self._name_and_types[1:]),\n )\n\n @staticmethod\n def _cast(x, t):\n if issubclass(t, ctypes._Pointer):\n if isinstance(x, numpy.ndarray):\n return ctypes.cast(x.ctypes.data, t)\n else:\n return ctypes.cast(x, t)\n else:\n return x\n\n def __call__(self, *args):\n assert len(args) == len(self._kernel.argtypes)\n return self._kernel(\n *(self._cast(x, t) for x, t in zip(args, self._kernel.argtypes))\n )\n\n\nclass Numpy(NumpyLike):\n def to_rectilinear(self, array, *args, **kwargs):\n if isinstance(array, numpy.ndarray):\n return array\n\n elif isinstance(\n array,\n (\n ak.Array,\n ak.Record,\n ak.ArrayBuilder,\n ak.layout.Content,\n ak.layout.Record,\n ak.layout.ArrayBuilder,\n ak.layout.LayoutBuilder32,\n ak.layout.LayoutBuilder64,\n ),\n ):\n return ak.operations.convert.to_numpy(array, *args, **kwargs)\n\n elif isinstance(array, Iterable):\n return [self.to_rectilinear(x, *args, **kwargs) for x in array]\n\n else:\n raise TypeError(\"to_rectilinear argument must be iterable\")\n\n def __getitem__(self, 
name_and_types):\n return NumpyKernel(ak._cpu_kernels.kernel[name_and_types], name_and_types)\n\n def __init__(self):\n self._module = numpy\n\n @property\n def ma(self):\n return self._module.ma\n\n @property\n def char(self):\n return self._module.char\n\n @property\n def ndarray(self):\n return self._module.ndarray\n\n\nclass Cupy(NumpyLike):\n def to_rectilinear(self, array, *args, **kwargs):\n return ak.operations.convert.to_cupy(array, *args, **kwargs)\n\n def __getitem__(self, name_and_types):\n raise NotImplementedError(\"no CUDA in v2 yet\")\n\n def __init__(self):\n try:\n import cupy\n except ModuleNotFoundError:\n raise ModuleNotFoundError(\n \"\"\"to use CUDA arrays in Python, install the 'cupy' package with:\n\n pip install cupy --upgrade\n\nor\n\n conda install cupy\"\"\"\n ) from None\n self._module = cupy\n\n @property\n def ma(self):\n raise ValueError(\n \"CUDA arrays cannot have missing values until CuPy implements \"\n \"numpy.ma.MaskedArray\" + ak._util.exception_suffix(__file__)\n )\n\n @property\n def char(self):\n raise ValueError(\n \"CUDA arrays cannot do string manipulations until CuPy implements \"\n \"numpy.char\" + ak._util.exception_suffix(__file__)\n )\n\n @property\n def ndarray(self):\n return self._module.ndarray\n\n def asarray(self, array, dtype=None):\n if isinstance(\n array,\n (\n ak.highlevel.Array,\n ak.highlevel.Record,\n ak.layout.Content,\n ak.layout.Record,\n ),\n ):\n out = ak.operations.convert.to_cupy(array)\n if dtype is not None and out.dtype != dtype:\n return self._module.asarray(out, dtype=dtype)\n else:\n return out\n else:\n return self._module.asarray(array, dtype=dtype)\n\n def ascontiguousarray(self, array, dtype=None):\n if isinstance(\n array,\n (\n ak.highlevel.Array,\n ak.highlevel.Record,\n ak.layout.Content,\n ak.layout.Record,\n ),\n ):\n out = ak.operations.convert.to_cupy(array)\n if dtype is not None and out.dtype != dtype:\n return self._module.ascontiguousarray(out, dtype=dtype)\n else:\n return out\n else:\n return self._module.ascontiguousarray(array, dtype=dtype)\n\n def zeros(self, *args, **kwargs):\n return self._module.zeros(*args, **kwargs)\n\n def frombuffer(self, *args, **kwargs):\n np_array = numpy.frombuffer(*args, **kwargs)\n return self._module.array(np_array)\n\n def array_equal(self, array1, array2):\n # CuPy issue?\n if array1.shape != array2.shape:\n return False\n else:\n return self._module.all(array1 - array2 == 0)\n\n def repeat(self, array, repeats):\n # https://github.com/cupy/cupy/issues/3849\n if isinstance(repeats, self._module.ndarray):\n all_stops = self._module.cumsum(repeats)\n parents = self._module.zeros(all_stops[-1].item(), dtype=int)\n stops, stop_counts = self._module.unique(all_stops[:-1], return_counts=True)\n parents[stops] = stop_counts\n self._module.cumsum(parents, out=parents)\n return array[parents]\n else:\n return self._module.repeat(array, repeats)\n\n def nan_to_num(self, array, copy=True, nan=0.0, posinf=None, neginf=None):\n # https://github.com/cupy/cupy/issues/4867\n if copy:\n array = self._module.copy(array)\n if posinf is None:\n if array.dtype.kind == \"f\":\n posinf = numpy.finfo(array.dtype.type).max\n else:\n posinf = numpy.iinfo(array.dtype.type).max\n if neginf is None:\n if array.dtype.kind == \"f\":\n neginf = numpy.finfo(array.dtype.type).min\n else:\n neginf = numpy.iinfo(array.dtype.type).min\n\n array[self._module.isnan(array)] = nan\n array[self._module.isinf(array) & (array > 0)] = posinf\n array[self._module.isinf(array) & (array < 0)] = neginf\n 
return array\n\n # For all reducers: https://github.com/cupy/cupy/issues/3819\n\n def all(self, array, axis=None, **kwargs):\n kwargs.pop(\"prefer\", None)\n out = self._module.all(array, axis=axis)\n if axis is None and isinstance(out, self._module.ndarray):\n return out.item()\n else:\n return out\n\n def any(self, array, axis=None, **kwargs):\n kwargs.pop(\"prefer\", None)\n out = self._module.any(array, axis=axis)\n if axis is None and isinstance(out, self._module.ndarray):\n return out.item()\n else:\n return out\n\n def count_nonzero(self, array, axis=None):\n out = self._module.count_nonzero(array, axis=axis)\n if axis is None and isinstance(out, self._module.ndarray):\n return out.item()\n else:\n return out\n\n def sum(self, array, axis=None):\n out = self._module.sum(array, axis=axis)\n if axis is None and isinstance(out, self._module.ndarray):\n return out.item()\n else:\n return out\n\n def prod(self, array, axis=None):\n out = self._module.prod(array, axis=axis)\n if axis is None and isinstance(out, self._module.ndarray):\n return out.item()\n else:\n return out\n\n def min(self, array, axis=None):\n out = self._module.min(array, axis=axis)\n if axis is None and isinstance(out, self._module.ndarray):\n return out.item()\n else:\n return out\n\n def max(self, array, axis=None):\n out = self._module.max(array, axis=axis)\n if axis is None and isinstance(out, self._module.ndarray):\n return out.item()\n else:\n return out\n\n def argmin(self, array, axis=None):\n out = self._module.argmin(array, axis=axis)\n if axis is None and isinstance(out, self._module.ndarray):\n return out.item()\n else:\n return out\n\n def argmax(self, array, axis=None):\n out = self._module.argmax(array, axis=axis)\n if axis is None and isinstance(out, self._module.ndarray):\n return out.item()\n else:\n return out\n\n def array_str(\n self, array, max_line_width=None, precision=None, suppress_small=None\n ):\n # array, max_line_width, precision=None, suppress_small=None\n return self._module.array_str(array, max_line_width, precision, suppress_small)\n", "path": "src/awkward/nplike.py" } ]
diff --git a/src/awkward/nplike.py b/src/awkward/nplike.py index fac7effe42..e385a0b419 100644 --- a/src/awkward/nplike.py +++ b/src/awkward/nplike.py @@ -400,7 +400,10 @@ def __call__(self, *args): class Numpy(NumpyLike): def to_rectilinear(self, array, *args, **kwargs): - if isinstance( + if isinstance(array, numpy.ndarray): + return array + + elif isinstance( array, ( ak.Array, diff --git a/tests/test_1247-numpy-to_rectilinear-ndarray.py b/tests/test_1247-numpy-to_rectilinear-ndarray.py new file mode 100644 index 0000000000..ac97c68513 --- /dev/null +++ b/tests/test_1247-numpy-to_rectilinear-ndarray.py @@ -0,0 +1,13 @@ +# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE + +import pytest # noqa: F401 +import numpy as np # noqa: F401 +import awkward as ak # noqa: F401 + + +def test(): + array = np.array([1, 2, 9, 0]) + nplike = ak.nplike.of(array) + + ak_array = ak.from_numpy(array) + assert ak.to_list(nplike.to_rectilinear(array)) == ak.to_list(ak_array) diff --git a/tests/v2/test_1247-numpy-to_rectilinear-ndarray.py b/tests/v2/test_1247-numpy-to_rectilinear-ndarray.py new file mode 100644 index 0000000000..28177a33ea --- /dev/null +++ b/tests/v2/test_1247-numpy-to_rectilinear-ndarray.py @@ -0,0 +1,12 @@ +# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE + +import pytest # noqa: F401 +import numpy as np # noqa: F401 +import awkward as ak # noqa: F401 + + +def test(): + array = np.array([1, 2, 9, 0]) + nplike = ak.nplike.of(array) + ak_array = ak._v2.operations.convert.from_numpy(array) + assert nplike.to_rectilinear(array).tolist() == ak_array.tolist()
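With the `numpy.ndarray` pass-through in place (and the new tests above exercising it), the mixed call from the issue should no longer fail inside `_to_rectilinear`: the Awkward operand is converted via `to_numpy` and the NumPy operand is returned unchanged before `np.isin` runs. A brief usage sketch of the expected behaviour, assuming nothing else in the `__array_function__` path interferes:

```python
import numpy as np
import awkward as ak

reference = np.r_[1, 2, 3, 4]
test = ak.Array([1, 2, 9, 0])

# both operands are reduced to rectilinear NumPy arrays before dispatch
print(np.isin(test, reference))  # expected: [ True  True False False]
```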
sktime__sktime-5368
[BUG] `numba` related failures on main from `tslearn` `lcss` On main, it seems we have the following `numba` related failures: ``` During: resolving callee type: type(CPUDispatcher(<function _local_squared_dist at 0x14fc267a0>)) During: typing of call at /Users/runner/hostedtoolcache/Python/3.10.13/x64/lib/python3.10/site-packages/tslearn/metrics/dtw_variants.py (1421) ``` This is not related to the recent newer `numba` release as we did not upgrade yet. For test runs with the failures, see here: https://github.com/sktime/sktime/actions/runs/6424459407/job/17445296470?pr=5083
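The traceback points into tslearn's jitted code (`_local_squared_dist` in `dtw_variants.py`) rather than into sktime itself, so a first triage step is checking whether the failure reproduces with tslearn alone. A hedged sketch of such a check (assumptions: tslearn >= 0.6, where `lcss` is exported from `tslearn.metrics`, and float input, which tslearn's jitted routines expect; this is a hypothetical triage script, not the recorded fix):

```python
# hypothetical triage script, independent of the sktime adapter shown below
import numpy as np
from tslearn.metrics import lcss

x = np.random.random((10, 1))  # (n_timepoints, n_variables), float64
y = np.random.random((12, 1))

# if this already raises the numba typing error, the problem lies in the
# tslearn/numba combination rather than in sktime's _TslearnPwTrafoAdapter
print(lcss(x, y))
```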
[ { "content": "# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)\n\"\"\"Implements adapter for tslearn distances and kernels.\"\"\"\nimport numpy as np\n\n__all__ = [\"_TslearnPwTrafoAdapter\"]\n__author__ = [\"fkiraly\"]\n\n\ndef _subset_dict(d, keys):\n \"\"\"Subsets dictionary to keys in iterable keys.\n\n Parameters\n ----------\n d : dict\n dictionary to subset\n keys : iterable\n keys to subset to\n\n Returns\n -------\n dict\n subsetted dictionary\n \"\"\"\n return {key: d[key] for key in keys if key in d}\n\n\nclass _TslearnPwTrafoAdapter:\n \"\"\"Base adapter mixin for tslearn distances and kernels.\"\"\"\n\n _tags = {\n \"symmetric\": False, # is the transformer symmetric, i.e., t(x,y)=t(y,x) always?\n \"X_inner_mtype\": \"df-list\",\n # which mtype is used internally in _transform?\n \"fit_is_empty\": True, # is \"fit\" empty? Yes, for all pairwise transforms\n \"capability:missing_values\": True, # can estimator handle missing data?\n \"capability:multivariate\": True, # can estimator handle multivariate data?\n \"pwtrafo_type\": \"distance\", # type of pw. transformer, \"kernel\" or \"distance\"\n \"python_dependencies\": [\"tslearn\"],\n }\n\n # parameters to pass to the inner tslearn estimator, list of str\n # if None, will pass all of self.get_params()\n # otherwise, passes only the parameters in the list of str _inner_params\n _inner_params = None\n\n # controls whether vectorization is applied to the tslearn pwtrafo\n # True: the adapted function is cdist-like, it can take Panel data directly\n # False: the adapted function takes two time series and needs to be vectorized\n _is_cdist = True\n\n def _get_tslearn_pwtrafo(self):\n \"\"\"Abstract method to get tslearn pwtrafo.\n\n should import and return tslearn pwtrafo\n \"\"\"\n # from tslearn import pwtrafo\n #\n # return pwtrafo\n raise NotImplementedError(\"abstract method\")\n\n def _eval_tslearn_pwtrafo(self, X, X2=None):\n \"\"\"Evaluate tslearn pwtrafo on two time series.\n\n The default returns of _get_tslearn_pwtrafo\n evaluated at X1, X2 and self.get_params\n\n Parameters\n ----------\n X, X2: 2D np.ndarrays of format (n_variables, n_timepoints)\n two time series to compute the pairwise transform on\n\n Returns\n -------\n float\n _get_tslearn_pwtrafo result evaluated at X1, X2, and self.get_params()\n \"\"\"\n if X2 is None:\n X2 = X\n\n pwtrafo = self._get_tslearn_pwtrafo()\n params = self.get_params()\n if self._inner_params is not None:\n params = _subset_dict(params, self._inner_params)\n\n return pwtrafo(X, X2, **params)\n\n def _coerce_df_list_to_list_of_arr(self, X):\n return [df.values for df in X]\n\n def _eval_tslearn_pwtrafo_vectorized(self, X, X2=None):\n \"\"\"Evaluate tslearn pwtrafo on two time series panels.\n\n Vectorizes _eval_tslearn_pwtrafo over the first dimensions.\n\n Parameters\n ----------\n X, X2: 3D np.ndarrays of format (n_instances n_variables, n_timepoints)\n two time series panels to compute the pairwise transform on\n\n Returns\n -------\n 2D np.ndarray\n (i, j)-th entry is _eval_tslearn_pwtrafo(self, X1[i], X2[j])\n \"\"\"\n if X2 is None:\n X2 = X\n\n m = len(X)\n n = len(X2)\n res = np.zeros((m, n))\n for i in range(m):\n for j in range(n):\n res[i, j] = self._eval_tslearn_pwtrafo(X[i], X2[j])\n return res\n\n def _transform(self, X, X2=None):\n \"\"\"Compute distance/kernel matrix.\n\n Core logic\n\n Behaviour: returns pairwise distance/kernel matrix\n between samples in X and X2\n if X2 is not passed, is equal to X\n if X/X2 is a pd.DataFrame and 
contains non-numeric columns,\n these are removed before computation\n\n Parameters\n ----------\n X: 3D np.array of shape [num_instances, num_vars, num_time_points]\n X2: 3D np.array of shape [num_instances, num_vars, num_time_points], optional\n default X2 = X\n\n Returns\n -------\n distmat: np.array of shape [n, m]\n (i,j)-th entry contains distance/kernel between X[i] and X2[j]\n \"\"\"\n if isinstance(X, list):\n X = self._coerce_df_list_to_list_of_arr(X)\n if isinstance(X2, list):\n X2 = self._coerce_df_list_to_list_of_arr(X2)\n\n return self._eval_tslearn_pwtrafo(X, X2)\n", "path": "sktime/dists_kernels/base/adapters/_tslearn.py" } ]
[ { "content": "# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)\n\"\"\"Implements adapter for tslearn distances and kernels.\"\"\"\nimport numpy as np\n\n__all__ = [\"_TslearnPwTrafoAdapter\"]\n__author__ = [\"fkiraly\"]\n\n\ndef _subset_dict(d, keys):\n \"\"\"Subsets dictionary to keys in iterable keys.\n\n Parameters\n ----------\n d : dict\n dictionary to subset\n keys : iterable\n keys to subset to\n\n Returns\n -------\n dict\n subsetted dictionary\n \"\"\"\n return {key: d[key] for key in keys if key in d}\n\n\nclass _TslearnPwTrafoAdapter:\n \"\"\"Base adapter mixin for tslearn distances and kernels.\"\"\"\n\n _tags = {\n \"symmetric\": False, # is the transformer symmetric, i.e., t(x,y)=t(y,x) always?\n \"X_inner_mtype\": \"df-list\",\n # which mtype is used internally in _transform?\n \"fit_is_empty\": True, # is \"fit\" empty? Yes, for all pairwise transforms\n \"capability:missing_values\": True, # can estimator handle missing data?\n \"capability:multivariate\": True, # can estimator handle multivariate data?\n \"pwtrafo_type\": \"distance\", # type of pw. transformer, \"kernel\" or \"distance\"\n \"python_dependencies\": [\"tslearn\"],\n }\n\n # parameters to pass to the inner tslearn estimator, list of str\n # if None, will pass all of self.get_params()\n # otherwise, passes only the parameters in the list of str _inner_params\n _inner_params = None\n\n # controls whether vectorization is applied to the tslearn pwtrafo\n # True: the adapted function is cdist-like, it can take Panel data directly\n # False: the adapted function takes two time series and needs to be vectorized\n _is_cdist = True\n\n def _get_tslearn_pwtrafo(self):\n \"\"\"Abstract method to get tslearn pwtrafo.\n\n should import and return tslearn pwtrafo\n \"\"\"\n # from tslearn import pwtrafo\n #\n # return pwtrafo\n raise NotImplementedError(\"abstract method\")\n\n def _eval_tslearn_pwtrafo(self, X, X2=None):\n \"\"\"Evaluate tslearn pwtrafo on two time series.\n\n The default returns of _get_tslearn_pwtrafo\n evaluated at X1, X2 and self.get_params\n\n Parameters\n ----------\n X, X2: 2D np.ndarrays of format (n_variables, n_timepoints)\n two time series to compute the pairwise transform on\n\n Returns\n -------\n float\n _get_tslearn_pwtrafo result evaluated at X1, X2, and self.get_params()\n \"\"\"\n if X2 is None:\n X2 = X\n\n pwtrafo = self._get_tslearn_pwtrafo()\n params = self.get_params()\n if self._inner_params is not None:\n params = _subset_dict(params, self._inner_params)\n\n return pwtrafo(X, X2, **params)\n\n def _coerce_df_list_to_list_of_arr(self, X):\n return [df.values for df in X]\n\n def _eval_tslearn_pwtrafo_vectorized(self, X, X2=None):\n \"\"\"Evaluate tslearn pwtrafo on two time series panels.\n\n Vectorizes _eval_tslearn_pwtrafo over the first dimensions.\n\n Parameters\n ----------\n X, X2: 3D np.ndarrays of format (n_instances n_variables, n_timepoints)\n two time series panels to compute the pairwise transform on\n\n Returns\n -------\n 2D np.ndarray\n (i, j)-th entry is _eval_tslearn_pwtrafo(self, X1[i], X2[j])\n \"\"\"\n if X2 is None:\n X2 = X\n\n m = len(X)\n n = len(X2)\n res = np.zeros((m, n))\n for i in range(m):\n for j in range(n):\n res[i, j] = self._eval_tslearn_pwtrafo(X[i], X2[j])\n return res\n\n def _transform(self, X, X2=None):\n \"\"\"Compute distance/kernel matrix.\n\n Core logic\n\n Behaviour: returns pairwise distance/kernel matrix\n between samples in X and X2\n if X2 is not passed, is equal to X\n if X/X2 is a pd.DataFrame and 
contains non-numeric columns,\n these are removed before computation\n\n Parameters\n ----------\n X: 3D np.array of shape [num_instances, num_vars, num_time_points]\n X2: 3D np.array of shape [num_instances, num_vars, num_time_points], optional\n default X2 = X\n\n Returns\n -------\n distmat: np.array of shape [n, m]\n (i,j)-th entry contains distance/kernel between X[i] and X2[j]\n \"\"\"\n if isinstance(X, list):\n X = self._coerce_df_list_to_list_of_arr(X)\n if isinstance(X2, list):\n X2 = self._coerce_df_list_to_list_of_arr(X2)\n\n if self._is_cdist:\n return self._eval_tslearn_pwtrafo(X, X2)\n else:\n return self._eval_tslearn_pwtrafo_vectorized(X, X2)\n", "path": "sktime/dists_kernels/base/adapters/_tslearn.py" } ]
diff --git a/CONTRIBUTORS.md b/CONTRIBUTORS.md index 09ffafa2073..af09cdb481b 100644 --- a/CONTRIBUTORS.md +++ b/CONTRIBUTORS.md @@ -2,7 +2,7 @@ Contributors ============ <!-- ALL-CONTRIBUTORS-BADGE:START - Do not remove or modify this section --> -[![All Contributors](https://img.shields.io/badge/all_contributors-230-orange.svg)](#contributors) +[![All Contributors](https://img.shields.io/badge/all_contributors-232-orange.svg)](#contributors) <!-- ALL-CONTRIBUTORS-BADGE:END --> This project follows the [all-contributors](https://github.com/all-contributors/all-contributors) specification. Contributions of any kind welcome! @@ -30,13 +30,14 @@ Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/d <td align="center" valign="top" width="11.11%"><a href="https://github.com/alex-hh"><img src="https://avatars.githubusercontent.com/u/5719745?v=4?s=100" width="100px;" alt="Alex Hawkins-Hooker"/><br /><sub><b>Alex Hawkins-Hooker</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=alex-hh" title="Code">💻</a></td> <td align="center" valign="top" width="11.11%"><a href="https://medium.com/@alexandra.amidon"><img src="https://avatars2.githubusercontent.com/u/17050655?v=4?s=100" width="100px;" alt="Alexandra Amidon"/><br /><sub><b>Alexandra Amidon</b></sub></a><br /><a href="#blog-lynnssi" title="Blogposts">📝</a> <a href="https://github.com/sktime/sktime/commits?author=lynnssi" title="Documentation">📖</a> <a href="#ideas-lynnssi" title="Ideas, Planning, & Feedback">🤔</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/alexfilothodoros"><img src="https://avatars.githubusercontent.com/u/6419847?v=4?s=100" width="100px;" alt="Alexandros Filothodoros"/><br /><sub><b>Alexandros Filothodoros</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=alexfilothodoros" title="Documentation">📖</a> <a href="#maintenance-alexfilothodoros" title="Maintenance">🚧</a></td> + <td align="center" valign="top" width="11.11%"><a href="https://github.com/ali-parizad"><img src="https://avatars.githubusercontent.com/u/13907016?v=4?s=100" width="100px;" alt="Ali Parizad"/><br /><sub><b>Ali Parizad</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=ali-parizad" title="Code">💻</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/ali-tny"><img src="https://avatars.githubusercontent.com/u/26010073?v=4?s=100" width="100px;" alt="Ali Teeney"/><br /><sub><b>Ali Teeney</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=ali-tny" title="Code">💻</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/myprogrammerpersonality"><img src="https://avatars.githubusercontent.com/u/49058167?v=4?s=100" width="100px;" alt="Ali Yazdizadeh"/><br /><sub><b>Ali Yazdizadeh</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=myprogrammerpersonality" title="Documentation">📖</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/alwinw"><img src="https://avatars3.githubusercontent.com/u/16846521?v=4?s=100" width="100px;" alt="Alwin"/><br /><sub><b>Alwin</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=alwinw" title="Documentation">📖</a> <a href="https://github.com/sktime/sktime/commits?author=alwinw" title="Code">💻</a> <a href="#maintenance-alwinw" title="Maintenance">🚧</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/AnH0ang"><img 
src="?s=100" width="100px;" alt="An Hoang"/><br /><sub><b>An Hoang</b></sub></a><br /><a href="https://github.com/sktime/sktime/issues?q=author%3AAnH0ang" title="Bug reports">🐛</a> <a href="https://github.com/sktime/sktime/commits?author=AnH0ang" title="Code">💻</a></td> - <td align="center" valign="top" width="11.11%"><a href="https://github.com/akanz1"><img src="https://avatars3.githubusercontent.com/u/51492342?v=4?s=100" width="100px;" alt="Andreas Kanz"/><br /><sub><b>Andreas Kanz</b></sub></a><br /><a href="#tutorial-akanz1" title="Tutorials">✅</a></td> </tr> <tr> + <td align="center" valign="top" width="11.11%"><a href="https://github.com/akanz1"><img src="https://avatars3.githubusercontent.com/u/51492342?v=4?s=100" width="100px;" alt="Andreas Kanz"/><br /><sub><b>Andreas Kanz</b></sub></a><br /><a href="#tutorial-akanz1" title="Tutorials">✅</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/angus924"><img src="https://avatars0.githubusercontent.com/u/55837131?v=4?s=100" width="100px;" alt="Angus Dempster"/><br /><sub><b>Angus Dempster</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=angus924" title="Code">💻</a> <a href="https://github.com/sktime/sktime/commits?author=angus924" title="Tests">⚠️</a> <a href="#tutorial-angus924" title="Tutorials">✅</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/yarnabrina/"><img src="https://avatars.githubusercontent.com/u/39331844?v=4?s=100" width="100px;" alt="Anirban Ray"/><br /><sub><b>Anirban Ray</b></sub></a><br /><a href="https://github.com/sktime/sktime/issues?q=author%3Ayarnabrina" title="Bug reports">🐛</a> <a href="https://github.com/sktime/sktime/commits?author=yarnabrina" title="Code">💻</a> <a href="https://github.com/sktime/sktime/commits?author=yarnabrina" title="Documentation">📖</a> <a href="#ideas-yarnabrina" title="Ideas, Planning, & Feedback">🤔</a> <a href="#maintenance-yarnabrina" title="Maintenance">🚧</a> <a href="#mentoring-yarnabrina" title="Mentoring">🧑‍🏫</a> <a href="#question-yarnabrina" title="Answering Questions">💬</a> <a href="https://github.com/sktime/sktime/pulls?q=is%3Apr+reviewed-by%3Ayarnabrina" title="Reviewed Pull Requests">👀</a> <a href="https://github.com/sktime/sktime/commits?author=yarnabrina" title="Tests">⚠️</a></td> <td align="center" valign="top" width="11.11%"><a href="https://www.linkedin.com/in/yard1/"><img src="https://avatars.githubusercontent.com/u/10364161?v=4?s=100" width="100px;" alt="Antoni Baum"/><br /><sub><b>Antoni Baum</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=Yard1" title="Code">💻</a></td> @@ -45,9 +46,9 @@ Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/d <td align="center" valign="top" width="11.11%"><a href="https://github.com/bugslayer-332"><img src="?s=100" width="100px;" alt="Arepalli Yashwanth Reddy"/><br /><sub><b>Arepalli Yashwanth Reddy</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=bugslayer-332" title="Code">💻</a> <a href="https://github.com/sktime/sktime/issues?q=author%3Abugslayer-332" title="Bug reports">🐛</a> <a href="https://github.com/sktime/sktime/commits?author=bugslayer-332" title="Documentation">📖</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/ermshaua/"><img src="https://avatars.githubusercontent.com/u/23294512?v=4?s=100" width="100px;" alt="Arik Ermshaus"/><br /><sub><b>Arik Ermshaus</b></sub></a><br /><a 
href="https://github.com/sktime/sktime/commits?author=ermshaua" title="Code">💻</a></td> <td align="center" valign="top" width="11.11%"><a href="https://www.linkedin.com/in/arnau-jim%C3%A9nez-castany-b2ba2597/"><img src="https://avatars.githubusercontent.com/u/38285979?s=400&u=8bdd0021cb5bae47ba5bd69c355c694dc3090f5e&v=4?s=100" width="100px;" alt="Arnau"/><br /><sub><b>Arnau</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=Arnau" title="Code">💻</a></td> - <td align="center" valign="top" width="11.11%"><a href="https://github.com/arnavrneo"><img src="https://avatars.githubusercontent.com/u/48650781?v=4?s=100" width="100px;" alt="Arnav"/><br /><sub><b>Arnav</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=arnavrneo" title="Code">💻</a></td> </tr> <tr> + <td align="center" valign="top" width="11.11%"><a href="https://github.com/arnavrneo"><img src="https://avatars.githubusercontent.com/u/48650781?v=4?s=100" width="100px;" alt="Arnav"/><br /><sub><b>Arnav</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=arnavrneo" title="Code">💻</a></td> <td align="center" valign="top" width="11.11%"><a href="https://www.linkedin.com/in/ayushmaan-seth-4a96364a/"><img src="https://avatars1.githubusercontent.com/u/29939762?v=4?s=100" width="100px;" alt="Ayushmaan Seth"/><br /><sub><b>Ayushmaan Seth</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=Ayushmaanseth" title="Code">💻</a> <a href="https://github.com/sktime/sktime/pulls?q=is%3Apr+reviewed-by%3AAyushmaanseth" title="Reviewed Pull Requests">👀</a> <a href="https://github.com/sktime/sktime/commits?author=Ayushmaanseth" title="Tests">⚠️</a> <a href="https://github.com/sktime/sktime/commits?author=Ayushmaanseth" title="Documentation">📖</a> <a href="#eventOrganizing-Ayushmaanseth" title="Event Organizing">📋</a> <a href="#tutorial-Ayushmaanseth" title="Tutorials">✅</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/BandaSaiTejaReddy"><img src="https://avatars0.githubusercontent.com/u/31387911?v=4?s=100" width="100px;" alt="BANDASAITEJAREDDY"/><br /><sub><b>BANDASAITEJAREDDY</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=BandaSaiTejaReddy" title="Code">💻</a> <a href="https://github.com/sktime/sktime/commits?author=BandaSaiTejaReddy" title="Documentation">📖</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/badrmarani"><img src="https://avatars.githubusercontent.com/badrmarani?s=100" width="100px;" alt="Badr-Eddine Marani"/><br /><sub><b>Badr-Eddine Marani</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=badrmarani" title="Code">💻</a></td> @@ -56,9 +57,9 @@ Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/d <td align="center" valign="top" width="11.11%"><a href="https://haskarb.github.io/"><img src="https://avatars.githubusercontent.com/u/20501023?v=4?s=100" width="100px;" alt="Bhaskar Dhariyal"/><br /><sub><b>Bhaskar Dhariyal</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=haskarb" title="Code">💻</a> <a href="https://github.com/sktime/sktime/commits?author=haskarb" title="Tests">⚠️</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/BINAYKUMAR943"><img src="https://avatars.githubusercontent.com/u/38756834?v=4?s=100" width="100px;" alt="Binay Kumar"/><br /><sub><b>Binay Kumar</b></sub></a><br /><a 
href="https://github.com/sktime/sktime/commits?author=BINAYKUMAR943" title="Code">💻</a> <a href="https://github.com/sktime/sktime/commits?author=BINAYKUMAR943" title="Documentation">📖</a> <a href="https://github.com/sktime/sktime/commits?author=BINAYKUMAR943" title="Tests">⚠️</a></td> <td align="center" valign="top" width="11.11%"><a href="https://angelpone.github.io/"><img src="https://avatars.githubusercontent.com/u/32930283?v=4?s=100" width="100px;" alt="Bohan Zhang"/><br /><sub><b>Bohan Zhang</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=AngelPone" title="Code">💻</a></td> - <td align="center" valign="top" width="11.11%"><a href="https://github.com/boukepostma"><img src="https://avatars.githubusercontent.com/boukepostma?s=100" width="100px;" alt="Bouke Postma"/><br /><sub><b>Bouke Postma</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=boukepostma" title="Code">💻</a> <a href="https://github.com/sktime/sktime/issues?q=author%3Aboukepostma" title="Bug reports">🐛</a> <a href="#ideas-boukepostma" title="Ideas, Planning, & Feedback">🤔</a></td> </tr> <tr> + <td align="center" valign="top" width="11.11%"><a href="https://github.com/boukepostma"><img src="https://avatars.githubusercontent.com/boukepostma?s=100" width="100px;" alt="Bouke Postma"/><br /><sub><b>Bouke Postma</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=boukepostma" title="Code">💻</a> <a href="https://github.com/sktime/sktime/issues?q=author%3Aboukepostma" title="Bug reports">🐛</a> <a href="#ideas-boukepostma" title="Ideas, Planning, & Feedback">🤔</a></td> <td align="center" valign="top" width="11.11%"><a href="https://bmurphyportfolio.netlify.com/"><img src="https://avatars2.githubusercontent.com/u/32182553?v=4?s=100" width="100px;" alt="Brian Murphy"/><br /><sub><b>Brian Murphy</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=bmurdata" title="Documentation">📖</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/Carlosbogo"><img src="https://avatars.githubusercontent.com/u/84228424?v=4?s=100" width="100px;" alt="Carlos Borrajo"/><br /><sub><b>Carlos Borrajo</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=Carlosbogo" title="Code">💻</a> <a href="https://github.com/sktime/sktime/commits?author=Carlosbogo" title="Documentation">📖</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/vnmabus"><img src="https://avatars1.githubusercontent.com/u/2364173?v=4?s=100" width="100px;" alt="Carlos Ramos Carreño"/><br /><sub><b>Carlos Ramos Carreño</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=vnmabus" title="Documentation">📖</a></td> @@ -67,9 +68,9 @@ Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/d <td align="center" valign="top" width="11.11%"><a href="https://github.com/ckastner"><img src="https://avatars.githubusercontent.com/u/15859947?v=4?s=100" width="100px;" alt="Christian Kastner"/><br /><sub><b>Christian Kastner</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=ckastner" title="Code">💻</a> <a href="https://github.com/sktime/sktime/issues?q=author%3Ackastner" title="Bug reports">🐛</a></td> <td align="center" valign="top" width="11.11%"><a href="https://www.linkedin.com/in/hoesler/"><img src="https://avatars.githubusercontent.com/u/1052770?v=4?s=100" width="100px;" alt="Christoph Hösler"/><br /><sub><b>Christoph Hösler</b></sub></a><br /><a 
href="https://github.com/sktime/sktime/commits?author=hoesler" title="Code">💻</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/cdahlin"><img src="https://avatars.githubusercontent.com/u/1567780?v=4?s=100" width="100px;" alt="Christopher Dahlin"/><br /><sub><b>Christopher Dahlin</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=cdahlin" title="Code">💻</a></td> - <td align="center" valign="top" width="11.11%"><a href="https://github.com/topher-lo"><img src="?s=100" width="100px;" alt="Christopher Lo"/><br /><sub><b>Christopher Lo</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=topher-lo" title="Code">💻</a> <a href="#ideas-topher-lo" title="Ideas, Planning, & Feedback">🤔</a></td> </tr> <tr> + <td align="center" valign="top" width="11.11%"><a href="https://github.com/topher-lo"><img src="?s=100" width="100px;" alt="Christopher Lo"/><br /><sub><b>Christopher Lo</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=topher-lo" title="Code">💻</a> <a href="#ideas-topher-lo" title="Ideas, Planning, & Feedback">🤔</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/Gigi1111"><img src="https://avatars.githubusercontent.com/Gigi1111?s=100" width="100px;" alt="Chung-Fan Tsai"/><br /><sub><b>Chung-Fan Tsai</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=Gigi1111" title="Tests">⚠️</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/ciaran-g"><img src="https://avatars.githubusercontent.com/u/41995662?v=4?s=100" width="100px;" alt="Ciaran Gilbert"/><br /><sub><b>Ciaran Gilbert</b></sub></a><br /><a href="https://github.com/sktime/sktime/issues?q=author%3Aciaran-g" title="Bug reports">🐛</a> <a href="https://github.com/sktime/sktime/commits?author=ciaran-g" title="Code">💻</a> <a href="https://github.com/sktime/sktime/commits?author=ciaran-g" title="Documentation">📖</a> <a href="https://github.com/sktime/sktime/commits?author=ciaran-g" title="Tests">⚠️</a> <a href="#ideas-ciaran-g" title="Ideas, Planning, & Feedback">🤔</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/ClaudiaSanches"><img src="https://avatars3.githubusercontent.com/u/28742178?v=4?s=100" width="100px;" alt="ClaudiaSanches"/><br /><sub><b>ClaudiaSanches</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=ClaudiaSanches" title="Code">💻</a> <a href="https://github.com/sktime/sktime/commits?author=ClaudiaSanches" title="Tests">⚠️</a></td> @@ -78,9 +79,9 @@ Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/d <td align="center" valign="top" width="11.11%"><a href="https://github.com/DBCerigo"><img src="https://avatars.githubusercontent.com/u/8318425?v=4?s=100" width="100px;" alt="Daniel Burkhardt Cerigo"/><br /><sub><b>Daniel Burkhardt Cerigo</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=DBCerigo" title="Code">💻</a></td> <td align="center" valign="top" width="11.11%"><a href="https://www.linkedin.com/in/daniel-martin-martinez"><img src="https://avatars.githubusercontent.com/dainelli98?s=100" width="100px;" alt="Daniel Martín Martínez"/><br /><sub><b>Daniel Martín Martínez</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=dainelli98" title="Documentation">📖</a> <a href="https://github.com/sktime/sktime/issues?q=author%3Adainelli98" title="Bug reports">🐛</a></td> <td align="center" valign="top" width="11.11%"><a 
href="https://github.com/dashapetr"><img src="https://avatars.githubusercontent.com/u/54349415?v=4?s=100" width="100px;" alt="Darya Petrashka"/><br /><sub><b>Darya Petrashka</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=dashapetr" title="Documentation">📖</a></td> - <td align="center" valign="top" width="11.11%"><a href="https://dhirschfeld.github.io/"><img src="https://avatars1.githubusercontent.com/u/881019?v=4?s=100" width="100px;" alt="Dave Hirschfeld"/><br /><sub><b>Dave Hirschfeld</b></sub></a><br /><a href="#infra-dhirschfeld" title="Infrastructure (Hosting, Build-Tools, etc)">🚇</a></td> </tr> <tr> + <td align="center" valign="top" width="11.11%"><a href="https://dhirschfeld.github.io/"><img src="https://avatars1.githubusercontent.com/u/881019?v=4?s=100" width="100px;" alt="Dave Hirschfeld"/><br /><sub><b>Dave Hirschfeld</b></sub></a><br /><a href="#infra-dhirschfeld" title="Infrastructure (Hosting, Build-Tools, etc)">🚇</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/davidbp"><img src="https://avatars3.githubusercontent.com/u/4223580?v=4?s=100" width="100px;" alt="David Buchaca Prats"/><br /><sub><b>David Buchaca Prats</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=davidbp" title="Code">💻</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/davidgilbertson"><img src="https://avatars.githubusercontent.com/u/4443482?v=4?s=100" width="100px;" alt="David Gilbertson"/><br /><sub><b>David Gilbertson</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=davidgilbertson" title="Code">💻</a> <a href="https://github.com/sktime/sktime/issues?q=author%3Adavidgilbertson" title="Bug reports">🐛</a></td> <td align="center" valign="top" width="11.11%"><a href="http://www.uco.es/grupos/ayrna/index.php/es/publicaciones/articulos?publications_view_all=1&theses_view_all=0&projects_view_all=0&task=show&view=member&id=22"><img src="https://avatars1.githubusercontent.com/u/47889499?v=4?s=100" width="100px;" alt="David Guijo Rubio"/><br /><sub><b>David Guijo Rubio</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=dguijo" title="Code">💻</a> <a href="#ideas-dguijo" title="Ideas, Planning, & Feedback">🤔</a></td> @@ -89,9 +90,9 @@ Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/d <td align="center" valign="top" width="11.11%"><a href="https://github.com/Dbhasin1"><img src="https://avatars.githubusercontent.com/u/56479884?v=4?s=100" width="100px;" alt="Drishti Bhasin "/><br /><sub><b>Drishti Bhasin </b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=Dbhasin1" title="Code">💻</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/dsherry"><img src="https://avatars.githubusercontent.com/dsherry?s=100" width="100px;" alt="Dylan Sherry"/><br /><sub><b>Dylan Sherry</b></sub></a><br /><a href="#infra-dsherry" title="Infrastructure (Hosting, Build-Tools, etc)">🚇</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/Emiliathewolf"><img src="https://avatars2.githubusercontent.com/u/22026218?v=4?s=100" width="100px;" alt="Emilia Rose"/><br /><sub><b>Emilia Rose</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=Emiliathewolf" title="Code">💻</a> <a href="https://github.com/sktime/sktime/commits?author=Emiliathewolf" title="Tests">⚠️</a></td> - <td align="center" valign="top" width="11.11%"><a 
href="https://www.linkedin.com/in/erjieyong"><img src="https://avatars.githubusercontent.com/u/109052378?v=4?s=100" width="100px;" alt="Er Jie Yong"/><br /><sub><b>Er Jie Yong</b></sub></a><br /><a href="https://github.com/sktime/sktime/issues?q=author%3Aerjieyong" title="Bug reports">🐛</a> <a href="https://github.com/sktime/sktime/commits?author=erjieyong" title="Code">💻</a></td> </tr> <tr> + <td align="center" valign="top" width="11.11%"><a href="https://www.linkedin.com/in/erjieyong"><img src="https://avatars.githubusercontent.com/u/109052378?v=4?s=100" width="100px;" alt="Er Jie Yong"/><br /><sub><b>Er Jie Yong</b></sub></a><br /><a href="https://github.com/sktime/sktime/issues?q=author%3Aerjieyong" title="Bug reports">🐛</a> <a href="https://github.com/sktime/sktime/commits?author=erjieyong" title="Code">💻</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/evanmiller29"><img src="https://avatars2.githubusercontent.com/u/8062590?v=4?s=100" width="100px;" alt="Evan Miller"/><br /><sub><b>Evan Miller</b></sub></a><br /><a href="#tutorial-evanmiller29" title="Tutorials">✅</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/eyalshafran"><img src="https://avatars.githubusercontent.com/u/16999574?v=4?s=100" width="100px;" alt="Eyal Shafran"/><br /><sub><b>Eyal Shafran</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=eyalshafran" title="Code">💻</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/eyjo"><img src="https://avatars.githubusercontent.com/eyjo?s=100" width="100px;" alt="Eyjólfur Sigurðsson"/><br /><sub><b>Eyjólfur Sigurðsson</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=eyjo" title="Code">💻</a> <a href="https://github.com/sktime/sktime/commits?author=eyjo" title="Documentation">📖</a></td> @@ -100,9 +101,9 @@ Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/d <td align="center" valign="top" width="11.11%"><a href="https://github.com/flix6x"><img src="https://avatars.githubusercontent.com/u/30658763?v=4?s=100" width="100px;" alt="Felix Claessen"/><br /><sub><b>Felix Claessen</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=Flix6x" title="Code">💻</a> <a href="https://github.com/sktime/sktime/commits?author=Flix6x" title="Documentation">📖</a> <a href="https://github.com/sktime/sktime/commits?author=Flix6x" title="Tests">⚠️</a> <a href="https://github.com/sktime/sktime/issues?q=author%3AFlix6x" title="Bug reports">🐛</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/fstinner"><img src="https://avatars.githubusercontent.com/u/11679462?v=4?s=100" width="100px;" alt="Florian Stinner"/><br /><sub><b>Florian Stinner</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=fstinner" title="Code">💻</a> <a href="https://github.com/sktime/sktime/commits?author=fstinner" title="Tests">⚠️</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/fkiraly"><img src="https://avatars1.githubusercontent.com/u/7985502?v=4?s=100" width="100px;" alt="Franz Kiraly"/><br /><sub><b>Franz Kiraly</b></sub></a><br /><a href="#blog-fkiraly" title="Blogposts">📝</a> <a href="https://github.com/sktime/sktime/issues?q=author%3Afkiraly" title="Bug reports">🐛</a> <a href="#business-fkiraly" title="Business development">💼</a> <a href="https://github.com/sktime/sktime/commits?author=fkiraly" title="Code">💻</a> <a 
href="https://github.com/sktime/sktime/commits?author=fkiraly" title="Documentation">📖</a> <a href="#design-fkiraly" title="Design">🎨</a> <a href="#eventOrganizing-fkiraly" title="Event Organizing">📋</a> <a href="#example-fkiraly" title="Examples">💡</a> <a href="#financial-fkiraly" title="Financial">💵</a> <a href="#fundingFinding-fkiraly" title="Funding Finding">🔍</a> <a href="#ideas-fkiraly" title="Ideas, Planning, & Feedback">🤔</a> <a href="#maintenance-fkiraly" title="Maintenance">🚧</a> <a href="#mentoring-fkiraly" title="Mentoring">🧑‍🏫</a> <a href="#projectManagement-fkiraly" title="Project Management">📆</a> <a href="#question-fkiraly" title="Answering Questions">💬</a> <a href="https://github.com/sktime/sktime/pulls?q=is%3Apr+reviewed-by%3Afkiraly" title="Reviewed Pull Requests">👀</a> <a href="#talk-fkiraly" title="Talks">📢</a> <a href="https://github.com/sktime/sktime/commits?author=fkiraly" title="Tests">⚠️</a> <a href="#tutorial-fkiraly" title="Tutorials">✅</a> <a href="#video-fkiraly" title="Videos">📹</a></td> - <td align="center" valign="top" width="11.11%"><a href="https://github.com/freddyaboulton"><img src="https://avatars.githubusercontent.com/u/41651716?v=4?s=100" width="100px;" alt="Freddy A Boulton"/><br /><sub><b>Freddy A Boulton</b></sub></a><br /><a href="#infra-freddyaboulton" title="Infrastructure (Hosting, Build-Tools, etc)">🚇</a> <a href="https://github.com/sktime/sktime/commits?author=freddyaboulton" title="Tests">⚠️</a></td> </tr> <tr> + <td align="center" valign="top" width="11.11%"><a href="https://github.com/freddyaboulton"><img src="https://avatars.githubusercontent.com/u/41651716?v=4?s=100" width="100px;" alt="Freddy A Boulton"/><br /><sub><b>Freddy A Boulton</b></sub></a><br /><a href="#infra-freddyaboulton" title="Infrastructure (Hosting, Build-Tools, etc)">🚇</a> <a href="https://github.com/sktime/sktime/commits?author=freddyaboulton" title="Tests">⚠️</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/chernika158"><img src="https://avatars.githubusercontent.com/u/43787741?s=400&v=4?s=100" width="100px;" alt="Galina Chernikova"/><br /><sub><b>Galina Chernikova</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=chernika158" title="Code">💻</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/goastler"><img src="https://avatars0.githubusercontent.com/u/7059456?v=4?s=100" width="100px;" alt="George Oastler"/><br /><sub><b>George Oastler</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=goastler" title="Code">💻</a> <a href="https://github.com/sktime/sktime/commits?author=goastler" title="Tests">⚠️</a> <a href="#platform-goastler" title="Packaging/porting to new platform">📦</a> <a href="#example-goastler" title="Examples">💡</a> <a href="https://github.com/sktime/sktime/commits?author=goastler" title="Documentation">📖</a></td> <td align="center" valign="top" width="11.11%"><a href="https://www.linkedin.com/in/gracewgao/"><img src="https://avatars0.githubusercontent.com/u/38268331?v=4?s=100" width="100px;" alt="Grace Gao"/><br /><sub><b>Grace Gao</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=gracewgao" title="Code">💻</a> <a href="https://github.com/sktime/sktime/issues?q=author%3Agracewgao" title="Bug reports">🐛</a></td> @@ -111,9 +112,9 @@ Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/d <td align="center" valign="top" width="11.11%"><a href="https://github.com/BensHamza"><img 
src="https://avatars.githubusercontent.com/u/96446862?v=4?s=100" width="100px;" alt="Hamza Benslimane"/><br /><sub><b>Hamza Benslimane</b></sub></a><br /><a href="https://github.com/sktime/sktime/issues?q=author%3ABensHamza" title="Bug reports">🐛</a> <a href="https://github.com/sktime/sktime/commits?author=BensHamza" title="Code">💻</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/hazrulakmal"><img src="https://avatars.githubusercontent.com/u/24774385?v=4?s=100" width="100px;" alt="Hazrul Akmal"/><br /><sub><b>Hazrul Akmal</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=hazrulakmal" title="Code">💻</a> <a href="https://github.com/sktime/sktime/commits?author=hazrulakmal" title="Documentation">📖</a> <a href="https://github.com/sktime/sktime/issues?q=author%3Ahazrulakmal" title="Bug reports">🐛</a> <a href="https://github.com/sktime/sktime/commits?author=hazrulakmal" title="Tests">⚠️</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/hliebert"><img src="https://avatars.githubusercontent.com/u/20834265?s=100" width="100px;" alt="Helge Liebert"/><br /><sub><b>Helge Liebert</b></sub></a><br /><a href="https://github.com/sktime/sktime/issues?q=author%3Ahliebert" title="Bug reports">🐛</a> <a href="https://github.com/sktime/sktime/commits?author=hliebert" title="Code">💻</a></td> - <td align="center" valign="top" width="11.11%"><a href="https://www.linkedin.com/in/huayiwei/"><img src="https://avatars3.githubusercontent.com/u/22870735?v=4?s=100" width="100px;" alt="Huayi Wei"/><br /><sub><b>Huayi Wei</b></sub></a><br /><a href="#tutorial-huayicodes" title="Tutorials">✅</a></td> </tr> <tr> + <td align="center" valign="top" width="11.11%"><a href="https://www.linkedin.com/in/huayiwei/"><img src="https://avatars3.githubusercontent.com/u/22870735?v=4?s=100" width="100px;" alt="Huayi Wei"/><br /><sub><b>Huayi Wei</b></sub></a><br /><a href="#tutorial-huayicodes" title="Tutorials">✅</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/Ifeanyi30"><img src="https://avatars.githubusercontent.com/u/49926145?v=4?s=100" width="100px;" alt="Ifeanyi30"/><br /><sub><b>Ifeanyi30</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=Ifeanyi30" title="Code">💻</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/iljamaurer"><img src="https://avatars.githubusercontent.com/u/45882103?v=4?s=100" width="100px;" alt="Ilja Maurer"/><br /><sub><b>Ilja Maurer</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=iljamaurer" title="Code">💻</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/IlyasMoutawwakil"><img src="https://avatars.githubusercontent.com/IlyasMoutawwakil?s=100" width="100px;" alt="Ilyas Moutawwakil"/><br /><sub><b>Ilyas Moutawwakil</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=IlyasMoutawwakil" title="Code">💻</a> <a href="https://github.com/sktime/sktime/commits?author=IlyasMoutawwakil" title="Documentation">📖</a></td> @@ -122,9 +123,9 @@ Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/d <td align="center" valign="top" width="11.11%"><a href="https://github.com/jnrusson1"><img src="https://avatars.githubusercontent.com/u/51986332?v=4?s=100" width="100px;" alt="Jack Russon"/><br /><sub><b>Jack Russon</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=jnrusson1" title="Code">💻</a></td> <td 
align="center" valign="top" width="11.11%"><a href="http://www.timeseriesclassification.com/"><img src="https://avatars0.githubusercontent.com/u/44509982?v=4?s=100" width="100px;" alt="James Large"/><br /><sub><b>James Large</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=James-Large" title="Code">💻</a> <a href="https://github.com/sktime/sktime/commits?author=James-Large" title="Documentation">📖</a> <a href="https://github.com/sktime/sktime/commits?author=James-Large" title="Tests">⚠️</a> <a href="#infra-James-Large" title="Infrastructure (Hosting, Build-Tools, etc)">🚇</a> <a href="#maintenance-James-Large" title="Maintenance">🚧</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/jambo6"><img src="https://https://avatars.githubusercontent.com/jambo6?s=100" width="100px;" alt="James Morrill"/><br /><sub><b>James Morrill</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=jambo6" title="Code">💻</a></td> - <td align="center" valign="top" width="11.11%"><a href="https://github.com/janpipek"><img src="https://avatars.githubusercontent.com/janpipek?s=100" width="100px;" alt="Jan Pipek"/><br /><sub><b>Jan Pipek</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=janpipek" title="Code">💻</a></td> </tr> <tr> + <td align="center" valign="top" width="11.11%"><a href="https://github.com/janpipek"><img src="https://avatars.githubusercontent.com/janpipek?s=100" width="100px;" alt="Jan Pipek"/><br /><sub><b>Jan Pipek</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=janpipek" title="Code">💻</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/jasmineliaw"><img src="?s=100" width="100px;" alt="Jasmine Liaw"/><br /><sub><b>Jasmine Liaw</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=jasmineliaw" title="Code">💻</a></td> <td align="center" valign="top" width="11.11%"><a href="http://www.timeseriesclassification.com"><img src="https://avatars1.githubusercontent.com/u/38794632?v=4?s=100" width="100px;" alt="Jason Lines"/><br /><sub><b>Jason Lines</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=jasonlines" title="Code">💻</a> <a href="#business-jasonlines" title="Business development">💼</a> <a href="https://github.com/sktime/sktime/commits?author=jasonlines" title="Documentation">📖</a> <a href="#design-jasonlines" title="Design">🎨</a> <a href="#eventOrganizing-jasonlines" title="Event Organizing">📋</a> <a href="#fundingFinding-jasonlines" title="Funding Finding">🔍</a> <a href="#ideas-jasonlines" title="Ideas, Planning, & Feedback">🤔</a> <a href="#projectManagement-jasonlines" title="Project Management">📆</a> <a href="#question-jasonlines" title="Answering Questions">💬</a> <a href="https://github.com/sktime/sktime/pulls?q=is%3Apr+reviewed-by%3Ajasonlines" title="Reviewed Pull Requests">👀</a> <a href="#talk-jasonlines" title="Talks">📢</a> <a href="#example-jasonlines" title="Examples">💡</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/whackteachers"><img src="https://avatars0.githubusercontent.com/u/33785383?v=4?s=100" width="100px;" alt="Jason Pong"/><br /><sub><b>Jason Pong</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=whackteachers" title="Code">💻</a> <a href="https://github.com/sktime/sktime/commits?author=whackteachers" title="Tests">⚠️</a></td> @@ -133,9 +134,10 @@ Thanks goes to these wonderful people ([emoji 
key](https://allcontributors.org/d <td align="center" valign="top" width="11.11%"><a href="https://github.com/jorenham"><img src="?s=100" width="100px;" alt="Joren Hammudoglu"/><br /><sub><b>Joren Hammudoglu</b></sub></a><br /><a href="#infra-jorenham" title="Infrastructure (Hosting, Build-Tools, etc)">🚇</a></td> <td align="center" valign="top" width="11.11%"><a href="https://juanitorduz.github.io/"><img src="https://avatars1.githubusercontent.com/u/22996444?v=4?s=100" width="100px;" alt="Juan Orduz"/><br /><sub><b>Juan Orduz</b></sub></a><br /><a href="#tutorial-juanitorduz" title="Tutorials">✅</a> <a href="https://github.com/sktime/sktime/commits?author=juanitorduz" title="Documentation">📖</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/julia-kraus"><img src="https://avatars.githubusercontent.com/julia-kraus?s=100" width="100px;" alt="Julia Kraus"/><br /><sub><b>Julia Kraus</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=julia-kraus" title="Documentation">📖</a> <a href="https://github.com/sktime/sktime/commits?author=julia-kraus" title="Code">💻</a> <a href="https://github.com/sktime/sktime/commits?author=julia-kraus" title="Tests">⚠️</a></td> - <td align="center" valign="top" width="11.11%"><a href="https://github.com/jelc53"><img src="?s=100" width="100px;" alt="Julian Cooper"/><br /><sub><b>Julian Cooper</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=jelc53" title="Code">💻</a> <a href="#ideas-jelc53" title="Ideas, Planning, & Feedback">🤔</a></td> </tr> <tr> + <td align="center" valign="top" width="11.11%"><a href="https://github.com/jelc53"><img src="?s=100" width="100px;" alt="Julian Cooper"/><br /><sub><b>Julian Cooper</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=jelc53" title="Code">💻</a> <a href="#ideas-jelc53" title="Ideas, Planning, & Feedback">🤔</a></td> + <td align="center" valign="top" width="11.11%"><a href="https://github.com/julnow"><img src="https://avatars.githubusercontent.com/u/21206185?v=4?s=100" width="100px;" alt="Julian Nowak"/><br /><sub><b>Julian Nowak</b></sub></a><br /><a href="https://github.com/sktime/sktime/issues?q=author%3Ajulnow" title="Bug reports">🐛</a> <a href="https://github.com/sktime/sktime/commits?author=julnow" title="Code">💻</a></td> <td align="center" valign="top" width="11.11%"><a href="https://www.linkedin.com/in/julianarn/"><img src="https://avatars.githubusercontent.com/u/19613567?v=4?s=100" width="100px;" alt="Juliana"/><br /><sub><b>Juliana</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=julramos" title="Code">💻</a></td> <td align="center" valign="top" width="11.11%"><a href="https://www.justinshenk.com/"><img src="https://avatars.githubusercontent.com/u/10270308?v=4?s=100" width="100px;" alt="Justin Shenk"/><br /><sub><b>Justin Shenk</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=justinshenk" title="Documentation">📖</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/kcc-lion"><img src="?s=100" width="100px;" alt="Kai Lion"/><br /><sub><b>Kai Lion</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=kcc-lion" title="Code">💻</a> <a href="https://github.com/sktime/sktime/commits?author=kcc-lion" title="Tests">⚠️</a> <a href="https://github.com/sktime/sktime/commits?author=kcc-lion" title="Documentation">📖</a></td> @@ -143,10 +145,10 @@ Thanks goes to these wonderful people ([emoji 
key](https://allcontributors.org/d <td align="center" valign="top" width="11.11%"><a href="https://kejsitake.com/"><img src="https://avatars.githubusercontent.com/u/23707808?v=4?s=100" width="100px;" alt="Kejsi Take"/><br /><sub><b>Kejsi Take</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=kejsitake" title="Code">💻</a></td> <td align="center" valign="top" width="11.11%"><a href="https://www.linkedin.com/in/kevinlam2"><img src="https://avatars.githubusercontent.com/u/114420932?s=400&v=4?s=100" width="100px;" alt="Kevin Lam"/><br /><sub><b>Kevin Lam</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=klam-data" title="Code">💻</a> <a href="#example-klam-data" title="Examples">💡</a> <a href="https://github.com/sktime/sktime/commits?author=klam-data" title="Tests">⚠️</a></td> <td align="center" valign="top" width="11.11%"><a href="https://whitakerlab.github.io/"><img src="https://avatars1.githubusercontent.com/u/3626306?v=4?s=100" width="100px;" alt="Kirstie Whitaker"/><br /><sub><b>Kirstie Whitaker</b></sub></a><br /><a href="#ideas-KirstieJane" title="Ideas, Planning, & Feedback">🤔</a> <a href="#fundingFinding-KirstieJane" title="Funding Finding">🔍</a></td> - <td align="center" valign="top" width="11.11%"><a href="https://github.com/kishmanani"><img src="https://avatars.githubusercontent.com/u/30973056?v=4?s=100" width="100px;" alt="Kishan Manani"/><br /><sub><b>Kishan Manani</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=KishManani" title="Code">💻</a> <a href="https://github.com/sktime/sktime/commits?author=KishManani" title="Documentation">📖</a> <a href="https://github.com/sktime/sktime/commits?author=KishManani" title="Tests">⚠️</a> <a href="https://github.com/sktime/sktime/issues?q=author%3AKishManani" title="Bug reports">🐛</a> <a href="#ideas-KishManani" title="Ideas, Planning, & Feedback">🤔</a></td> - <td align="center" valign="top" width="11.11%"><a href="https://github.com/krumeto"><img src="https://avatars3.githubusercontent.com/u/11272436?v=4?s=100" width="100px;" alt="Krum Arnaudov"/><br /><sub><b>Krum Arnaudov</b></sub></a><br /><a href="https://github.com/sktime/sktime/issues?q=author%3Akrumeto" title="Bug reports">🐛</a> <a href="https://github.com/sktime/sktime/commits?author=krumeto" title="Code">💻</a></td> </tr> <tr> + <td align="center" valign="top" width="11.11%"><a href="https://github.com/kishmanani"><img src="https://avatars.githubusercontent.com/u/30973056?v=4?s=100" width="100px;" alt="Kishan Manani"/><br /><sub><b>Kishan Manani</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=KishManani" title="Code">💻</a> <a href="https://github.com/sktime/sktime/commits?author=KishManani" title="Documentation">📖</a> <a href="https://github.com/sktime/sktime/commits?author=KishManani" title="Tests">⚠️</a> <a href="https://github.com/sktime/sktime/issues?q=author%3AKishManani" title="Bug reports">🐛</a> <a href="#ideas-KishManani" title="Ideas, Planning, & Feedback">🤔</a></td> + <td align="center" valign="top" width="11.11%"><a href="https://github.com/krumeto"><img src="https://avatars3.githubusercontent.com/u/11272436?v=4?s=100" width="100px;" alt="Krum Arnaudov"/><br /><sub><b>Krum Arnaudov</b></sub></a><br /><a href="https://github.com/sktime/sktime/issues?q=author%3Akrumeto" title="Bug reports">🐛</a> <a href="https://github.com/sktime/sktime/commits?author=krumeto" title="Code">💻</a></td> <td align="center" valign="top" width="11.11%"><a 
href="https://github.com/koralturkk"><img src="https://avatars2.githubusercontent.com/u/18037789?s=460&v=4?s=100" width="100px;" alt="Kutay Koralturk"/><br /><sub><b>Kutay Koralturk</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=koralturkk" title="Code">💻</a> <a href="https://github.com/sktime/sktime/issues?q=author%3Akoralturkk" title="Bug reports">🐛</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/ltsaprounis"><img src="https://avatars.githubusercontent.com/u/64217214?v=4?s=100" width="100px;" alt="Leonidas Tsaprounis"/><br /><sub><b>Leonidas Tsaprounis</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=ltsaprounis" title="Code">💻</a> <a href="https://github.com/sktime/sktime/issues?q=author%3Altsaprounis" title="Bug reports">🐛</a> <a href="#mentoring-ltsaprounis" title="Mentoring">🧑‍🏫</a> <a href="https://github.com/sktime/sktime/pulls?q=is%3Apr+reviewed-by%3Altsaprounis" title="Reviewed Pull Requests">👀</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/lielleravid"><img src="https://avatars.githubusercontent.com/u/37774194?v=4?s=100" width="100px;" alt="Lielle Ravid"/><br /><sub><b>Lielle Ravid</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=lielleravid" title="Code">💻</a> <a href="https://github.com/sktime/sktime/commits?author=lielleravid" title="Documentation">📖</a></td> @@ -154,10 +156,10 @@ Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/d <td align="center" valign="top" width="11.11%"><a href="http://lpantano.github.io/"><img src="https://avatars2.githubusercontent.com/u/1621788?v=4?s=100" width="100px;" alt="Lorena Pantano"/><br /><sub><b>Lorena Pantano</b></sub></a><br /><a href="#ideas-lpantano" title="Ideas, Planning, & Feedback">🤔</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/ltoniazzi"><img src="https://avatars.githubusercontent.com/u/61414566?s=100" width="100px;" alt="Lorenzo Toniazzi"/><br /><sub><b>Lorenzo Toniazzi</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=ltoniazzi" title="Code">💻</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/Lovkush-A"><img src="https://avatars.githubusercontent.com/u/25344832?v=4?s=100" width="100px;" alt="Lovkush"/><br /><sub><b>Lovkush</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=Lovkush-A" title="Code">💻</a> <a href="https://github.com/sktime/sktime/commits?author=Lovkush-A" title="Tests">⚠️</a> <a href="#ideas-Lovkush-A" title="Ideas, Planning, & Feedback">🤔</a> <a href="#mentoring-Lovkush-A" title="Mentoring">🧑‍🏫</a> <a href="#projectManagement-Lovkush-A" title="Project Management">📆</a></td> - <td align="center" valign="top" width="11.11%"><a href="https://github.com/luca-miniati"><img src="https://avatars.githubusercontent.com/u/87467600?v=4?s=100" width="100px;" alt="Luca Miniati"/><br /><sub><b>Luca Miniati</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=luca-miniati" title="Code">💻</a> <a href="https://github.com/sktime/sktime/commits?author=luca-miniati" title="Documentation">📖</a></td> - <td align="center" valign="top" width="11.11%"><a href="https://github.com/lbventura"><img src="https://avatars.githubusercontent.com/u/68004282?s=96&v=4?s=100" width="100px;" alt="Luis Ventura"/><br /><sub><b>Luis Ventura</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=lbventura" 
title="Code">💻</a></td> </tr> <tr> + <td align="center" valign="top" width="11.11%"><a href="https://github.com/luca-miniati"><img src="https://avatars.githubusercontent.com/u/87467600?v=4?s=100" width="100px;" alt="Luca Miniati"/><br /><sub><b>Luca Miniati</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=luca-miniati" title="Code">💻</a> <a href="https://github.com/sktime/sktime/commits?author=luca-miniati" title="Documentation">📖</a></td> + <td align="center" valign="top" width="11.11%"><a href="https://github.com/lbventura"><img src="https://avatars.githubusercontent.com/u/68004282?s=96&v=4?s=100" width="100px;" alt="Luis Ventura"/><br /><sub><b>Luis Ventura</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=lbventura" title="Code">💻</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/luiszugasti"><img src="https://avatars.githubusercontent.com/u/11198457?s=460&u=0645b72683e491824aca16db9702f1d3eb990389&v=4?s=100" width="100px;" alt="Luis Zugasti"/><br /><sub><b>Luis Zugasti</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=luiszugasti" title="Documentation">📖</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/lmmentel"><img src="https://avatars.githubusercontent.com/u/8989838?v=4?s=100" width="100px;" alt="Lukasz Mentel"/><br /><sub><b>Lukasz Mentel</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=lmmentel" title="Code">💻</a> <a href="https://github.com/sktime/sktime/commits?author=lmmentel" title="Documentation">📖</a> <a href="#infra-lmmentel" title="Infrastructure (Hosting, Build-Tools, etc)">🚇</a> <a href="https://github.com/sktime/sktime/commits?author=lmmentel" title="Tests">⚠️</a> <a href="https://github.com/sktime/sktime/issues?q=author%3Almmentel" title="Bug reports">🐛</a> <a href="#maintenance-lmmentel" title="Maintenance">🚧</a> <a href="#mentoring-lmmentel" title="Mentoring">🧑‍🏫</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/marrov"><img src="https://avatars.githubusercontent.com/u/54272586?v=4?s=100" width="100px;" alt="Marc Rovira"/><br /><sub><b>Marc Rovira</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=marrov" title="Documentation">📖</a></td> @@ -165,10 +167,10 @@ Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/d <td align="center" valign="top" width="11.11%"><a href="https://github.com/MarcoGorelli"><img src="https://avatars2.githubusercontent.com/u/33491632?v=4?s=100" width="100px;" alt="Marco Gorelli"/><br /><sub><b>Marco Gorelli</b></sub></a><br /><a href="#infra-MarcoGorelli" title="Infrastructure (Hosting, Build-Tools, etc)">🚇</a></td> <td align="center" valign="top" width="11.11%"><a href="https://www.linkedin.com/in/margaret-gorlin/"><img src="?s=100" width="100px;" alt="Margaret Gorlin"/><br /><sub><b>Margaret Gorlin</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=mgorlin" title="Code">💻</a> <a href="#example-mgorlin" title="Examples">💡</a> <a href="https://github.com/sktime/sktime/commits?author=mgorlin" title="Tests">⚠️</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/mariamjabara"><img src="?s=100" width="100px;" alt="Mariam Jabara"/><br /><sub><b>Mariam Jabara</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=mariamjabara" title="Code">💻</a></td> - <td align="center" valign="top" width="11.11%"><a 
href="https://twitter.com/marielli"><img src="https://avatars2.githubusercontent.com/u/13499809?v=4?s=100" width="100px;" alt="Marielle"/><br /><sub><b>Marielle</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=marielledado" title="Documentation">📖</a> <a href="https://github.com/sktime/sktime/commits?author=marielledado" title="Code">💻</a> <a href="#ideas-marielledado" title="Ideas, Planning, & Feedback">🤔</a></td> - <td align="center" valign="top" width="11.11%"><a href="https://github.com/mloning"><img src="https://avatars3.githubusercontent.com/u/21020482?v=4?s=100" width="100px;" alt="Markus Löning"/><br /><sub><b>Markus Löning</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=mloning" title="Code">💻</a> <a href="https://github.com/sktime/sktime/commits?author=mloning" title="Tests">⚠️</a> <a href="#maintenance-mloning" title="Maintenance">🚧</a> <a href="#platform-mloning" title="Packaging/porting to new platform">📦</a> <a href="https://github.com/sktime/sktime/pulls?q=is%3Apr+reviewed-by%3Amloning" title="Reviewed Pull Requests">👀</a> <a href="#infra-mloning" title="Infrastructure (Hosting, Build-Tools, etc)">🚇</a> <a href="#example-mloning" title="Examples">💡</a> <a href="https://github.com/sktime/sktime/issues?q=author%3Amloning" title="Bug reports">🐛</a> <a href="#tutorial-mloning" title="Tutorials">✅</a> <a href="#business-mloning" title="Business development">💼</a> <a href="https://github.com/sktime/sktime/commits?author=mloning" title="Documentation">📖</a> <a href="#design-mloning" title="Design">🎨</a> <a href="#eventOrganizing-mloning" title="Event Organizing">📋</a> <a href="#fundingFinding-mloning" title="Funding Finding">🔍</a> <a href="#ideas-mloning" title="Ideas, Planning, & Feedback">🤔</a> <a href="#projectManagement-mloning" title="Project Management">📆</a> <a href="#question-mloning" title="Answering Questions">💬</a> <a href="#talk-mloning" title="Talks">📢</a> <a href="#mentoring-mloning" title="Mentoring">🧑‍🏫</a> <a href="#video-mloning" title="Videos">📹</a></td> </tr> <tr> + <td align="center" valign="top" width="11.11%"><a href="https://twitter.com/marielli"><img src="https://avatars2.githubusercontent.com/u/13499809?v=4?s=100" width="100px;" alt="Marielle"/><br /><sub><b>Marielle</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=marielledado" title="Documentation">📖</a> <a href="https://github.com/sktime/sktime/commits?author=marielledado" title="Code">💻</a> <a href="#ideas-marielledado" title="Ideas, Planning, & Feedback">🤔</a></td> + <td align="center" valign="top" width="11.11%"><a href="https://github.com/mloning"><img src="https://avatars3.githubusercontent.com/u/21020482?v=4?s=100" width="100px;" alt="Markus Löning"/><br /><sub><b>Markus Löning</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=mloning" title="Code">💻</a> <a href="https://github.com/sktime/sktime/commits?author=mloning" title="Tests">⚠️</a> <a href="#maintenance-mloning" title="Maintenance">🚧</a> <a href="#platform-mloning" title="Packaging/porting to new platform">📦</a> <a href="https://github.com/sktime/sktime/pulls?q=is%3Apr+reviewed-by%3Amloning" title="Reviewed Pull Requests">👀</a> <a href="#infra-mloning" title="Infrastructure (Hosting, Build-Tools, etc)">🚇</a> <a href="#example-mloning" title="Examples">💡</a> <a href="https://github.com/sktime/sktime/issues?q=author%3Amloning" title="Bug reports">🐛</a> <a href="#tutorial-mloning" title="Tutorials">✅</a> <a href="#business-mloning" 
title="Business development">💼</a> <a href="https://github.com/sktime/sktime/commits?author=mloning" title="Documentation">📖</a> <a href="#design-mloning" title="Design">🎨</a> <a href="#eventOrganizing-mloning" title="Event Organizing">📋</a> <a href="#fundingFinding-mloning" title="Funding Finding">🔍</a> <a href="#ideas-mloning" title="Ideas, Planning, & Feedback">🤔</a> <a href="#projectManagement-mloning" title="Project Management">📆</a> <a href="#question-mloning" title="Answering Questions">💬</a> <a href="#talk-mloning" title="Talks">📢</a> <a href="#mentoring-mloning" title="Mentoring">🧑‍🏫</a> <a href="#video-mloning" title="Videos">📹</a></td> <td align="center" valign="top" width="11.11%"><a href="https://www.linkedin.com/in/martin-walter-1a33b3114/"><img src="https://avatars0.githubusercontent.com/u/29627036?v=4?s=100" width="100px;" alt="Martin Walter"/><br /><sub><b>Martin Walter</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=aiwalter" title="Code">💻</a> <a href="https://github.com/sktime/sktime/issues?q=author%3Aaiwalter" title="Bug reports">🐛</a> <a href="#projectManagement-aiwalter" title="Project Management">📆</a> <a href="#fundingFinding-aiwalter" title="Funding Finding">🔍</a> <a href="#mentoring-aiwalter" title="Mentoring">🧑‍🏫</a> <a href="#ideas-aiwalter" title="Ideas, Planning, & Feedback">🤔</a> <a href="#design-aiwalter" title="Design">🎨</a> <a href="https://github.com/sktime/sktime/pulls?q=is%3Apr+reviewed-by%3Aaiwalter" title="Reviewed Pull Requests">👀</a> <a href="https://github.com/sktime/sktime/commits?author=aiwalter" title="Documentation">📖</a> <a href="#talk-aiwalter" title="Talks">📢</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/martinagvilas"><img src="https://avatars2.githubusercontent.com/u/37339384?v=4?s=100" width="100px;" alt="Martina G. Vilas"/><br /><sub><b>Martina G. 
Vilas</b></sub></a><br /><a href="https://github.com/sktime/sktime/pulls?q=is%3Apr+reviewed-by%3Amartinagvilas" title="Reviewed Pull Requests">👀</a> <a href="#ideas-martinagvilas" title="Ideas, Planning, & Feedback">🤔</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/MCRE-BE"><img src="https://avatars.githubusercontent.com/u/99316631?s=100" width="100px;" alt="Mathias Creemers"/><br /><sub><b>Mathias Creemers</b></sub></a><br /><a href="https://github.com/sktime/sktime/issues?q=author%3AMCRE-BE" title="Bug reports">🐛</a> <a href="https://github.com/sktime/sktime/commits?author=MCRE-BE" title="Code">💻</a></td> @@ -176,10 +178,10 @@ Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/d <td align="center" valign="top" width="11.11%"><a href="https://github.com/solen0id"><img src="https://avatars.githubusercontent.com/u/20767606?v=4?s=100" width="100px;" alt="Max Patzelt"/><br /><sub><b>Max Patzelt</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=solen0id" title="Code">💻</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/Hephaest"><img src="https://avatars2.githubusercontent.com/u/37981444?v=4?s=100" width="100px;" alt="Miao Cai"/><br /><sub><b>Miao Cai</b></sub></a><br /><a href="https://github.com/sktime/sktime/issues?q=author%3AHephaest" title="Bug reports">🐛</a> <a href="https://github.com/sktime/sktime/commits?author=Hephaest" title="Code">💻</a></td> <td align="center" valign="top" width="11.11%"><a href="michaelfeil.eu"><img src="https://avatars.githubusercontent.com/u/63565275?v=4?s=100" width="100px;" alt="Michael Feil"/><br /><sub><b>Michael Feil</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=michaelfeil" title="Code">💻</a> <a href="https://github.com/sktime/sktime/commits?author=michaelfeil" title="Tests">⚠️</a> <a href="#ideas-michaelfeil" title="Ideas, Planning, & Feedback">🤔</a></td> - <td align="center" valign="top" width="11.11%"><a href="https://github.com/mgazian000"><img src="https://avatars.githubusercontent.com/mgazian000?s=100" width="100px;" alt="Michael Gaziani"/><br /><sub><b>Michael Gaziani</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=mgazian000" title="Documentation">📖</a></td> - <td align="center" valign="top" width="11.11%"><a href="https://github.com/MichalChromcak"><img src="https://avatars1.githubusercontent.com/u/12393430?v=4?s=100" width="100px;" alt="Michal Chromcak"/><br /><sub><b>Michal Chromcak</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=MichalChromcak" title="Code">💻</a> <a href="https://github.com/sktime/sktime/commits?author=MichalChromcak" title="Documentation">📖</a> <a href="https://github.com/sktime/sktime/commits?author=MichalChromcak" title="Tests">⚠️</a> <a href="#tutorial-MichalChromcak" title="Tutorials">✅</a></td> </tr> <tr> + <td align="center" valign="top" width="11.11%"><a href="https://github.com/mgazian000"><img src="https://avatars.githubusercontent.com/mgazian000?s=100" width="100px;" alt="Michael Gaziani"/><br /><sub><b>Michael Gaziani</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=mgazian000" title="Documentation">📖</a></td> + <td align="center" valign="top" width="11.11%"><a href="https://github.com/MichalChromcak"><img src="https://avatars1.githubusercontent.com/u/12393430?v=4?s=100" width="100px;" alt="Michal Chromcak"/><br /><sub><b>Michal Chromcak</b></sub></a><br /><a 
href="https://github.com/sktime/sktime/commits?author=MichalChromcak" title="Code">💻</a> <a href="https://github.com/sktime/sktime/commits?author=MichalChromcak" title="Documentation">📖</a> <a href="https://github.com/sktime/sktime/commits?author=MichalChromcak" title="Tests">⚠️</a> <a href="#tutorial-MichalChromcak" title="Tutorials">✅</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/miraep8"><img src="https://avatars.githubusercontent.com/u/10511777?s=400&u=10a774fd4be767fa3b23a82a98bbfe102c17f0f3&v=4?s=100" width="100px;" alt="Mirae Parker"/><br /><sub><b>Mirae Parker</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=miraep8" title="Code">💻</a> <a href="https://github.com/sktime/sktime/commits?author=miraep8" title="Tests">⚠️</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/MBristle"><img src="https://avatars.githubusercontent.com/MBristle?s=100" width="100px;" alt="Mirko Bristle"/><br /><sub><b>Mirko Bristle</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=MBristle" title="Documentation">📖</a></td> <td align="center" valign="top" width="11.11%"><a href="https://mo-saif.github.io/"><img src="https://avatars0.githubusercontent.com/u/27867617?v=4?s=100" width="100px;" alt="Mohammed Saif Kazamel"/><br /><sub><b>Mohammed Saif Kazamel</b></sub></a><br /><a href="https://github.com/sktime/sktime/issues?q=author%3AMo-Saif" title="Bug reports">🐛</a></td> @@ -187,10 +189,10 @@ Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/d <td align="center" valign="top" width="11.11%"><a href="https://github.com/Multivin12"><img src="https://avatars3.githubusercontent.com/u/36476633?v=4?s=100" width="100px;" alt="Multivin12"/><br /><sub><b>Multivin12</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=Multivin12" title="Code">💻</a> <a href="https://github.com/sktime/sktime/commits?author=Multivin12" title="Tests">⚠️</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/marcio55afr"><img src="https://avatars.githubusercontent.com/u/42646282?v=4?s=100" width="100px;" alt="Márcio A. Freitas Jr"/><br /><sub><b>Márcio A. 
Freitas Jr</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=marcio55afr" title="Documentation">📖</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/niekvanderlaan"><img src="https://avatars.githubusercontent.com/u/9962825?v=4?s=100" width="100px;" alt="Niek van der Laan"/><br /><sub><b>Niek van der Laan</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=niekvanderlaan" title="Code">💻</a></td> - <td align="center" valign="top" width="11.11%"><a href="https://github.com/ngupta23"><img src="https://avatars0.githubusercontent.com/u/33585645?v=4?s=100" width="100px;" alt="Nikhil Gupta"/><br /><sub><b>Nikhil Gupta</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=ngupta23" title="Code">💻</a> <a href="https://github.com/sktime/sktime/issues?q=author%3Angupta23" title="Bug reports">🐛</a> <a href="https://github.com/sktime/sktime/commits?author=ngupta23" title="Documentation">📖</a></td> - <td align="center" valign="top" width="11.11%"><a href="https://www.linkedin.com/in/nshahpazov/"><img src="https://avatars.githubusercontent.com/nshahpazov?s=100" width="100px;" alt="Nikola Shahpazov"/><br /><sub><b>Nikola Shahpazov</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=nshahpazov" title="Documentation">📖</a></td> </tr> <tr> + <td align="center" valign="top" width="11.11%"><a href="https://github.com/ngupta23"><img src="https://avatars0.githubusercontent.com/u/33585645?v=4?s=100" width="100px;" alt="Nikhil Gupta"/><br /><sub><b>Nikhil Gupta</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=ngupta23" title="Code">💻</a> <a href="https://github.com/sktime/sktime/issues?q=author%3Angupta23" title="Bug reports">🐛</a> <a href="https://github.com/sktime/sktime/commits?author=ngupta23" title="Documentation">📖</a></td> + <td align="center" valign="top" width="11.11%"><a href="https://www.linkedin.com/in/nshahpazov/"><img src="https://avatars.githubusercontent.com/nshahpazov?s=100" width="100px;" alt="Nikola Shahpazov"/><br /><sub><b>Nikola Shahpazov</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=nshahpazov" title="Documentation">📖</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/nilesh05apr"><img src="https://avatars.githubusercontent.com/u/65773314?v=4?s=100" width="100px;" alt="Nilesh Kumar"/><br /><sub><b>Nilesh Kumar</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=nilesh05apr" title="Code">💻</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/ninfueng"><img src="https://avatars2.githubusercontent.com/u/28499769?v=4?s=100" width="100px;" alt="Ninnart Fuengfusin"/><br /><sub><b>Ninnart Fuengfusin</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=ninfueng" title="Code">💻</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/NoaBenAmi"><img src="https://avatars.githubusercontent.com/u/37590002?v=4?s=100" width="100px;" alt="Noa Ben Ami"/><br /><sub><b>Noa Ben Ami</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=NoaBenAmi" title="Code">💻</a> <a href="https://github.com/sktime/sktime/commits?author=NoaBenAmi" title="Tests">⚠️</a> <a href="https://github.com/sktime/sktime/commits?author=NoaBenAmi" title="Documentation">📖</a></td> @@ -198,10 +200,10 @@ Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/d <td align="center" 
valign="top" width="11.11%"><a href="https://github.com/kachayev"><img src="https://avatars.githubusercontent.com/u/485647?v=4?s=100" width="100px;" alt="Oleksii Kachaiev"/><br /><sub><b>Oleksii Kachaiev</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=kachayev" title="Code">💻</a> <a href="https://github.com/sktime/sktime/commits?author=kachayev" title="Tests">⚠️</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/olivermatthews"><img src="https://avatars.githubusercontent.com/u/31141490?v=4?s=100" width="100px;" alt="Oliver Matthews"/><br /><sub><b>Oliver Matthews</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=OliverMatthews" title="Code">💻</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/prockenschaub"><img src="https://avatars0.githubusercontent.com/u/15381732?v=4?s=100" width="100px;" alt="Patrick Rockenschaub"/><br /><sub><b>Patrick Rockenschaub</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=prockenschaub" title="Code">💻</a> <a href="#design-prockenschaub" title="Design">🎨</a> <a href="#ideas-prockenschaub" title="Ideas, Planning, & Feedback">🤔</a> <a href="https://github.com/sktime/sktime/commits?author=prockenschaub" title="Tests">⚠️</a></td> - <td align="center" valign="top" width="11.11%"><a href="http://www2.informatik.hu-berlin.de/~schaefpa/"><img src="https://avatars0.githubusercontent.com/u/7783034?v=4?s=100" width="100px;" alt="Patrick Schäfer"/><br /><sub><b>Patrick Schäfer</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=patrickzib" title="Code">💻</a> <a href="#tutorial-patrickzib" title="Tutorials">✅</a></td> - <td align="center" valign="top" width="11.11%"><a href="https://ber.gp"><img src="https://avatars1.githubusercontent.com/u/9824244?v=4?s=100" width="100px;" alt="Paul"/><br /><sub><b>Paul</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=Pangoraw" title="Documentation">📖</a></td> </tr> <tr> + <td align="center" valign="top" width="11.11%"><a href="http://www2.informatik.hu-berlin.de/~schaefpa/"><img src="https://avatars0.githubusercontent.com/u/7783034?v=4?s=100" width="100px;" alt="Patrick Schäfer"/><br /><sub><b>Patrick Schäfer</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=patrickzib" title="Code">💻</a> <a href="#tutorial-patrickzib" title="Tutorials">✅</a></td> + <td align="center" valign="top" width="11.11%"><a href="https://ber.gp"><img src="https://avatars1.githubusercontent.com/u/9824244?v=4?s=100" width="100px;" alt="Paul"/><br /><sub><b>Paul</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=Pangoraw" title="Documentation">📖</a></td> <td align="center" valign="top" width="11.11%"><a href="https://www.linkedin.com/in/paulyim97/"><img src="https://avatars.githubusercontent.com/pyyim?s=100" width="100px;" alt="Paul Yim"/><br /><sub><b>Paul Yim</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=pyyim" title="Code">💻</a> <a href="#example-pyyim" title="Examples">💡</a> <a href="https://github.com/sktime/sktime/commits?author=pyyim" title="Tests">⚠️</a></td> <td align="center" valign="top" width="11.11%"><a href="https://www.imes.uni-hannover.de/de/institut/team/m-sc-karl-philipp-kortmann/"><img src="https://avatars.githubusercontent.com/u/20466981?v=4?s=100" width="100px;" alt="Philipp Kortmann"/><br /><sub><b>Philipp Kortmann</b></sub></a><br /><a 
href="https://github.com/sktime/sktime/commits?author=MrPr3ntice" title="Code">💻</a> <a href="https://github.com/sktime/sktime/commits?author=MrPr3ntice" title="Documentation">📖</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/Piyush1729"><img src="https://avatars2.githubusercontent.com/u/64950012?v=4?s=100" width="100px;" alt="Piyush Gade"/><br /><sub><b>Piyush Gade</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=Piyush1729" title="Code">💻</a> <a href="https://github.com/sktime/sktime/pulls?q=is%3Apr+reviewed-by%3APiyush1729" title="Reviewed Pull Requests">👀</a></td> @@ -209,10 +211,10 @@ Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/d <td align="center" valign="top" width="11.11%"><a href="https://github.com/pul95"><img src="https://avatars.githubusercontent.com/pul95?s=100" width="100px;" alt="Pulkit Verma"/><br /><sub><b>Pulkit Verma</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=pul95" title="Documentation">📖</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/Quaterion"><img src="https://avatars2.githubusercontent.com/u/23200273?v=4?s=100" width="100px;" alt="Quaterion"/><br /><sub><b>Quaterion</b></sub></a><br /><a href="https://github.com/sktime/sktime/issues?q=author%3AQuaterion" title="Bug reports">🐛</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/rakshitha123"><img src="https://avatars.githubusercontent.com/u/7654679?v=4?s=100" width="100px;" alt="Rakshitha Godahewa"/><br /><sub><b>Rakshitha Godahewa</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=rakshitha123" title="Code">💻</a> <a href="https://github.com/sktime/sktime/commits?author=rakshitha123" title="Documentation">📖</a></td> - <td align="center" valign="top" width="11.11%"><a href="https://github.com/Ram0nB"><img src="https://avatars.githubusercontent.com/u/45173421?s=100" width="100px;" alt="Ramon Bussing"/><br /><sub><b>Ramon Bussing</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=Ram0nB" title="Documentation">📖</a> <a href="https://github.com/sktime/sktime/commits?author=Ram0nB" title="Code">💻</a></td> - <td align="center" valign="top" width="11.11%"><a href="https://github.com/RavenRudi"><img src="https://avatars.githubusercontent.com/u/46402968?v=4?s=100" width="100px;" alt="RavenRudi"/><br /><sub><b>RavenRudi</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=RavenRudi" title="Code">💻</a></td> </tr> <tr> + <td align="center" valign="top" width="11.11%"><a href="https://github.com/Ram0nB"><img src="https://avatars.githubusercontent.com/u/45173421?s=100" width="100px;" alt="Ramon Bussing"/><br /><sub><b>Ramon Bussing</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=Ram0nB" title="Documentation">📖</a> <a href="https://github.com/sktime/sktime/commits?author=Ram0nB" title="Code">💻</a></td> + <td align="center" valign="top" width="11.11%"><a href="https://github.com/RavenRudi"><img src="https://avatars.githubusercontent.com/u/46402968?v=4?s=100" width="100px;" alt="RavenRudi"/><br /><sub><b>RavenRudi</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=RavenRudi" title="Code">💻</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/wolph"><img src="?s=100" width="100px;" alt="Rick van Hattem"/><br /><sub><b>Rick van Hattem</b></sub></a><br /><a href="#infra-wolph" 
title="Infrastructure (Hosting, Build-Tools, etc)">🚇</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/Ris-Bali"><img src="https://avatars.githubusercontent.com/u/81592570?v=4?s=100" width="100px;" alt="Rishabh Bali"/><br /><sub><b>Rishabh Bali</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=Ris-Bali" title="Code">💻</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/RishiKumarRay"><img src="https://avatars.githubusercontent.com/u/87641376?v=4?s=100" width="100px;" alt="Rishi Kumar Ray"/><br /><sub><b>Rishi Kumar Ray</b></sub></a><br /><a href="#infra-RishiKumarRay" title="Infrastructure (Hosting, Build-Tools, etc)">🚇</a></td> @@ -220,10 +222,10 @@ Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/d <td align="center" valign="top" width="11.11%"><a href="https://www.linkedin.com/in/romanlutz/"><img src="https://avatars.githubusercontent.com/u/10245648?v=4?s=100" width="100px;" alt="Roman Lutz"/><br /><sub><b>Roman Lutz</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=romanlutz" title="Documentation">📖</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/ronnie-llamado"><img src="https://avatars.githubusercontent.com/ronnie-llamado?s=100" width="100px;" alt="Ronnie Llamado"/><br /><sub><b>Ronnie Llamado</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=ronnie-llamado" title="Documentation">📖</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/rnkuhns"><img src="https://avatars0.githubusercontent.com/u/26907244?v=4?s=100" width="100px;" alt="Ryan Kuhns"/><br /><sub><b>Ryan Kuhns</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=RNKuhns" title="Code">💻</a> <a href="https://github.com/sktime/sktime/commits?author=RNKuhns" title="Documentation">📖</a> <a href="#tutorial-RNKuhns" title="Tutorials">✅</a> <a href="#example-RNKuhns" title="Examples">💡</a> <a href="#ideas-RNKuhns" title="Ideas, Planning, & Feedback">🤔</a> <a href="https://github.com/sktime/sktime/pulls?q=is%3Apr+reviewed-by%3ARNKuhns" title="Reviewed Pull Requests">👀</a> <a href="https://github.com/sktime/sktime/commits?author=RNKuhns" title="Tests">⚠️</a></td> - <td align="center" valign="top" width="11.11%"><a href="https://github.com/achieveordie"><img src="https://avatars.githubusercontent.com/u/54197164?v=4?s=100" width="100px;" alt="Sagar Mishra"/><br /><sub><b>Sagar Mishra</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=achieveordie" title="Tests">⚠️</a></td> - <td align="center" valign="top" width="11.11%"><a href="https://sajay.online"><img src="https://avatars2.githubusercontent.com/u/25329624?v=4?s=100" width="100px;" alt="Sajaysurya Ganesh"/><br /><sub><b>Sajaysurya Ganesh</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=sajaysurya" title="Code">💻</a> <a href="https://github.com/sktime/sktime/commits?author=sajaysurya" title="Documentation">📖</a> <a href="#design-sajaysurya" title="Design">🎨</a> <a href="#example-sajaysurya" title="Examples">💡</a> <a href="#ideas-sajaysurya" title="Ideas, Planning, & Feedback">🤔</a> <a href="https://github.com/sktime/sktime/commits?author=sajaysurya" title="Tests">⚠️</a> <a href="#tutorial-sajaysurya" title="Tutorials">✅</a></td> </tr> <tr> + <td align="center" valign="top" width="11.11%"><a href="https://github.com/achieveordie"><img 
src="https://avatars.githubusercontent.com/u/54197164?v=4?s=100" width="100px;" alt="Sagar Mishra"/><br /><sub><b>Sagar Mishra</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=achieveordie" title="Tests">⚠️</a></td> + <td align="center" valign="top" width="11.11%"><a href="https://sajay.online"><img src="https://avatars2.githubusercontent.com/u/25329624?v=4?s=100" width="100px;" alt="Sajaysurya Ganesh"/><br /><sub><b>Sajaysurya Ganesh</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=sajaysurya" title="Code">💻</a> <a href="https://github.com/sktime/sktime/commits?author=sajaysurya" title="Documentation">📖</a> <a href="#design-sajaysurya" title="Design">🎨</a> <a href="#example-sajaysurya" title="Examples">💡</a> <a href="#ideas-sajaysurya" title="Ideas, Planning, & Feedback">🤔</a> <a href="https://github.com/sktime/sktime/commits?author=sajaysurya" title="Tests">⚠️</a> <a href="#tutorial-sajaysurya" title="Tutorials">✅</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/SamiAlavi"><img src="https://avatars.githubusercontent.com/u/32700289?v=4?s=100" width="100px;" alt="Sami Alavi"/><br /><sub><b>Sami Alavi</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=SamiAlavi" title="Code">💻</a> <a href="#maintenance-SamiAlavi" title="Maintenance">🚧</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/Saransh-cpp"><img src="https://avatars.githubusercontent.com/u/74055102?v=4?s=100" width="100px;" alt="Saransh Chopra"/><br /><sub><b>Saransh Chopra</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=Saransh-cpp" title="Documentation">📖</a> <a href="#infra-Saransh-cpp" title="Infrastructure (Hosting, Build-Tools, etc)">🚇</a></td> <td align="center" valign="top" width="11.11%"><a href="https://www.linkedin.com/in/satya-pattnaik-77a430144/"><img src="https://avatars.githubusercontent.com/u/22102468?v=4?s=100" width="100px;" alt="Satya Prakash Pattnaik"/><br /><sub><b>Satya Prakash Pattnaik</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=satya-pattnaik" title="Documentation">📖</a></td> @@ -231,10 +233,10 @@ Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/d <td align="center" valign="top" width="11.11%"><a href="https://github.com/SebasKoel"><img src="https://avatars3.githubusercontent.com/u/66252156?v=4?s=100" width="100px;" alt="Sebastiaan Koel"/><br /><sub><b>Sebastiaan Koel</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=SebasKoel" title="Code">💻</a> <a href="https://github.com/sktime/sktime/commits?author=SebasKoel" title="Documentation">📖</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/shagn"><img src="https://avatars.githubusercontent.com/u/16029092?v=4?s=100" width="100px;" alt="Sebastian Hagn"/><br /><sub><b>Sebastian Hagn</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=shagn" title="Documentation">📖</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/ShivamPathak99"><img src="https://avatars.githubusercontent.com/u/98941325?s=400&v=4?s=100" width="100px;" alt="Shivam Pathak"/><br /><sub><b>Shivam Pathak</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=ShivamPathak99" title="Documentation">📖</a></td> - <td align="center" valign="top" width="11.11%"><a href="https://github.com/AurumnPegasus"><img 
src="https://avatars.githubusercontent.com/u/54315149?v=4?s=100" width="100px;" alt="Shivansh Subramanian"/><br /><sub><b>Shivansh Subramanian</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=AurumnPegasus" title="Documentation">📖</a> <a href="https://github.com/sktime/sktime/commits?author=AurumnPegasus" title="Code">💻</a></td> - <td align="center" valign="top" width="11.11%"><a href="https://www.linkedin.com/in/solomon-botchway-a1383821b/"><img src="https://avatars.githubusercontent.com/u/62394255?v=4?s=100" width="100px;" alt="Solomon Botchway"/><br /><sub><b>Solomon Botchway</b></sub></a><br /><a href="#maintenance-snnbotchway" title="Maintenance">🚧</a></td> </tr> <tr> + <td align="center" valign="top" width="11.11%"><a href="https://github.com/AurumnPegasus"><img src="https://avatars.githubusercontent.com/u/54315149?v=4?s=100" width="100px;" alt="Shivansh Subramanian"/><br /><sub><b>Shivansh Subramanian</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=AurumnPegasus" title="Documentation">📖</a> <a href="https://github.com/sktime/sktime/commits?author=AurumnPegasus" title="Code">💻</a></td> + <td align="center" valign="top" width="11.11%"><a href="https://www.linkedin.com/in/solomon-botchway-a1383821b/"><img src="https://avatars.githubusercontent.com/u/62394255?v=4?s=100" width="100px;" alt="Solomon Botchway"/><br /><sub><b>Solomon Botchway</b></sub></a><br /><a href="#maintenance-snnbotchway" title="Maintenance">🚧</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/khrapovs"><img src="https://avatars.githubusercontent.com/u/3774663?v=4?s=100" width="100px;" alt="Stanislav Khrapov"/><br /><sub><b>Stanislav Khrapov</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=khrapovs" title="Code">💻</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/SveaMeyer13"><img src="https://avatars.githubusercontent.com/u/46671894?v=4?s=100" width="100px;" alt="Svea Marie Meyer"/><br /><sub><b>Svea Marie Meyer</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=SveaMeyer13" title="Documentation">📖</a> <a href="https://github.com/sktime/sktime/commits?author=SveaMeyer13" title="Code">💻</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/TNTran92"><img src="https://avatars.githubusercontent.com/u/55965636?v=4?s=100" width="100px;" alt="TNTran92"/><br /><sub><b>TNTran92</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=TNTran92" title="Code">💻</a></td> @@ -242,10 +244,10 @@ Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/d <td align="center" valign="top" width="11.11%"><a href="https://thayeylolu.github.io/portfolio/"><img src="https://avatars.githubusercontent.com/u/13348874?v=4?s=100" width="100px;" alt="Taiwo Owoseni"/><br /><sub><b>Taiwo Owoseni</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=thayeylolu" title="Code">💻</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/lnthach"><img src="https://avatars0.githubusercontent.com/u/7788363?v=4?s=100" width="100px;" alt="Thach Le Nguyen"/><br /><sub><b>Thach Le Nguyen</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=lnthach" title="Code">💻</a> <a href="https://github.com/sktime/sktime/commits?author=lnthach" title="Tests">⚠️</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/mathco-wf"><img 
src="https://avatars.githubusercontent.com/mathco-wf?s=100" width="100px;" alt="TheMathcompay Widget Factory Team"/><br /><sub><b>TheMathcompay Widget Factory Team</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=mathco-wf" title="Documentation">📖</a></td> - <td align="center" valign="top" width="11.11%"><a href="https://github.com/tombh"><img src="https://avatars.githubusercontent.com/u/160835?s=80&v=4?s=100" width="100px;" alt="Thomas Buckley-Houston"/><br /><sub><b>Thomas Buckley-Houston</b></sub></a><br /><a href="https://github.com/sktime/sktime/issues?q=author%3Atombh" title="Bug reports">🐛</a></td> - <td align="center" valign="top" width="11.11%"><a href="https://github.com/xxl4tomxu98"><img src="https://avatars.githubusercontent.com/u/62292177?s=40&v=4?s=100" width="100px;" alt="Tom Xu"/><br /><sub><b>Tom Xu</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=xxl4tomxu98" title="Code">💻</a> <a href="https://github.com/sktime/sktime/commits?author=xxl4tomxu98" title="Documentation">📖</a></td> </tr> <tr> + <td align="center" valign="top" width="11.11%"><a href="https://github.com/tombh"><img src="https://avatars.githubusercontent.com/u/160835?s=80&v=4?s=100" width="100px;" alt="Thomas Buckley-Houston"/><br /><sub><b>Thomas Buckley-Houston</b></sub></a><br /><a href="https://github.com/sktime/sktime/issues?q=author%3Atombh" title="Bug reports">🐛</a></td> + <td align="center" valign="top" width="11.11%"><a href="https://github.com/xxl4tomxu98"><img src="https://avatars.githubusercontent.com/u/62292177?s=40&v=4?s=100" width="100px;" alt="Tom Xu"/><br /><sub><b>Tom Xu</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=xxl4tomxu98" title="Code">💻</a> <a href="https://github.com/sktime/sktime/commits?author=xxl4tomxu98" title="Documentation">📖</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/tch"><img src="https://avatars3.githubusercontent.com/u/184076?v=4?s=100" width="100px;" alt="Tomasz Chodakowski"/><br /><sub><b>Tomasz Chodakowski</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=tch" title="Code">💻</a> <a href="https://github.com/sktime/sktime/commits?author=tch" title="Documentation">📖</a> <a href="https://github.com/sktime/sktime/issues?q=author%3Atch" title="Bug reports">🐛</a></td> <td align="center" valign="top" width="11.11%"><a href="http://www.timeseriesclassification.com"><img src="https://avatars1.githubusercontent.com/u/9594042?v=4?s=100" width="100px;" alt="Tony Bagnall"/><br /><sub><b>Tony Bagnall</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=TonyBagnall" title="Code">💻</a> <a href="#business-TonyBagnall" title="Business development">💼</a> <a href="https://github.com/sktime/sktime/commits?author=TonyBagnall" title="Documentation">📖</a> <a href="#design-TonyBagnall" title="Design">🎨</a> <a href="#eventOrganizing-TonyBagnall" title="Event Organizing">📋</a> <a href="#fundingFinding-TonyBagnall" title="Funding Finding">🔍</a> <a href="#ideas-TonyBagnall" title="Ideas, Planning, & Feedback">🤔</a> <a href="#projectManagement-TonyBagnall" title="Project Management">📆</a> <a href="#question-TonyBagnall" title="Answering Questions">💬</a> <a href="https://github.com/sktime/sktime/pulls?q=is%3Apr+reviewed-by%3ATonyBagnall" title="Reviewed Pull Requests">👀</a> <a href="#talk-TonyBagnall" title="Talks">📢</a> <a href="#data-TonyBagnall" title="Data">🔣</a></td> <td align="center" valign="top" width="11.11%"><a 
href="https://github.com/utsavcoding"><img src="https://avatars3.githubusercontent.com/u/55446385?v=4?s=100" width="100px;" alt="Utsav Kumar Tiwari"/><br /><sub><b>Utsav Kumar Tiwari</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=utsavcoding" title="Code">💻</a> <a href="https://github.com/sktime/sktime/commits?author=utsavcoding" title="Documentation">📖</a></td> @@ -253,10 +255,10 @@ Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/d <td align="center" valign="top" width="11.11%"><a href="https://github.com/ViktorKaz"><img src="https://avatars0.githubusercontent.com/u/33499138?v=4?s=100" width="100px;" alt="ViktorKaz"/><br /><sub><b>ViktorKaz</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=ViktorKaz" title="Code">💻</a> <a href="https://github.com/sktime/sktime/commits?author=ViktorKaz" title="Documentation">📖</a> <a href="#design-ViktorKaz" title="Design">🎨</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/VyomkeshVyas"><img src="?s=100" width="100px;" alt="Vyomkesh Vyas"/><br /><sub><b>Vyomkesh Vyas</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=VyomkeshVyas" title="Code">💻</a> <a href="https://github.com/sktime/sktime/commits?author=VyomkeshVyas" title="Documentation">📖</a> <a href="#example-VyomkeshVyas" title="Examples">💡</a> <a href="https://github.com/sktime/sktime/commits?author=VyomkeshVyas" title="Tests">⚠️</a></td> <td align="center" valign="top" width="11.11%"><a href="https://www.linkedin.com/in/templierw/"><img src="https://github.com/templierw.png?s=100" width="100px;" alt="William Templier"/><br /><sub><b>William Templier</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=templierw" title="Documentation">📖</a></td> - <td align="center" valign="top" width="11.11%"><a href="https://github.com/magittan"><img src="https://avatars0.githubusercontent.com/u/14024202?v=4?s=100" width="100px;" alt="William Zheng"/><br /><sub><b>William Zheng</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=magittan" title="Code">💻</a> <a href="https://github.com/sktime/sktime/commits?author=magittan" title="Tests">⚠️</a></td> - <td align="center" valign="top" width="11.11%"><a href="https://github.com/yairbeer"><img src="https://avatars.githubusercontent.com/yairbeer?s=100" width="100px;" alt="Yair Beer"/><br /><sub><b>Yair Beer</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=yairbeer" title="Code">💻</a></td> </tr> <tr> + <td align="center" valign="top" width="11.11%"><a href="https://github.com/magittan"><img src="https://avatars0.githubusercontent.com/u/14024202?v=4?s=100" width="100px;" alt="William Zheng"/><br /><sub><b>William Zheng</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=magittan" title="Code">💻</a> <a href="https://github.com/sktime/sktime/commits?author=magittan" title="Tests">⚠️</a></td> + <td align="center" valign="top" width="11.11%"><a href="https://github.com/yairbeer"><img src="https://avatars.githubusercontent.com/yairbeer?s=100" width="100px;" alt="Yair Beer"/><br /><sub><b>Yair Beer</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=yairbeer" title="Code">💻</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/yashlamba"><img src="https://avatars.githubusercontent.com/u/44164398?v=4?s=100" width="100px;" alt="Yash Lamba"/><br /><sub><b>Yash Lamba</b></sub></a><br /><a 
href="https://github.com/sktime/sktime/commits?author=yashlamba" title="Code">💻</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/xuyxu"><img src="https://avatars2.githubusercontent.com/u/22359569?v=4?s=100" width="100px;" alt="Yi-Xuan Xu"/><br /><sub><b>Yi-Xuan Xu</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=xuyxu" title="Code">💻</a> <a href="https://github.com/sktime/sktime/commits?author=xuyxu" title="Tests">⚠️</a> <a href="#maintenance-xuyxu" title="Maintenance">🚧</a> <a href="https://github.com/sktime/sktime/commits?author=xuyxu" title="Documentation">📖</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/ZiyaoWei"><img src="https://avatars.githubusercontent.com/u/940823?v=4?s=100" width="100px;" alt="Ziyao Wei"/><br /><sub><b>Ziyao Wei</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=ZiyaoWei" title="Code">💻</a></td> @@ -264,10 +266,10 @@ Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/d <td align="center" valign="top" width="11.11%"><a href="https://github.com/abandus"><img src="https://avatars2.githubusercontent.com/u/46486474?v=4?s=100" width="100px;" alt="abandus"/><br /><sub><b>abandus</b></sub></a><br /><a href="#ideas-abandus" title="Ideas, Planning, & Feedback">🤔</a> <a href="https://github.com/sktime/sktime/commits?author=abandus" title="Code">💻</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/adoherty21"><img src="https://avatars.githubusercontent.com/u/52799751?s=400&v=4?s=100" width="100px;" alt="adoherty21"/><br /><sub><b>adoherty21</b></sub></a><br /><a href="https://github.com/sktime/sktime/issues?q=author%3Aadoherty21" title="Bug reports">🐛</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/bethrice44"><img src="https://avatars.githubusercontent.com/u/11226988?v=4?s=100" width="100px;" alt="bethrice44"/><br /><sub><b>bethrice44</b></sub></a><br /><a href="https://github.com/sktime/sktime/issues?q=author%3Abethrice44" title="Bug reports">🐛</a> <a href="https://github.com/sktime/sktime/commits?author=bethrice44" title="Code">💻</a> <a href="https://github.com/sktime/sktime/pulls?q=is%3Apr+reviewed-by%3Abethrice44" title="Reviewed Pull Requests">👀</a> <a href="https://github.com/sktime/sktime/commits?author=bethrice44" title="Tests">⚠️</a></td> - <td align="center" valign="top" width="11.11%"><a href="https://github.com/big-o"><img src="https://avatars1.githubusercontent.com/u/1134151?v=4?s=100" width="100px;" alt="big-o"/><br /><sub><b>big-o</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=big-o" title="Code">💻</a> <a href="https://github.com/sktime/sktime/commits?author=big-o" title="Tests">⚠️</a> <a href="#design-big-o" title="Design">🎨</a> <a href="#ideas-big-o" title="Ideas, Planning, & Feedback">🤔</a> <a href="https://github.com/sktime/sktime/pulls?q=is%3Apr+reviewed-by%3Abig-o" title="Reviewed Pull Requests">👀</a> <a href="#tutorial-big-o" title="Tutorials">✅</a> <a href="#mentoring-big-o" title="Mentoring">🧑‍🏫</a></td> - <td align="center" valign="top" width="11.11%"><a href="https://github.com/bobbys-dev"><img src="https://avatars.githubusercontent.com/bobbys-dev?s=100" width="100px;" alt="bobbys"/><br /><sub><b>bobbys</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=bobbys-dev" title="Code">💻</a></td> </tr> <tr> + <td align="center" valign="top" width="11.11%"><a 
href="https://github.com/big-o"><img src="https://avatars1.githubusercontent.com/u/1134151?v=4?s=100" width="100px;" alt="big-o"/><br /><sub><b>big-o</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=big-o" title="Code">💻</a> <a href="https://github.com/sktime/sktime/commits?author=big-o" title="Tests">⚠️</a> <a href="#design-big-o" title="Design">🎨</a> <a href="#ideas-big-o" title="Ideas, Planning, & Feedback">🤔</a> <a href="https://github.com/sktime/sktime/pulls?q=is%3Apr+reviewed-by%3Abig-o" title="Reviewed Pull Requests">👀</a> <a href="#tutorial-big-o" title="Tutorials">✅</a> <a href="#mentoring-big-o" title="Mentoring">🧑‍🏫</a></td> + <td align="center" valign="top" width="11.11%"><a href="https://github.com/bobbys-dev"><img src="https://avatars.githubusercontent.com/bobbys-dev?s=100" width="100px;" alt="bobbys"/><br /><sub><b>bobbys</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=bobbys-dev" title="Code">💻</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/brettkoonce"><img src="https://avatars2.githubusercontent.com/u/11281814?v=4?s=100" width="100px;" alt="brett koonce"/><br /><sub><b>brett koonce</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=brettkoonce" title="Documentation">📖</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/btrtts"><img src="https://avatars3.githubusercontent.com/u/66252156?v=4?s=100" width="100px;" alt="btrtts"/><br /><sub><b>btrtts</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=btrtts" title="Documentation">📖</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/chizzi25"><img src="https://avatars3.githubusercontent.com/u/67911243?v=4?s=100" width="100px;" alt="chizzi25"/><br /><sub><b>chizzi25</b></sub></a><br /><a href="#blog-chizzi25" title="Blogposts">📝</a></td> @@ -275,10 +277,10 @@ Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/d <td align="center" valign="top" width="11.11%"><a href="https://github.com/danbartl"><img src="https://avatars.githubusercontent.com/u/19947407?v=4?s=100" width="100px;" alt="danbartl"/><br /><sub><b>danbartl</b></sub></a><br /><a href="https://github.com/sktime/sktime/issues?q=author%3Adanbartl" title="Bug reports">🐛</a> <a href="https://github.com/sktime/sktime/commits?author=danbartl" title="Code">💻</a> <a href="https://github.com/sktime/sktime/pulls?q=is%3Apr+reviewed-by%3Adanbartl" title="Reviewed Pull Requests">👀</a> <a href="#talk-danbartl" title="Talks">📢</a> <a href="https://github.com/sktime/sktime/commits?author=danbartl" title="Tests">⚠️</a> <a href="#tutorial-danbartl" title="Tutorials">✅</a> <a href="#video-danbartl" title="Videos">📹</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/hamzahiqb"><img src="https://avatars3.githubusercontent.com/u/10302415?v=4?s=100" width="100px;" alt="hamzahiqb"/><br /><sub><b>hamzahiqb</b></sub></a><br /><a href="#infra-hamzahiqb" title="Infrastructure (Hosting, Build-Tools, etc)">🚇</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/hiqbal2"><img src="https://avatars3.githubusercontent.com/u/10302415?v=4?s=100" width="100px;" alt="hiqbal2"/><br /><sub><b>hiqbal2</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=hiqbal2" title="Documentation">📖</a></td> - <td align="center" valign="top" width="11.11%"><a href="https://github.com/jesellier"><img 
src="https://avatars0.githubusercontent.com/u/51952076?v=4?s=100" width="100px;" alt="jesellier"/><br /><sub><b>jesellier</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=jesellier" title="Code">💻</a></td> - <td align="center" valign="top" width="11.11%"><a href="https://github.com/jschemm"><img src="https://avatars.githubusercontent.com/u/81151346?v=4?s=100" width="100px;" alt="jschemm"/><br /><sub><b>jschemm</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=jschemm" title="Code">💻</a></td> </tr> <tr> + <td align="center" valign="top" width="11.11%"><a href="https://github.com/jesellier"><img src="https://avatars0.githubusercontent.com/u/51952076?v=4?s=100" width="100px;" alt="jesellier"/><br /><sub><b>jesellier</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=jesellier" title="Code">💻</a></td> + <td align="center" valign="top" width="11.11%"><a href="https://github.com/jschemm"><img src="https://avatars.githubusercontent.com/u/81151346?v=4?s=100" width="100px;" alt="jschemm"/><br /><sub><b>jschemm</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=jschemm" title="Code">💻</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/kkoziara"><img src="https://avatars1.githubusercontent.com/u/4346849?v=4?s=100" width="100px;" alt="kkoziara"/><br /><sub><b>kkoziara</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=kkoziara" title="Code">💻</a> <a href="https://github.com/sktime/sktime/issues?q=author%3Akkoziara" title="Bug reports">🐛</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/matteogales"><img src="https://avatars0.githubusercontent.com/u/9269326?v=4?s=100" width="100px;" alt="matteogales"/><br /><sub><b>matteogales</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=matteogales" title="Code">💻</a> <a href="#design-matteogales" title="Design">🎨</a> <a href="#ideas-matteogales" title="Ideas, Planning, & Feedback">🤔</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/oleskiewicz"><img src="https://avatars1.githubusercontent.com/u/5682158?v=4?s=100" width="100px;" alt="oleskiewicz"/><br /><sub><b>oleskiewicz</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=oleskiewicz" title="Code">💻</a> <a href="https://github.com/sktime/sktime/commits?author=oleskiewicz" title="Documentation">📖</a> <a href="https://github.com/sktime/sktime/commits?author=oleskiewicz" title="Tests">⚠️</a></td> @@ -286,10 +288,10 @@ Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/d <td align="center" valign="top" width="11.11%"><a href="https://github.com/xloem"><img src="?s=100" width="100px;" alt="patiently pending world peace"/><br /><sub><b>patiently pending world peace</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=xloem" title="Code">💻</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/raishubham1"><img src="https://avatars3.githubusercontent.com/u/29356417?v=4?s=100" width="100px;" alt="raishubham1"/><br /><sub><b>raishubham1</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=raishubham1" title="Documentation">📖</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/simone-pignotti"><img src="https://avatars1.githubusercontent.com/u/44410066?v=4?s=100" width="100px;" alt="simone-pignotti"/><br 
/><sub><b>simone-pignotti</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=simone-pignotti" title="Code">💻</a> <a href="https://github.com/sktime/sktime/issues?q=author%3Asimone-pignotti" title="Bug reports">🐛</a></td> - <td align="center" valign="top" width="11.11%"><a href="https://github.com/sophijka"><img src="https://avatars2.githubusercontent.com/u/47450591?v=4?s=100" width="100px;" alt="sophijka"/><br /><sub><b>sophijka</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=sophijka" title="Documentation">📖</a> <a href="#maintenance-sophijka" title="Maintenance">🚧</a></td> - <td align="center" valign="top" width="11.11%"><a href="https://github.com/sri1419"><img src="https://avatars2.githubusercontent.com/u/65078278?v=4?s=100" width="100px;" alt="sri1419"/><br /><sub><b>sri1419</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=sri1419" title="Code">💻</a></td> </tr> <tr> + <td align="center" valign="top" width="11.11%"><a href="https://github.com/sophijka"><img src="https://avatars2.githubusercontent.com/u/47450591?v=4?s=100" width="100px;" alt="sophijka"/><br /><sub><b>sophijka</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=sophijka" title="Documentation">📖</a> <a href="#maintenance-sophijka" title="Maintenance">🚧</a></td> + <td align="center" valign="top" width="11.11%"><a href="https://github.com/sri1419"><img src="https://avatars2.githubusercontent.com/u/65078278?v=4?s=100" width="100px;" alt="sri1419"/><br /><sub><b>sri1419</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=sri1419" title="Code">💻</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/tensorflow-as-tf"><img src="https://avatars.githubusercontent.com/u/51345718?v=4?s=100" width="100px;" alt="tensorflow-as-tf"/><br /><sub><b>tensorflow-as-tf</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=tensorflow-as-tf" title="Code">💻</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/vedazeren"><img src="https://avatars3.githubusercontent.com/u/63582874?v=4?s=100" width="100px;" alt="vedazeren"/><br /><sub><b>vedazeren</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=vedazeren" title="Code">💻</a> <a href="https://github.com/sktime/sktime/commits?author=vedazeren" title="Tests">⚠️</a></td> <td align="center" valign="top" width="11.11%"><a href="https://github.com/vincent-nich12"><img src="https://avatars3.githubusercontent.com/u/36476633?v=4?s=100" width="100px;" alt="vincent-nich12"/><br /><sub><b>vincent-nich12</b></sub></a><br /><a href="https://github.com/sktime/sktime/commits?author=vincent-nich12" title="Code">💻</a></td> diff --git a/sktime/dists_kernels/base/adapters/_tslearn.py b/sktime/dists_kernels/base/adapters/_tslearn.py index 2952c658a74..9cbfa0c6608 100644 --- a/sktime/dists_kernels/base/adapters/_tslearn.py +++ b/sktime/dists_kernels/base/adapters/_tslearn.py @@ -140,4 +140,7 @@ def _transform(self, X, X2=None): if isinstance(X2, list): X2 = self._coerce_df_list_to_list_of_arr(X2) - return self._eval_tslearn_pwtrafo(X, X2) + if self._is_cdist: + return self._eval_tslearn_pwtrafo(X, X2) + else: + return self._eval_tslearn_pwtrafo_vectorized(X, X2)
spacetelescope__jwql-421
Add README to style_guide directory
We are starting to have a range of helpful documents in our `jwql/style_guide` directory - the general style guide. This is great! I am thinking it would now be helpful to include a `README.md` file in there, so that any prospective user who looks there is met with some information about what resources are available.
[ { "content": "#! /usr/bin/env python\n\n\"\"\"\nThis module is intended to be a template to aid in creating new\nmonitoring scripts and to demonstrate how to format them to fully\nutilize the ``jwql`` framework.\n\nEach monitoring script must be executable from the command line (i.e.\nhave a ``if '__name__' == '__main__' section), as well as have a \"main\"\nfunction that calls all other functions, methods, or modules (i.e.\nthe entirety of the code is executed within the scope of the main\nfunction), as shown in this example.\n\nUsers may utilize the ``jwql`` framework functions for logging,\nsetting permissions, parsing filenames, etc. (See related ``import``s).\n\nAuthors\n-------\n\n - Catherine Martlin\n - Matthew Bourque\n\nUse\n---\n\n This module can be executed from the command line:\n ::\n\n python monitor_template.py\n\n Alternatively, it can be called from a python environment via the\n following import statements:\n ::\n\n from monitor_template import main_monitor_function\n from monitor_template import secondary_function\n\nDependencies\n------------\n\n The user must have a configuration file named ``config.json``\n placed in the ``utils`` directory.\n\nNotes\n-----\n\n Any monitoring script written for ``jwql`` must adhere to the\n ``jwql`` style guide located at:\n https://github.com/spacetelescope/jwql/blob/master/style_guide/style_guide.md\n\"\"\"\n\nimport os\nimport logging\n\nfrom astroquery.mast import Mast\nfrom jwst import datamodels\nfrom bokeh.charts import Donut\nfrom bokeh.embed import components\n\n# Functions for logging\nfrom jwql.logging.logging_functions import configure_logging\nfrom jwql.logging.logging_functions import log_info\nfrom jwql.logging.logging_functions import log_fail\n\n# Function for setting permissions of files/directories\nfrom jwql.permissions.permissions import set_permissions\n\n# Function for parsing filenames\nfrom jwql.utils.utils import filename_parser\n\n# Objects for hard-coded information\nfrom jwql.utils.utils import get_config\nfrom jwql.utils.constants import JWST_DATAPRODUCTS, JWST_INSTRUMENT_NAMES\n\n\n@log_fail\n@log_info\ndef monitor_template_main():\n \"\"\" The main function of the ``monitor_template`` module.\"\"\"\n\n # Example of logging\n my_variable = 'foo'\n logging.info('Some useful information: {}'.format(my_variable))\n\n # Example of querying for a dataset via MAST API\n service = \"Mast.Jwst.Filtered.Niriss\"\n params = {\"columns\": \"filename\",\n \"filters\": [{\"paramName\": \"filter\",\n \"values\": ['F430M']}]}\n response = Mast.service_request_async(service, params)\n result = response[0].json()['data']\n filename_of_interest = result[0]['filename'] # jw00304002001_02102_00001_nis_uncal.fits\n\n # Example of parsing a filename\n filename_dict = filename_parser(filename_of_interest)\n # Contents of filename_dict:\n # {'program_id': '00304',\n # 'observation': '002',\n # 'visit': '001',\n # 'visit_group': '02',\n # 'parallel_seq_id': '1',\n # 'activity': '02',\n # 'exposure_id': '00001',\n # 'detector': 'nis',\n # 'suffix': 'uncal'}\n\n # Example of locating a dataset in the filesystem\n filesystem = get_config()['filesystem']\n dataset = os.path.join(filesystem, 'jw{}'.format(filename_dict['program_id']),\n filename_of_interest)\n\n # Example of reading in dataset using jwst.datamodels\n im = datamodels.open(dataset)\n # Now have access to:\n # im.data # Data array\n # im.err # ERR array\n # im.meta # Metadata such as header keywords\n\n # Example of saving a file and setting permissions\n 
im.save('some_filename.fits')\n set_permissions('some_filename.fits')\n\n # Example of creating and exporting a Bokeh plot\n plt = Donut(im.data, plot_width=600, plot_height=600)\n plt.sizing_mode = 'stretch_both' # Necessary for responsive sizing on web app\n script, div = components(plt)\n\n plot_output_dir = get_config()['outputs']\n div_outfile = os.path.join(plot_output_dir, 'monitor_name',\n filename_of_interest + \"_component.html\")\n script_outfile = os.path.join(plot_output_dir, 'monitor_name',\n filename_of_interest + \"_component.js\")\n\n for outfile, component in zip([div_outfile, script_outfile], [div, script]):\n with open(outfile, 'w') as f:\n f.write(component)\n f.close()\n set_permissions(outfile)\n\n # Perform any other necessary code\n well_named_variable = \"Function does something.\"\n result_of_second_function = second_function(well_named_variable)\n\n\ndef second_function(input_value):\n \"\"\" This is your axiliary function; you may have many of these.\n\n Parameters\n ----------\n input_value : str\n Some value to modify in the function.\n\n Returns\n -------\n useful_result : str\n The result of modifying the input value.\n \"\"\"\n\n # Begin logging:\n logging.info(\" \")\n logging.info(\"The auxiliary function has started running.\")\n\n # Example function:\n useful_result = input_value + \" The other function did something, too.\"\n\n logging.info(\"The auxiliary function is returning: \")\n logging.info(useful_result)\n logging.info(\" \")\n\n return useful_result\n\n\nif __name__ == '__main__':\n\n # Configure logging\n module = os.path.basename(__file__).strip('.py')\n configure_logging(module)\n\n # Call the main function\n monitor_template_main()\n", "path": "jwql/utils/monitor_template.py" } ]
[ { "content": "#! /usr/bin/env python\n\n\"\"\"\nThis module is intended to be a template to aid in creating new\nmonitoring scripts and to demonstrate how to format them to fully\nutilize the ``jwql`` framework.\n\nEach monitoring script must be executable from the command line (i.e.\nhave a ``if '__name__' == '__main__' section), as well as have a \"main\"\nfunction that calls all other functions, methods, or modules (i.e.\nthe entirety of the code is executed within the scope of the main\nfunction), as shown in this example.\n\nUsers may utilize the ``jwql`` framework functions for logging,\nsetting permissions, parsing filenames, etc. (See related ``import``s).\n\nAuthors\n-------\n\n - Catherine Martlin\n - Matthew Bourque\n\nUse\n---\n\n This module can be executed from the command line:\n ::\n\n python monitor_template.py\n\n Alternatively, it can be called from a python environment via the\n following import statements:\n ::\n\n from monitor_template import main_monitor_function\n from monitor_template import secondary_function\n\nDependencies\n------------\n\n The user must have a configuration file named ``config.json``\n placed in the ``utils`` directory.\n\nNotes\n-----\n\n Any monitoring script written for ``jwql`` must adhere to the\n ``jwql`` style guide located at:\n https://github.com/spacetelescope/jwql/blob/master/style_guide/README.md\n\"\"\"\n\nimport os\nimport logging\n\nfrom astroquery.mast import Mast\nfrom jwst import datamodels\nfrom bokeh.charts import Donut\nfrom bokeh.embed import components\n\n# Functions for logging\nfrom jwql.logging.logging_functions import configure_logging\nfrom jwql.logging.logging_functions import log_info\nfrom jwql.logging.logging_functions import log_fail\n\n# Function for setting permissions of files/directories\nfrom jwql.permissions.permissions import set_permissions\n\n# Function for parsing filenames\nfrom jwql.utils.utils import filename_parser\n\n# Objects for hard-coded information\nfrom jwql.utils.utils import get_config\nfrom jwql.utils.constants import JWST_DATAPRODUCTS, JWST_INSTRUMENT_NAMES\n\n\n@log_fail\n@log_info\ndef monitor_template_main():\n \"\"\" The main function of the ``monitor_template`` module.\"\"\"\n\n # Example of logging\n my_variable = 'foo'\n logging.info('Some useful information: {}'.format(my_variable))\n\n # Example of querying for a dataset via MAST API\n service = \"Mast.Jwst.Filtered.Niriss\"\n params = {\"columns\": \"filename\",\n \"filters\": [{\"paramName\": \"filter\",\n \"values\": ['F430M']}]}\n response = Mast.service_request_async(service, params)\n result = response[0].json()['data']\n filename_of_interest = result[0]['filename'] # jw00304002001_02102_00001_nis_uncal.fits\n\n # Example of parsing a filename\n filename_dict = filename_parser(filename_of_interest)\n # Contents of filename_dict:\n # {'program_id': '00304',\n # 'observation': '002',\n # 'visit': '001',\n # 'visit_group': '02',\n # 'parallel_seq_id': '1',\n # 'activity': '02',\n # 'exposure_id': '00001',\n # 'detector': 'nis',\n # 'suffix': 'uncal'}\n\n # Example of locating a dataset in the filesystem\n filesystem = get_config()['filesystem']\n dataset = os.path.join(filesystem, 'jw{}'.format(filename_dict['program_id']),\n filename_of_interest)\n\n # Example of reading in dataset using jwst.datamodels\n im = datamodels.open(dataset)\n # Now have access to:\n # im.data # Data array\n # im.err # ERR array\n # im.meta # Metadata such as header keywords\n\n # Example of saving a file and setting permissions\n 
im.save('some_filename.fits')\n set_permissions('some_filename.fits')\n\n # Example of creating and exporting a Bokeh plot\n plt = Donut(im.data, plot_width=600, plot_height=600)\n plt.sizing_mode = 'stretch_both' # Necessary for responsive sizing on web app\n script, div = components(plt)\n\n plot_output_dir = get_config()['outputs']\n div_outfile = os.path.join(plot_output_dir, 'monitor_name',\n filename_of_interest + \"_component.html\")\n script_outfile = os.path.join(plot_output_dir, 'monitor_name',\n filename_of_interest + \"_component.js\")\n\n for outfile, component in zip([div_outfile, script_outfile], [div, script]):\n with open(outfile, 'w') as f:\n f.write(component)\n f.close()\n set_permissions(outfile)\n\n # Perform any other necessary code\n well_named_variable = \"Function does something.\"\n result_of_second_function = second_function(well_named_variable)\n\n\ndef second_function(input_value):\n \"\"\" This is your axiliary function; you may have many of these.\n\n Parameters\n ----------\n input_value : str\n Some value to modify in the function.\n\n Returns\n -------\n useful_result : str\n The result of modifying the input value.\n \"\"\"\n\n # Begin logging:\n logging.info(\" \")\n logging.info(\"The auxiliary function has started running.\")\n\n # Example function:\n useful_result = input_value + \" The other function did something, too.\"\n\n logging.info(\"The auxiliary function is returning: \")\n logging.info(useful_result)\n logging.info(\" \")\n\n return useful_result\n\n\nif __name__ == '__main__':\n\n # Configure logging\n module = os.path.basename(__file__).strip('.py')\n configure_logging(module)\n\n # Call the main function\n monitor_template_main()\n", "path": "jwql/utils/monitor_template.py" } ]
diff --git a/.pep8speaks.yml b/.pep8speaks.yml index 146120ea3..d6a129ad9 100644 --- a/.pep8speaks.yml +++ b/.pep8speaks.yml @@ -4,7 +4,7 @@ message: # Customize the comment made by the bot opened: # Messages when a new PR is submitted header: "Hello @{name}, Thank you for submitting the Pull Request !" # The keyword {name} is converted into the author's username - footer: "If you have not done so, please consult the [`jwql` Style Guide](https://github.com/spacetelescope/jwql/blob/master/style_guide/style_guide.md)" + footer: "If you have not done so, please consult the [`jwql` Style Guide](https://github.com/spacetelescope/jwql/blob/master/style_guide/README.md)" # The messages can be written as they would over GitHub updated: # Messages when new commits are added to the PR header: "Hello @{name}, Thank you for updating !" diff --git a/README.md b/README.md index db25081f4..e03cd513c 100644 --- a/README.md +++ b/README.md @@ -92,7 +92,7 @@ Much of the `jwql` software depends on the existence of a `config.json` file wit ## Software Contributions -There are two current pages to review before you begin contributing to the `jwql` development. The first is our [style guide](https://github.com/spacetelescope/jwql/blob/master/style_guide/style_guide.md) and the second is our [suggested git workflow page](https://github.com/spacetelescope/jwql/wiki/git-&-GitHub-workflow-for-contributing), which contains an in-depth explanation of the workflow. +There are two current pages to review before you begin contributing to the `jwql` development. The first is our [style guide](https://github.com/spacetelescope/jwql/blob/master/style_guide/README.md) and the second is our [suggested git workflow page](https://github.com/spacetelescope/jwql/wiki/git-&-GitHub-workflow-for-contributing), which contains an in-depth explanation of the workflow. Contributors are also encouraged to check out the [Checklist for Contributors Guide](https://github.com/spacetelescope/jwql/wiki/Checklist-for-Contributors-and-Reviewers-of-Pull-Requests) to ensure the pull request contains all of the necessary changes. diff --git a/jwql/utils/monitor_template.py b/jwql/utils/monitor_template.py index aa443ff9f..ae6d46d9a 100644 --- a/jwql/utils/monitor_template.py +++ b/jwql/utils/monitor_template.py @@ -46,7 +46,7 @@ Any monitoring script written for ``jwql`` must adhere to the ``jwql`` style guide located at: - https://github.com/spacetelescope/jwql/blob/master/style_guide/style_guide.md + https://github.com/spacetelescope/jwql/blob/master/style_guide/README.md """ import os diff --git a/style_guide/style_guide.md b/style_guide/README.md similarity index 100% rename from style_guide/style_guide.md rename to style_guide/README.md
voxel51__fiftyone-3439
[BUG] FiftyOne v0.21.6 shows "Not found" on localhost, while v0.21.4 could not connect session, despite working before 7 Aug 2023

### System information

- **OS Platform and Distribution** (`Windows 11 Pro, build 22621.2134`)
- **Google Chrome** (`Version 115.0.5790.171 (Official Build) (64-bit)`)
- **Python version** (`python --version 3.10.0`)
- **FiftyOne version** (`fiftyone --version 0.21.4`)
- **FiftyOne installed from** (`pip`)

### Commands to reproduce

I have my own custom Python script (including an option to run FiftyOne's quickstart) that had been working for weeks, at least until 7 Aug 2023. I'm unable to share the custom scripts. Today (17 Aug 2023), as I ran through installing FiftyOne and running my scripts, I encountered the following problems. I also had a few updates to the Google Chrome browser in these 10 days.

### Describe the problem

Here's what I've tried.

Scenario A: fiftyone v0.21.6
Problem: the terminal reports `App launched. Point your browser to http://localhost:5151`, but the App page at localhost:5151 does not load ("Not found" is displayed on the page).
Solutions tried that did not work: [Registry settings](https://github.com/voxel51/fiftyone/issues/2010) and/or [mimetype](https://github.com/voxel51/fiftyone/issues/2522#issuecomment-1416318362)

Scenario B: fiftyone v0.21.4
Problem: the App does not display fully (it only shows the FiftyOne skeleton page with "Select Dataset", with no layout or data in the middle). The terminal repeatedly prints `could not connect session, retrying in 10 seconds`.

I'm unsure what the cause of the above is and would appreciate your assistance.

### What areas of FiftyOne does this bug affect?

- [x] `App`: FiftyOne application issue
- [ ] `Core`: Core Python library issue
- [ ] `Server`: FiftyOne server issue

### Willingness to contribute

The FiftyOne Community encourages bug fix contributions. Would you or another member of your organization be willing to contribute a fix for this bug to the FiftyOne codebase?

- [ ] Yes. I can contribute a fix for this bug independently
- [ ] Yes. I would be willing to contribute a fix for this bug with guidance from the FiftyOne community
- [x] No. I cannot contribute a bug fix at this time
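One way to sanity-check Scenario A locally is to confirm that the server's `static` directory actually contains the built App assets that the root mount in `fiftyone/server/app.py` is expected to serve. A minimal, hedged diagnostic sketch follows; it is not official FiftyOne tooling, and the module path is an assumption based on the file path shown in this record:

```python
# Hedged diagnostic sketch -- not part of FiftyOne; the module path is an
# assumption based on fiftyone/server/app.py shown below.
import importlib.util
import os

spec = importlib.util.find_spec("fiftyone.server.app")
static_dir = os.path.join(os.path.dirname(spec.origin), "static")

print("static dir:    ", static_dir)
print("exists:        ", os.path.isdir(static_dir))
print("has index.html:", os.path.isfile(os.path.join(static_dir, "index.html")))
```

If the directory is missing or empty, the root static mount has nothing to fall back to, which would be consistent with (though not proof of) the "Not found" page described above.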
[ { "content": "\"\"\"\nFiftyOne Server app.\n\n| Copyright 2017-2023, Voxel51, Inc.\n| `voxel51.com <https://voxel51.com/>`_\n|\n\"\"\"\nfrom datetime import date, datetime\nimport os\nimport pathlib\n\nimport eta.core.utils as etau\nfrom starlette.applications import Starlette\nfrom starlette.middleware import Middleware\nfrom starlette.middleware.base import (\n BaseHTTPMiddleware,\n RequestResponseEndpoint,\n)\nfrom starlette.middleware.cors import CORSMiddleware\nfrom starlette.requests import Request\nfrom starlette.responses import Response\nfrom starlette.routing import Mount, Route\nfrom starlette.staticfiles import StaticFiles\nfrom starlette.types import Scope\nimport strawberry as gql\n\nimport fiftyone as fo\nimport fiftyone.constants as foc\nfrom fiftyone.server.context import GraphQL\nfrom fiftyone.server.extensions import EndSession\nfrom fiftyone.server.mutation import Mutation\nfrom fiftyone.server.query import Query\nfrom fiftyone.server.routes import routes\nfrom fiftyone.server.scalars import Date, DateTime\n\n\netau.ensure_dir(os.path.join(os.path.dirname(__file__), \"static\"))\n\n\nclass Static(StaticFiles):\n async def get_response(self, path: str, scope: Scope) -> Response:\n response = await super().get_response(path, scope)\n\n if response.status_code == 404:\n path = pathlib.Path(\n *pathlib.Path(path).parts[2:]\n ) # strip dataset/{name}\n response = await super().get_response(path, scope)\n if response.status_code == 404:\n full_path, stat_result = self.lookup_path(\"index.html\")\n return self.file_response(full_path, stat_result, scope)\n\n return response\n\n\nclass HeadersMiddleware(BaseHTTPMiddleware):\n async def dispatch(\n self, request: Request, call_next: RequestResponseEndpoint\n ) -> Response:\n response = await call_next(request)\n response.headers[\"x-colab-notebook-cache-control\"] = \"no-cache\"\n return response\n\n\nschema = gql.Schema(\n mutation=Mutation,\n query=Query,\n extensions=[EndSession],\n scalar_overrides={\n date: Date,\n datetime: DateTime,\n },\n)\n\n\napp = Starlette(\n middleware=[\n Middleware(\n CORSMiddleware,\n allow_origins=[\"*\"],\n allow_methods=[\"GET\", \"POST\", \"HEAD\", \"OPTIONS\"],\n allow_headers=[\n \"access-control-allow-origin\",\n \"authorization\",\n \"content-type\",\n ],\n ),\n Middleware(HeadersMiddleware),\n ],\n debug=foc.DEV_INSTALL,\n routes=[Route(route, endpoint) for route, endpoint in routes]\n + [\n Route(\n \"/graphql\",\n GraphQL(\n schema,\n graphiql=foc.DEV_INSTALL,\n ),\n ),\n Mount(\n \"/plugins\",\n app=Static(\n directory=fo.config.plugins_dir,\n html=True,\n check_dir=False,\n follow_symlink=True,\n ),\n name=\"plugins\",\n ),\n Mount(\n \"/\",\n app=Static(\n directory=os.path.join(os.path.dirname(__file__), \"static\"),\n html=True,\n follow_symlink=True,\n ),\n name=\"static\",\n ),\n ],\n)\n", "path": "fiftyone/server/app.py" } ]
[ { "content": "\"\"\"\nFiftyOne Server app.\n\n| Copyright 2017-2023, Voxel51, Inc.\n| `voxel51.com <https://voxel51.com/>`_\n|\n\"\"\"\nfrom datetime import date, datetime\nimport os\nimport pathlib\n\nimport eta.core.utils as etau\nfrom starlette.applications import Starlette\nfrom starlette.middleware import Middleware\nfrom starlette.middleware.base import (\n BaseHTTPMiddleware,\n RequestResponseEndpoint,\n)\nfrom starlette.middleware.cors import CORSMiddleware\nfrom starlette.requests import Request\nfrom starlette.responses import Response\nfrom starlette.routing import Mount, Route\nfrom starlette.staticfiles import StaticFiles\nfrom starlette.types import Scope\nimport strawberry as gql\n\nimport fiftyone as fo\nimport fiftyone.constants as foc\nfrom fiftyone.server.context import GraphQL\nfrom fiftyone.server.extensions import EndSession\nfrom fiftyone.server.mutation import Mutation\nfrom fiftyone.server.query import Query\nfrom fiftyone.server.routes import routes\nfrom fiftyone.server.scalars import Date, DateTime\n\n\netau.ensure_dir(os.path.join(os.path.dirname(__file__), \"static\"))\n\n\nclass Static(StaticFiles):\n async def get_response(self, path: str, scope: Scope) -> Response:\n response = await super().get_response(path, scope)\n\n if response.status_code == 404:\n path = pathlib.Path(\n *pathlib.Path(path).parts[2:]\n ) # strip dataset/{name}\n response = await super().get_response(path, scope)\n if response.status_code == 404:\n full_path, stat_result = self.lookup_path(\"index.html\")\n return self.file_response(full_path, stat_result, scope)\n\n return response\n\n\nclass HeadersMiddleware(BaseHTTPMiddleware):\n async def dispatch(\n self, request: Request, call_next: RequestResponseEndpoint\n ) -> Response:\n response = await call_next(request)\n response.headers[\"x-colab-notebook-cache-control\"] = \"no-cache\"\n return response\n\n\nschema = gql.Schema(\n mutation=Mutation,\n query=Query,\n extensions=[EndSession],\n scalar_overrides={\n date: Date,\n datetime: DateTime,\n },\n)\n\n\napp = Starlette(\n middleware=[\n Middleware(\n CORSMiddleware,\n allow_origins=[\"*\"],\n allow_methods=[\"GET\", \"POST\", \"HEAD\", \"OPTIONS\"],\n allow_headers=[\n \"access-control-allow-origin\",\n \"authorization\",\n \"content-type\",\n ],\n ),\n Middleware(HeadersMiddleware),\n ],\n debug=foc.DEV_INSTALL,\n routes=[Route(route, endpoint) for route, endpoint in routes]\n + [\n Route(\n \"/graphql\",\n GraphQL(\n schema,\n graphiql=foc.DEV_INSTALL,\n ),\n ),\n Mount(\n \"/plugins\",\n app=Static(\n directory=fo.config.plugins_dir,\n html=True,\n check_dir=False,\n follow_symlink=True,\n ),\n name=\"plugins\",\n ),\n Mount(\n \"/\",\n app=Static(\n directory=os.path.join(os.path.dirname(__file__), \"static\"),\n html=True,\n ),\n name=\"static\",\n ),\n ],\n)\n", "path": "fiftyone/server/app.py" } ]
diff --git a/fiftyone/server/app.py b/fiftyone/server/app.py index 9c38331d843..8af626b0073 100644 --- a/fiftyone/server/app.py +++ b/fiftyone/server/app.py @@ -112,7 +112,6 @@ async def dispatch( app=Static( directory=os.path.join(os.path.dirname(__file__), "static"), html=True, - follow_symlink=True, ), name="static", ),
City-of-Helsinki__linkedevents-130
Add short description in kulke importer
[ { "content": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nimport os\nimport re\nimport functools\nfrom lxml import etree\nfrom modeltranslation.translator import translator\nimport dateutil\nfrom pytz import timezone\nfrom django.conf import settings\nfrom django.utils.timezone import get_default_timezone\nfrom django.core.validators import URLValidator\nfrom django.core.exceptions import ValidationError, ObjectDoesNotExist\nfrom django.db import IntegrityError\n\nfrom .sync import ModelSyncher\nfrom .base import Importer, register_importer, recur_dict\nfrom .util import unicodetext, active_language\nfrom events.models import DataSource, Place, Event, Keyword, KeywordLabel, Organization, EventAggregate, EventAggregateMember\nfrom events.keywords import KeywordMatcher\nfrom events.translation_utils import expand_model_fields\n\nLOCATION_TPREK_MAP = {\n 'malmitalo': '8740',\n 'malms kulturhus': '8740',\n 'stoa': '7259',\n 'kanneltalo': '7255',\n 'vuotalo': '7260',\n 'vuosali': '7260',\n 'savoy-teatteri': '7258',\n 'savoy': '7258',\n 'annantalo': '7254',\n 'annegården': '7254',\n 'espan lava': '7265',\n 'caisa': '7256',\n 'nuorisokahvila clubi': '8006',\n 'haagan nuorisotalo': '8023',\n 'vuosaaren kirjasto': '8310',\n}\n\nADDRESS_TPREK_MAP = {\n 'annankatu 30': 'annantalo',\n 'annegatan 30': 'annantalo',\n 'mosaiikkitori 2': 'vuotalo',\n 'ala-malmin tori 1': 'malmitalo',\n 'ala-malmin tori': 'malmitalo',\n 'klaneettitie 5': 'kanneltalo',\n 'klarinettvägen 5': 'kanneltalo',\n}\n\nCATEGORIES_TO_IGNORE = [\n 286, 596, 614, 307, 632, 645, 675, 231, 364, 325, 324, 319, 646, 640,\n 641, 642, 643, 670, 671, 673, 674, 725, 312, 344, 365, 239, 240, 308, 623,\n 229, 230, 323, 320, 357, 358, 728, 729, 730, 735, 736,\n\n # The categories below are languages, ignore as categories\n # todo: add as event languages\n 53, 54, 55\n]\n\n# These events are courses - not to be published\nSKIP_EVENTS_WITH_CATEGORY = {\n 70, 71, 72, 73, 75, 77, 79, 80,\n 81, 83, 84, 85, 87, 316, 629, 632,\n 728, 729, 730, 735,\n}\n\ndef _query_courses():\n filter_out_keywords = map(\n make_kulke_id,\n SKIP_EVENTS_WITH_CATEGORY\n )\n return Event.objects.filter(\n data_source='kulke'\n ).filter(\n keywords__id__in=set(filter_out_keywords)\n )\n\ndef _delete_courses():\n courses_q = _query_courses()\n courses_q.delete()\n\n\nSPORTS = ['p965']\nGYMS = ['p8504']\nCHILDREN = ['p4354']\nMOVIES = ['p1235', 'p16327']\nMANUAL_CATEGORIES = {\n # urheilu\n 546: SPORTS, 547: SPORTS, 431: SPORTS, 638: SPORTS,\n # kuntosalit\n 607: GYMS, 615: GYMS,\n # harrastukset\n 626: ['p2901'],\n # erityisliikunta\n 634: ['p3093'],\n # monitaiteisuus\n 223: ['p25216'],\n # seniorit > ikääntyneet\n 354: ['p2433'],\n # saunominen\n 371: ['p11049'],\n # lastentapahtumat > lapset (!)\n 105: CHILDREN,\n # steppi\n 554: ['p19614'],\n # liikuntaleiri\n 710: ['p143', 'p916'],\n # teatteri ja sirkus\n 351: ['p2850'],\n # elokuva ja media\n 205: MOVIES + ['p2445'],\n # skidikino\n 731: CHILDREN + MOVIES,\n # luennot ja keskustelut\n 733: ['p15875', 'p14004'],\n # nuorille\n 734: ['p11617'],\n # elokuva\n 737: MOVIES\n}\n\n\nLOCAL_TZ = timezone('Europe/Helsinki')\n\ndef make_kulke_id(num):\n return \"kulke:{}\".format(num)\n\ndef make_event_name(title, subtitle):\n if title and subtitle:\n return \"{} – {}\".format(title, subtitle)\n elif title:\n return title\n elif subtitle:\n return subtitle\n\ndef get_event_name(event):\n if 'fi' in event['name']:\n return event['name']['fi']\n else:\n names = list(event['name'].values())\n if len(names):\n 
return None\n else:\n return names[0]\n\n@register_importer\nclass KulkeImporter(Importer):\n name = \"kulke\"\n supported_languages = ['fi', 'sv', 'en']\n\n def setup(self):\n ds_args = dict(id=self.name)\n defaults = dict(name='Kulttuurikeskus')\n self.tprek_data_source = DataSource.objects.get(id='tprek')\n self.data_source, _ = DataSource.objects.get_or_create(defaults=defaults, **ds_args)\n\n ds_args = dict(id='ahjo')\n defaults = dict(name='Ahjo')\n ahjo_ds, _ = DataSource.objects.get_or_create(defaults=defaults, **ds_args)\n\n org_args = dict(id='ahjo:46101')\n defaults = dict(name='Kulttuurikeskus', data_source=ahjo_ds)\n self.organization, _ = Organization.objects.get_or_create(defaults=defaults, **org_args)\n\n # Build a cached list of Places to avoid frequent hits to the db\n id_list = LOCATION_TPREK_MAP.values()\n place_list = Place.objects.filter(data_source=self.tprek_data_source).filter(origin_id__in=id_list)\n self.tprek_by_id = {p.origin_id: p.id for p in place_list}\n\n print('Preprocessing categories')\n categories = self.parse_kulke_categories()\n\n keyword_matcher = KeywordMatcher()\n for cid, c in list(categories.items()):\n if c is None:\n continue\n match_type = 'no match'\n ctext = c['text']\n # Ignore list (not used and/or not a category for general consumption)\n #\n # These are ignored for now, could be used for\n # target group extraction or for other info\n # were they actually used in the data:\n if cid in CATEGORIES_TO_IGNORE\\\n or c['type'] == 2 or c['type'] == 3:\n continue\n\n manual = MANUAL_CATEGORIES.get(cid)\n if manual:\n try:\n yso_ids = ['yso:{}'.format(i) for i in manual]\n yso_keywords = Keyword.objects.filter(id__in=yso_ids)\n c['yso_keywords'] = yso_keywords\n except Keyword.DoesNotExist:\n pass\n else:\n replacements = [('jumppa', 'voimistelu'), ('Stoan', 'Stoa')]\n for src, dest in replacements:\n ctext = re.sub(src, dest, ctext, flags=re.IGNORECASE)\n c['yso_keywords'] = keyword_matcher.match(ctext)\n\n self.categories = categories\n\n def parse_kulke_categories(self):\n categories = {}\n categories_file = os.path.join(\n settings.IMPORT_FILE_PATH, 'kulke', 'category.xml')\n root = etree.parse(categories_file)\n for ctype in root.xpath('/data/categories/category'):\n cid = int(ctype.attrib['id'])\n typeid = int(ctype.attrib['typeid'])\n categories[cid] = {\n 'type': typeid, 'text': ctype.text}\n return categories\n\n\n def find_place(self, event):\n tprek_id = None\n location = event['location']\n if location['name'] is None:\n print(\"Missing place for event %s (%s)\" % (\n get_event_name(event), event['origin_id']))\n return None\n\n loc_name = location['name'].lower()\n if loc_name in LOCATION_TPREK_MAP:\n tprek_id = LOCATION_TPREK_MAP[loc_name]\n\n if not tprek_id:\n # Exact match not found, check for string begin\n for k in LOCATION_TPREK_MAP.keys():\n if loc_name.startswith(k):\n tprek_id = LOCATION_TPREK_MAP[k]\n break\n\n if not tprek_id:\n # Check for venue name inclusion\n if 'caisa' in loc_name:\n tprek_id = LOCATION_TPREK_MAP['caisa']\n elif 'annantalo' in loc_name:\n tprek_id = LOCATION_TPREK_MAP['annantalo']\n\n if not tprek_id and 'fi' in location['street_address']:\n # Okay, try address.\n if 'fi' in location['street_address'] and location['street_address']['fi']:\n addr = location['street_address']['fi'].lower()\n if addr in ADDRESS_TPREK_MAP:\n tprek_id = LOCATION_TPREK_MAP[ADDRESS_TPREK_MAP[addr]]\n\n if not tprek_id and 'sv' in location['street_address']:\n # Okay, try Swedish address.\n if 'sv' in 
location['street_address'] and location['street_address']['sv']:\n addr = location['street_address']['sv'].lower()\n if addr in ADDRESS_TPREK_MAP:\n tprek_id = LOCATION_TPREK_MAP[ADDRESS_TPREK_MAP[addr]]\n\n if tprek_id:\n event['location']['id'] = self.tprek_by_id[tprek_id]\n else:\n print(\"No match found for place '%s' (event %s)\" % (loc_name, get_event_name(event)))\n\n def _import_event(self, lang, event_el, events):\n tag = lambda t: 'event' + t\n text = lambda t: unicodetext(event_el.find(tag(t)))\n def clean(t):\n if t is None:\n return None\n t = t.strip()\n if not t:\n return None\n return t\n text_content = lambda k: clean(text(k))\n\n eid = int(event_el.attrib['id'])\n\n if text_content('servicecode') != 'Pelkkä ilmoitus':\n # Skip courses\n return False\n\n if self.options['single']:\n if str(eid) != self.options['single']:\n return False\n\n event = events[eid]\n event['data_source'] = self.data_source\n event['publisher'] = self.organization\n event['origin_id'] = eid\n\n title = text_content('title')\n subtitle = text_content('subtitle')\n event['headline'][lang] = title\n event['secondary_headline'][lang] = subtitle\n event['name'][lang] = make_event_name(title, subtitle)\n\n caption = text_content('caption')\n bodytext = text_content('bodytext')\n description = ''\n if caption:\n description += caption\n if caption and bodytext:\n description += \"\\n\\n\"\n if bodytext:\n description += bodytext\n if description:\n event['description'][lang] = description\n\n event['info_url'][lang] = text_content('www')\n # todo: process extra links?\n links = event_el.find(tag('links'))\n if links is not None:\n links = links.findall(tag('link'))\n assert len(links)\n else:\n links = []\n external_links = []\n for link_el in links:\n link = unicodetext(link_el)\n if not re.match(r'^\\w+?://', link):\n link = 'http://' + link\n try:\n self.url_validator(link)\n except ValidationError:\n continue\n except ValueError:\n print('value error with event %s and url %s ' % (eid, link))\n external_links.append({'link': link})\n event['external_links'][lang] = external_links\n\n eventattachments = event_el.find(tag('attachments'))\n if eventattachments is not None:\n for attachment in eventattachments:\n if attachment.attrib['type'] == 'teaserimage':\n event['image'] = unicodetext(attachment).strip()\n break\n\n event['provider'][lang] = text_content('organizer')\n\n start_time = dateutil.parser.parse(text('starttime'))\n # Start and end times are in GMT. 
Sometimes only dates are provided.\n # If it's just a date, tzinfo is None.\n # FIXME: Mark that time is missing somehow?\n if not start_time.tzinfo:\n assert start_time.hour == 0 and start_time.minute == 0 and start_time.second == 0\n start_time = LOCAL_TZ.localize(start_time)\n event['has_start_time'] = False\n else:\n start_time = start_time.astimezone(LOCAL_TZ)\n event['has_start_time'] = True\n event['start_time'] = start_time\n if text('endtime'):\n end_time = dateutil.parser.parse(text('endtime'))\n if not end_time.tzinfo:\n assert end_time.hour == 0 and end_time.minute == 0 and end_time.second == 0\n end_time = LOCAL_TZ.localize(end_time)\n event['has_end_time'] = False\n else:\n end_time = end_time.astimezone(LOCAL_TZ)\n event['has_end_time'] = True\n\n event['end_time'] = end_time\n\n # todo: verify enrolment use cases, proper fields\n event['custom']['enrolment']['start_time'] = dateutil.parser.parse(\n text('enrolmentstarttime')\n )\n event['custom']['enrolment']['end_time'] = dateutil.parser.parse(\n text('enrolmentendtime')\n )\n\n if 'offers' not in event:\n event['offers'] = [recur_dict()]\n\n offer = event['offers'][0]\n price = text_content('price')\n price_el = event_el.find(tag('price'))\n free = (price_el.attrib['free'] == \"true\")\n\n offer['is_free'] = free\n description = price_el.get('ticketinfo')\n if description and 'href' in description:\n # the field sometimes contains some really bad invalid html\n # snippets\n description = None\n offer['description'][lang] = description\n if not free:\n offer['price'][lang] = price\n offer['info_url'][lang] = price_el.get('ticketlink')\n\n if hasattr(self, 'categories'):\n event_keywords = set()\n for category_id in event_el.find(tag('categories')):\n category = self.categories.get(int(category_id.text))\n if category:\n # YSO keywords\n if category.get('yso_keywords'):\n for c in category.get('yso_keywords', []):\n event_keywords.add(c)\n # Also save original kulke categories as keywords\n kulke_id = make_kulke_id(category_id.text)\n try:\n kulke_keyword = Keyword.objects.get(pk=kulke_id)\n event_keywords.add(kulke_keyword)\n except Keyword.DoesNotExist:\n print('Could not find {}'.format(kulke_id))\n\n event['keywords'] = event_keywords\n\n location = event['location']\n\n location['street_address'][lang] = text_content('address')\n location['postal_code'] = text_content('postalcode')\n municipality = text_content('postaloffice')\n if municipality == 'Helsingin kaupunki':\n municipality = 'Helsinki'\n location['address_locality'][lang] = municipality\n location['telephone'][lang] = text_content('phone')\n location['name'] = text_content('location')\n\n if not 'place' in location:\n self.find_place(event)\n return True\n\n def _gather_recurring_events(self, lang, event_el, events, recurring_groups):\n references = event_el.find('eventreferences')\n this_id = int(event_el.attrib['id'])\n if references is None or len(references) < 1:\n group = set()\n else:\n recurs = references.findall('recurring') or []\n recur_ids = map(lambda x: int(x.attrib['id']), recurs)\n group = set(recur_ids)\n group.add(this_id)\n recurring_groups[this_id] = group\n\n def _verify_recurs(self, recurring_groups):\n for key, group in recurring_groups.items():\n for inner_key in group:\n inner_group = recurring_groups.get(inner_key)\n if inner_group and inner_group != group:\n print('Differing groups:', key, inner_key)\n print('Differing groups:', group, inner_group)\n if len(inner_group) == 0:\n print(\n 'Event self-identifies to no group, 
removing.',\n inner_key\n )\n group.remove(inner_key)\n\n def _update_super_event(self, super_event):\n events = super_event.get_children()\n first_event = events.order_by('start_time').first()\n super_event.start_time = first_event.start_time\n super_event.has_start_time = first_event.has_start_time\n last_event = events.order_by('-end_time').first()\n super_event.end_time = last_event.end_time\n super_event.has_end_time = last_event.has_end_time\n\n # Functions which map related models into simple comparable values.\n def simple(field):\n return frozenset(map(lambda x: x.simple_value(), field.all()))\n value_mappers = {\n 'offers': simple,\n 'external_links': simple\n }\n fieldnames = expand_model_fields(\n super_event, [\n 'info_url', 'description', 'short_description', 'headline',\n 'secondary_headline', 'provider', 'publisher', 'location',\n 'location_extra_info', 'data_source',\n 'image', 'offers', 'external_links'])\n\n # The set of fields which have common values for all events.\n common_fields = set(\n f for f in fieldnames\n if 1 == len(set(map(\n value_mappers.get(f, lambda x: x),\n (getattr(event, f) for event in events.all())))))\n\n for fieldname in common_fields:\n value = getattr(events.first(), fieldname)\n if hasattr(value, 'all'):\n manager = getattr(super_event, fieldname)\n simple = False\n if hasattr(value.first(), 'simple_value'):\n # Simple related models can be deleted and copied.\n manager.all().delete()\n simple = True\n for m in value.all():\n if simple:\n m.id = None\n m.event_id = super_event.id\n m.save()\n manager.add(m)\n else:\n setattr(super_event, fieldname, value)\n\n # The name may vary within a recurring event; hence, take the common part\n if expand_model_fields(super_event, ['headline'])[0] not in common_fields:\n words = getattr(events.first(), 'headline').split(' ')\n name = ''\n is_common = lambda: all(\n headline.startswith(name + words[0])\n for headline in [event.name for event in events]\n )\n while words and is_common():\n name += words.pop(0) + ' '\n print(words)\n print(name)\n setattr(super_event, 'name', name)\n\n for lang in self.languages.keys():\n headline = getattr(\n super_event, 'headline_{}'.format(lang)\n )\n secondary_headline = getattr(\n super_event, 'secondary_headline_{}'.format(lang)\n )\n setattr(super_event, 'name_{}'.format(lang),\n make_event_name(headline, secondary_headline)\n )\n\n # Gather common keywords present in *all* subevents\n common_keywords = functools.reduce(\n lambda x, y: x & y,\n (set(event.keywords.all()) for event in events.all())\n )\n super_event.keywords.clear()\n for k in common_keywords:\n super_event.keywords.add(k)\n\n super_event.save()\n\n def _save_recurring_superevents(self, recurring_groups):\n groups = map(frozenset, recurring_groups.values())\n aggregates = set()\n for group in groups:\n kulke_ids = set(map(make_kulke_id, group))\n superevent_aggregates = EventAggregate.objects.filter(\n members__event__id__in=kulke_ids\n ).distinct()\n cnt = superevent_aggregates.count()\n\n if cnt > 1:\n print('Error: the superevent has an ambiguous aggregate group.')\n print('Aggregate ids: {}, group: {}'.format(\n superevent_aggregates.values_list('id', flat=True), group))\n continue\n\n events = Event.objects.filter(id__in=kulke_ids)\n if events.count() < 2:\n continue\n\n aggregate = None\n if cnt == 0:\n if len(group) == 1:\n # Do not create aggregates of only one.\n continue\n aggregate = EventAggregate()\n aggregate.save()\n super_event = Event(\n publisher=self.organization,\n 
is_recurring_super=True,\n data_source=DataSource.objects.get(pk='kulke'), # TODO\n id=\"linkedevents:agg-{}\".format(aggregate.id))\n super_event.save()\n aggregate.super_event = super_event\n aggregate.save()\n for event in events:\n member = EventAggregateMember.objects.create(event=event,\n event_aggregate=aggregate)\n elif cnt == 1:\n aggregate = superevent_aggregates.first()\n if len(group) == 1:\n events = Event.objects.get(\n pk=make_kulke_id(group.pop()))\n # The imported event is not part of an aggregate\n # but one was found it in the db. Remove the event\n # from the aggregate. This is the only case when\n # an event is removed from a recurring aggregate.\n aggregate.members.remove(events)\n else:\n for event in events:\n try:\n member = EventAggregateMember.objects.create(event=event,\n event_aggregate=aggregate)\n except IntegrityError:\n # Ignore unique violations. They\n # ensure that no duplicate members are added.\n pass\n for event in events:\n event.super_event = aggregate.super_event\n event.save()\n aggregates.add(aggregate)\n return aggregates\n\n def import_events(self):\n print(\"Importing Kulke events\")\n self.url_validator = URLValidator()\n events = recur_dict()\n recurring_groups = dict()\n for lang in ['fi', 'sv', 'en']:\n events_file = os.path.join(\n settings.IMPORT_FILE_PATH, 'kulke', 'events-%s.xml' % lang)\n root = etree.parse(events_file)\n for event_el in root.xpath('/eventdata/event'):\n success = self._import_event(lang, event_el, events)\n if success:\n self._gather_recurring_events(lang, event_el, events, recurring_groups)\n\n events.default_factory = None\n\n filter_out_keywords = set(map(\n make_kulke_id,\n SKIP_EVENTS_WITH_CATEGORY\n ))\n for eid, event in events.items():\n skip = False\n for kw in event['keywords']:\n if kw.id in filter_out_keywords:\n skip = True\n break\n if skip:\n continue\n self.save_event(event)\n\n self._verify_recurs(recurring_groups)\n aggregates = self._save_recurring_superevents(recurring_groups)\n for agg in aggregates:\n self._update_super_event(agg.super_event)\n\n def import_keywords(self):\n print(\"Importing Kulke categories as keywords\")\n categories = self.parse_kulke_categories()\n for kid, value in categories.items():\n try:\n # if the keyword exists, update the name if needed\n word = Keyword.objects.get(id=make_kulke_id(kid))\n if word.name != value['text']:\n word.name = value['text']\n word.save()\n except ObjectDoesNotExist:\n # if the keyword does not exist, save it for future use\n Keyword.objects.create(\n id=make_kulke_id(kid),\n name=value['text'],\n data_source=self.data_source\n )\n", "path": "events/importer/kulke.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nimport os\nimport re\nimport functools\nfrom lxml import etree\nfrom modeltranslation.translator import translator\nimport dateutil\nfrom pytz import timezone\nfrom django.conf import settings\nfrom django.utils.timezone import get_default_timezone\nfrom django.core.validators import URLValidator\nfrom django.core.exceptions import ValidationError, ObjectDoesNotExist\nfrom django.db import IntegrityError\n\nfrom .sync import ModelSyncher\nfrom .base import Importer, register_importer, recur_dict\nfrom .util import unicodetext, active_language\nfrom events.models import DataSource, Place, Event, Keyword, KeywordLabel, Organization, EventAggregate, EventAggregateMember\nfrom events.keywords import KeywordMatcher\nfrom events.translation_utils import expand_model_fields\n\nLOCATION_TPREK_MAP = {\n 'malmitalo': '8740',\n 'malms kulturhus': '8740',\n 'stoa': '7259',\n 'kanneltalo': '7255',\n 'vuotalo': '7260',\n 'vuosali': '7260',\n 'savoy-teatteri': '7258',\n 'savoy': '7258',\n 'annantalo': '7254',\n 'annegården': '7254',\n 'espan lava': '7265',\n 'caisa': '7256',\n 'nuorisokahvila clubi': '8006',\n 'haagan nuorisotalo': '8023',\n 'vuosaaren kirjasto': '8310',\n}\n\nADDRESS_TPREK_MAP = {\n 'annankatu 30': 'annantalo',\n 'annegatan 30': 'annantalo',\n 'mosaiikkitori 2': 'vuotalo',\n 'ala-malmin tori 1': 'malmitalo',\n 'ala-malmin tori': 'malmitalo',\n 'klaneettitie 5': 'kanneltalo',\n 'klarinettvägen 5': 'kanneltalo',\n}\n\nCATEGORIES_TO_IGNORE = [\n 286, 596, 614, 307, 632, 645, 675, 231, 364, 325, 324, 319, 646, 640,\n 641, 642, 643, 670, 671, 673, 674, 725, 312, 344, 365, 239, 240, 308, 623,\n 229, 230, 323, 320, 357, 358, 728, 729, 730, 735, 736,\n\n # The categories below are languages, ignore as categories\n # todo: add as event languages\n 53, 54, 55\n]\n\n# These events are courses - not to be published\nSKIP_EVENTS_WITH_CATEGORY = {\n 70, 71, 72, 73, 75, 77, 79, 80,\n 81, 83, 84, 85, 87, 316, 629, 632,\n 728, 729, 730, 735,\n}\n\ndef _query_courses():\n filter_out_keywords = map(\n make_kulke_id,\n SKIP_EVENTS_WITH_CATEGORY\n )\n return Event.objects.filter(\n data_source='kulke'\n ).filter(\n keywords__id__in=set(filter_out_keywords)\n )\n\ndef _delete_courses():\n courses_q = _query_courses()\n courses_q.delete()\n\n\nSPORTS = ['p965']\nGYMS = ['p8504']\nCHILDREN = ['p4354']\nMOVIES = ['p1235', 'p16327']\nMANUAL_CATEGORIES = {\n # urheilu\n 546: SPORTS, 547: SPORTS, 431: SPORTS, 638: SPORTS,\n # kuntosalit\n 607: GYMS, 615: GYMS,\n # harrastukset\n 626: ['p2901'],\n # erityisliikunta\n 634: ['p3093'],\n # monitaiteisuus\n 223: ['p25216'],\n # seniorit > ikääntyneet\n 354: ['p2433'],\n # saunominen\n 371: ['p11049'],\n # lastentapahtumat > lapset (!)\n 105: CHILDREN,\n # steppi\n 554: ['p19614'],\n # liikuntaleiri\n 710: ['p143', 'p916'],\n # teatteri ja sirkus\n 351: ['p2850'],\n # elokuva ja media\n 205: MOVIES + ['p2445'],\n # skidikino\n 731: CHILDREN + MOVIES,\n # luennot ja keskustelut\n 733: ['p15875', 'p14004'],\n # nuorille\n 734: ['p11617'],\n # elokuva\n 737: MOVIES\n}\n\n\nLOCAL_TZ = timezone('Europe/Helsinki')\n\ndef make_kulke_id(num):\n return \"kulke:{}\".format(num)\n\ndef make_event_name(title, subtitle):\n if title and subtitle:\n return \"{} – {}\".format(title, subtitle)\n elif title:\n return title\n elif subtitle:\n return subtitle\n\ndef get_event_name(event):\n if 'fi' in event['name']:\n return event['name']['fi']\n else:\n names = list(event['name'].values())\n if len(names):\n 
return None\n else:\n return names[0]\n\n@register_importer\nclass KulkeImporter(Importer):\n name = \"kulke\"\n supported_languages = ['fi', 'sv', 'en']\n\n def setup(self):\n ds_args = dict(id=self.name)\n defaults = dict(name='Kulttuurikeskus')\n self.tprek_data_source = DataSource.objects.get(id='tprek')\n self.data_source, _ = DataSource.objects.get_or_create(defaults=defaults, **ds_args)\n\n ds_args = dict(id='ahjo')\n defaults = dict(name='Ahjo')\n ahjo_ds, _ = DataSource.objects.get_or_create(defaults=defaults, **ds_args)\n\n org_args = dict(id='ahjo:46101')\n defaults = dict(name='Kulttuurikeskus', data_source=ahjo_ds)\n self.organization, _ = Organization.objects.get_or_create(defaults=defaults, **org_args)\n\n # Build a cached list of Places to avoid frequent hits to the db\n id_list = LOCATION_TPREK_MAP.values()\n place_list = Place.objects.filter(data_source=self.tprek_data_source).filter(origin_id__in=id_list)\n self.tprek_by_id = {p.origin_id: p.id for p in place_list}\n\n print('Preprocessing categories')\n categories = self.parse_kulke_categories()\n\n keyword_matcher = KeywordMatcher()\n for cid, c in list(categories.items()):\n if c is None:\n continue\n match_type = 'no match'\n ctext = c['text']\n # Ignore list (not used and/or not a category for general consumption)\n #\n # These are ignored for now, could be used for\n # target group extraction or for other info\n # were they actually used in the data:\n if cid in CATEGORIES_TO_IGNORE\\\n or c['type'] == 2 or c['type'] == 3:\n continue\n\n manual = MANUAL_CATEGORIES.get(cid)\n if manual:\n try:\n yso_ids = ['yso:{}'.format(i) for i in manual]\n yso_keywords = Keyword.objects.filter(id__in=yso_ids)\n c['yso_keywords'] = yso_keywords\n except Keyword.DoesNotExist:\n pass\n else:\n replacements = [('jumppa', 'voimistelu'), ('Stoan', 'Stoa')]\n for src, dest in replacements:\n ctext = re.sub(src, dest, ctext, flags=re.IGNORECASE)\n c['yso_keywords'] = keyword_matcher.match(ctext)\n\n self.categories = categories\n\n def parse_kulke_categories(self):\n categories = {}\n categories_file = os.path.join(\n settings.IMPORT_FILE_PATH, 'kulke', 'category.xml')\n root = etree.parse(categories_file)\n for ctype in root.xpath('/data/categories/category'):\n cid = int(ctype.attrib['id'])\n typeid = int(ctype.attrib['typeid'])\n categories[cid] = {\n 'type': typeid, 'text': ctype.text}\n return categories\n\n\n def find_place(self, event):\n tprek_id = None\n location = event['location']\n if location['name'] is None:\n print(\"Missing place for event %s (%s)\" % (\n get_event_name(event), event['origin_id']))\n return None\n\n loc_name = location['name'].lower()\n if loc_name in LOCATION_TPREK_MAP:\n tprek_id = LOCATION_TPREK_MAP[loc_name]\n\n if not tprek_id:\n # Exact match not found, check for string begin\n for k in LOCATION_TPREK_MAP.keys():\n if loc_name.startswith(k):\n tprek_id = LOCATION_TPREK_MAP[k]\n break\n\n if not tprek_id:\n # Check for venue name inclusion\n if 'caisa' in loc_name:\n tprek_id = LOCATION_TPREK_MAP['caisa']\n elif 'annantalo' in loc_name:\n tprek_id = LOCATION_TPREK_MAP['annantalo']\n\n if not tprek_id and 'fi' in location['street_address']:\n # Okay, try address.\n if 'fi' in location['street_address'] and location['street_address']['fi']:\n addr = location['street_address']['fi'].lower()\n if addr in ADDRESS_TPREK_MAP:\n tprek_id = LOCATION_TPREK_MAP[ADDRESS_TPREK_MAP[addr]]\n\n if not tprek_id and 'sv' in location['street_address']:\n # Okay, try Swedish address.\n if 'sv' in 
location['street_address'] and location['street_address']['sv']:\n addr = location['street_address']['sv'].lower()\n if addr in ADDRESS_TPREK_MAP:\n tprek_id = LOCATION_TPREK_MAP[ADDRESS_TPREK_MAP[addr]]\n\n if tprek_id:\n event['location']['id'] = self.tprek_by_id[tprek_id]\n else:\n print(\"No match found for place '%s' (event %s)\" % (loc_name, get_event_name(event)))\n\n def _import_event(self, lang, event_el, events):\n tag = lambda t: 'event' + t\n text = lambda t: unicodetext(event_el.find(tag(t)))\n def clean(t):\n if t is None:\n return None\n t = t.strip()\n if not t:\n return None\n return t\n text_content = lambda k: clean(text(k))\n\n eid = int(event_el.attrib['id'])\n\n if text_content('servicecode') != 'Pelkkä ilmoitus':\n # Skip courses\n return False\n\n if self.options['single']:\n if str(eid) != self.options['single']:\n return False\n\n event = events[eid]\n event['data_source'] = self.data_source\n event['publisher'] = self.organization\n event['origin_id'] = eid\n\n title = text_content('title')\n subtitle = text_content('subtitle')\n event['headline'][lang] = title\n event['secondary_headline'][lang] = subtitle\n event['name'][lang] = make_event_name(title, subtitle)\n\n caption = text_content('caption')\n bodytext = text_content('bodytext')\n description = ''\n if caption:\n description += caption\n event['short_description'][lang] = caption\n if caption and bodytext:\n description += \"\\n\\n\"\n if bodytext:\n description += bodytext\n if description:\n event['description'][lang] = description\n\n event['info_url'][lang] = text_content('www')\n # todo: process extra links?\n links = event_el.find(tag('links'))\n if links is not None:\n links = links.findall(tag('link'))\n assert len(links)\n else:\n links = []\n external_links = []\n for link_el in links:\n link = unicodetext(link_el)\n if not re.match(r'^\\w+?://', link):\n link = 'http://' + link\n try:\n self.url_validator(link)\n except ValidationError:\n continue\n except ValueError:\n print('value error with event %s and url %s ' % (eid, link))\n external_links.append({'link': link})\n event['external_links'][lang] = external_links\n\n eventattachments = event_el.find(tag('attachments'))\n if eventattachments is not None:\n for attachment in eventattachments:\n if attachment.attrib['type'] == 'teaserimage':\n event['image'] = unicodetext(attachment).strip()\n break\n\n event['provider'][lang] = text_content('organizer')\n\n start_time = dateutil.parser.parse(text('starttime'))\n # Start and end times are in GMT. 
Sometimes only dates are provided.\n # If it's just a date, tzinfo is None.\n # FIXME: Mark that time is missing somehow?\n if not start_time.tzinfo:\n assert start_time.hour == 0 and start_time.minute == 0 and start_time.second == 0\n start_time = LOCAL_TZ.localize(start_time)\n event['has_start_time'] = False\n else:\n start_time = start_time.astimezone(LOCAL_TZ)\n event['has_start_time'] = True\n event['start_time'] = start_time\n if text('endtime'):\n end_time = dateutil.parser.parse(text('endtime'))\n if not end_time.tzinfo:\n assert end_time.hour == 0 and end_time.minute == 0 and end_time.second == 0\n end_time = LOCAL_TZ.localize(end_time)\n event['has_end_time'] = False\n else:\n end_time = end_time.astimezone(LOCAL_TZ)\n event['has_end_time'] = True\n\n event['end_time'] = end_time\n\n # todo: verify enrolment use cases, proper fields\n event['custom']['enrolment']['start_time'] = dateutil.parser.parse(\n text('enrolmentstarttime')\n )\n event['custom']['enrolment']['end_time'] = dateutil.parser.parse(\n text('enrolmentendtime')\n )\n\n if 'offers' not in event:\n event['offers'] = [recur_dict()]\n\n offer = event['offers'][0]\n price = text_content('price')\n price_el = event_el.find(tag('price'))\n free = (price_el.attrib['free'] == \"true\")\n\n offer['is_free'] = free\n description = price_el.get('ticketinfo')\n if description and 'href' in description:\n # the field sometimes contains some really bad invalid html\n # snippets\n description = None\n offer['description'][lang] = description\n if not free:\n offer['price'][lang] = price\n offer['info_url'][lang] = price_el.get('ticketlink')\n\n if hasattr(self, 'categories'):\n event_keywords = set()\n for category_id in event_el.find(tag('categories')):\n category = self.categories.get(int(category_id.text))\n if category:\n # YSO keywords\n if category.get('yso_keywords'):\n for c in category.get('yso_keywords', []):\n event_keywords.add(c)\n # Also save original kulke categories as keywords\n kulke_id = make_kulke_id(category_id.text)\n try:\n kulke_keyword = Keyword.objects.get(pk=kulke_id)\n event_keywords.add(kulke_keyword)\n except Keyword.DoesNotExist:\n print('Could not find {}'.format(kulke_id))\n\n event['keywords'] = event_keywords\n\n location = event['location']\n\n location['street_address'][lang] = text_content('address')\n location['postal_code'] = text_content('postalcode')\n municipality = text_content('postaloffice')\n if municipality == 'Helsingin kaupunki':\n municipality = 'Helsinki'\n location['address_locality'][lang] = municipality\n location['telephone'][lang] = text_content('phone')\n location['name'] = text_content('location')\n\n if not 'place' in location:\n self.find_place(event)\n return True\n\n def _gather_recurring_events(self, lang, event_el, events, recurring_groups):\n references = event_el.find('eventreferences')\n this_id = int(event_el.attrib['id'])\n if references is None or len(references) < 1:\n group = set()\n else:\n recurs = references.findall('recurring') or []\n recur_ids = map(lambda x: int(x.attrib['id']), recurs)\n group = set(recur_ids)\n group.add(this_id)\n recurring_groups[this_id] = group\n\n def _verify_recurs(self, recurring_groups):\n for key, group in recurring_groups.items():\n for inner_key in group:\n inner_group = recurring_groups.get(inner_key)\n if inner_group and inner_group != group:\n print('Differing groups:', key, inner_key)\n print('Differing groups:', group, inner_group)\n if len(inner_group) == 0:\n print(\n 'Event self-identifies to no group, 
removing.',\n inner_key\n )\n group.remove(inner_key)\n\n def _update_super_event(self, super_event):\n events = super_event.get_children()\n first_event = events.order_by('start_time').first()\n super_event.start_time = first_event.start_time\n super_event.has_start_time = first_event.has_start_time\n last_event = events.order_by('-end_time').first()\n super_event.end_time = last_event.end_time\n super_event.has_end_time = last_event.has_end_time\n\n # Functions which map related models into simple comparable values.\n def simple(field):\n return frozenset(map(lambda x: x.simple_value(), field.all()))\n value_mappers = {\n 'offers': simple,\n 'external_links': simple\n }\n fieldnames = expand_model_fields(\n super_event, [\n 'info_url', 'description', 'short_description', 'headline',\n 'secondary_headline', 'provider', 'publisher', 'location',\n 'location_extra_info', 'data_source',\n 'image', 'offers', 'external_links'])\n\n # The set of fields which have common values for all events.\n common_fields = set(\n f for f in fieldnames\n if 1 == len(set(map(\n value_mappers.get(f, lambda x: x),\n (getattr(event, f) for event in events.all())))))\n\n for fieldname in common_fields:\n value = getattr(events.first(), fieldname)\n if hasattr(value, 'all'):\n manager = getattr(super_event, fieldname)\n simple = False\n if hasattr(value.first(), 'simple_value'):\n # Simple related models can be deleted and copied.\n manager.all().delete()\n simple = True\n for m in value.all():\n if simple:\n m.id = None\n m.event_id = super_event.id\n m.save()\n manager.add(m)\n else:\n setattr(super_event, fieldname, value)\n\n # The name may vary within a recurring event; hence, take the common part\n if expand_model_fields(super_event, ['headline'])[0] not in common_fields:\n words = getattr(events.first(), 'headline').split(' ')\n name = ''\n is_common = lambda: all(\n headline.startswith(name + words[0])\n for headline in [event.name for event in events]\n )\n while words and is_common():\n name += words.pop(0) + ' '\n print(words)\n print(name)\n setattr(super_event, 'name', name)\n\n for lang in self.languages.keys():\n headline = getattr(\n super_event, 'headline_{}'.format(lang)\n )\n secondary_headline = getattr(\n super_event, 'secondary_headline_{}'.format(lang)\n )\n setattr(super_event, 'name_{}'.format(lang),\n make_event_name(headline, secondary_headline)\n )\n\n # Gather common keywords present in *all* subevents\n common_keywords = functools.reduce(\n lambda x, y: x & y,\n (set(event.keywords.all()) for event in events.all())\n )\n super_event.keywords.clear()\n for k in common_keywords:\n super_event.keywords.add(k)\n\n super_event.save()\n\n def _save_recurring_superevents(self, recurring_groups):\n groups = map(frozenset, recurring_groups.values())\n aggregates = set()\n for group in groups:\n kulke_ids = set(map(make_kulke_id, group))\n superevent_aggregates = EventAggregate.objects.filter(\n members__event__id__in=kulke_ids\n ).distinct()\n cnt = superevent_aggregates.count()\n\n if cnt > 1:\n print('Error: the superevent has an ambiguous aggregate group.')\n print('Aggregate ids: {}, group: {}'.format(\n superevent_aggregates.values_list('id', flat=True), group))\n continue\n\n events = Event.objects.filter(id__in=kulke_ids)\n if events.count() < 2:\n continue\n\n aggregate = None\n if cnt == 0:\n if len(group) == 1:\n # Do not create aggregates of only one.\n continue\n aggregate = EventAggregate()\n aggregate.save()\n super_event = Event(\n publisher=self.organization,\n 
is_recurring_super=True,\n data_source=DataSource.objects.get(pk='kulke'), # TODO\n id=\"linkedevents:agg-{}\".format(aggregate.id))\n super_event.save()\n aggregate.super_event = super_event\n aggregate.save()\n for event in events:\n member = EventAggregateMember.objects.create(event=event,\n event_aggregate=aggregate)\n elif cnt == 1:\n aggregate = superevent_aggregates.first()\n if len(group) == 1:\n events = Event.objects.get(\n pk=make_kulke_id(group.pop()))\n # The imported event is not part of an aggregate\n # but one was found it in the db. Remove the event\n # from the aggregate. This is the only case when\n # an event is removed from a recurring aggregate.\n aggregate.members.remove(events)\n else:\n for event in events:\n try:\n member = EventAggregateMember.objects.create(event=event,\n event_aggregate=aggregate)\n except IntegrityError:\n # Ignore unique violations. They\n # ensure that no duplicate members are added.\n pass\n for event in events:\n event.super_event = aggregate.super_event\n event.save()\n aggregates.add(aggregate)\n return aggregates\n\n def import_events(self):\n print(\"Importing Kulke events\")\n self.url_validator = URLValidator()\n events = recur_dict()\n recurring_groups = dict()\n for lang in ['fi', 'sv', 'en']:\n events_file = os.path.join(\n settings.IMPORT_FILE_PATH, 'kulke', 'events-%s.xml' % lang)\n root = etree.parse(events_file)\n for event_el in root.xpath('/eventdata/event'):\n success = self._import_event(lang, event_el, events)\n if success:\n self._gather_recurring_events(lang, event_el, events, recurring_groups)\n\n events.default_factory = None\n\n filter_out_keywords = set(map(\n make_kulke_id,\n SKIP_EVENTS_WITH_CATEGORY\n ))\n for eid, event in events.items():\n skip = False\n for kw in event['keywords']:\n if kw.id in filter_out_keywords:\n skip = True\n break\n if skip:\n continue\n self.save_event(event)\n\n self._verify_recurs(recurring_groups)\n aggregates = self._save_recurring_superevents(recurring_groups)\n for agg in aggregates:\n self._update_super_event(agg.super_event)\n\n def import_keywords(self):\n print(\"Importing Kulke categories as keywords\")\n categories = self.parse_kulke_categories()\n for kid, value in categories.items():\n try:\n # if the keyword exists, update the name if needed\n word = Keyword.objects.get(id=make_kulke_id(kid))\n if word.name != value['text']:\n word.name = value['text']\n word.save()\n except ObjectDoesNotExist:\n # if the keyword does not exist, save it for future use\n Keyword.objects.create(\n id=make_kulke_id(kid),\n name=value['text'],\n data_source=self.data_source\n )\n", "path": "events/importer/kulke.py" } ]
diff --git a/events/importer/kulke.py b/events/importer/kulke.py index 458485b2e..bc3e86fa5 100644 --- a/events/importer/kulke.py +++ b/events/importer/kulke.py @@ -298,6 +298,7 @@ def clean(t): description = '' if caption: description += caption + event['short_description'][lang] = caption if caption and bodytext: description += "\n\n" if bodytext:
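To make the intent of the one-line change above concrete, here is an illustrative, standalone sketch (the values are invented, not taken from real Kulke data) of how the importer populates both description fields from the source XML's caption and body text after this change:

```python
# Illustrative only -- the values are made up; the control flow mirrors the
# caption/bodytext handling in events/importer/kulke.py after this change.
caption = "A short teaser for the event."             # caption text from the source XML
bodytext = "A longer body text with all the details."  # body text from the source XML
lang = "en"

event = {"short_description": {}, "description": {}}

description = ""
if caption:
    description += caption
    event["short_description"][lang] = caption   # the new line added by this PR
if caption and bodytext:
    description += "\n\n"
if bodytext:
    description += bodytext
if description:
    event["description"][lang] = description

print(event["short_description"][lang])  # caption only
print(event["description"][lang])        # caption, blank line, then bodytext
```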
sql-machine-learning__elasticdl-532
Fix logging issue

Currently, the logging is not working as intended. For example, the formatting was ignored for WARNING messages, and INFO logs are not printed out.
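For context on the symptom, a minimal sketch of stock Python `logging` behaviour (standard-library behaviour, not ElasticDL code): records below the root logger's level, which defaults to WARNING, are dropped, and `basicConfig()` silently does nothing if a handler is already attached to the root logger, which can leave WARNING records rendered by that pre-existing handler rather than by the configured format:

```python
# Minimal, self-contained sketch of the two symptoms using only the standard
# library; it is illustrative and not taken from the ElasticDL sources below.
import logging

logging.basicConfig(
    format="%(asctime)s %(name)s %(levelname)-8s "
           "[%(filename)s:%(lineno)d] %(message)s")
logger = logging.getLogger(__name__)

logger.info("dropped: the root logger level is still WARNING by default")
logging.getLogger().setLevel(logging.INFO)
logger.info("printed, and rendered with the configured format")

# If an imported library had already attached a handler to the root logger,
# the basicConfig() call above would be a no-op, so even WARNING records
# would keep that handler's default formatting.
```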
[ { "content": "import logging\nimport time\nimport argparse\nimport os\nimport recordio\n\nimport grpc\nimport tensorflow as tf\n\ntf.enable_eager_execution()\n\nfrom contextlib import closing\nfrom concurrent import futures\nfrom elasticdl.proto import elasticdl_pb2_grpc\nfrom elasticdl.python.elasticdl.master.servicer import MasterServicer\nfrom elasticdl.python.elasticdl.master.task_queue import _TaskQueue\nfrom elasticdl.python.elasticdl.master.k8s_worker_manager import WorkerManager\nfrom elasticdl.python.elasticdl.common.model_helper import load_user_model, build_model\n\n\ndef _make_task_queue(data_dir, record_per_task, num_epoch):\n f_records = {}\n for f in os.listdir(data_dir):\n p = os.path.join(data_dir, f)\n with closing(recordio.Index(p)) as rio:\n f_records[p] = rio.num_records()\n return _TaskQueue(f_records, record_per_task, num_epoch)\n\n\ndef _parse_args():\n parser = argparse.ArgumentParser(description=\"ElasticDL Master\")\n parser.add_argument(\n \"--model_file\",\n help=\"Full file path of user defined neural model\",\n required=True,\n )\n parser.add_argument(\n \"--train_data_dir\",\n help=\"Training data directory. Files should be in RecordIO format\",\n required=True,\n )\n parser.add_argument(\"--record_per_task\", type=int, required=True)\n parser.add_argument(\"--num_epoch\", type=int, required=True)\n parser.add_argument(\n \"--grads_to_wait\",\n type=int,\n help=\"Number of gradients to wait before updating model\",\n required=True,\n )\n parser.add_argument(\n \"--minibatch_size\",\n type=int,\n help=\"Minibatch size used by workers to compute gradients\",\n required=True,\n )\n parser.add_argument(\n \"--num_worker\",\n type=int,\n help=\"the number of workers used in training\",\n default=0,\n )\n parser.add_argument(\n \"--worker_cpu_request\",\n help=\"the minimal cpu required by worker in training\",\n default=\"1000m\",\n )\n parser.add_argument(\n \"--worker_cpu_limit\",\n help=\"the maximal cpu used by worker in training\",\n default=\"1000m\",\n )\n parser.add_argument(\n \"--worker_memory_request\",\n help=\"the minimal memory required by worker in training\",\n default=\"4096Mi\",\n )\n parser.add_argument(\n \"--worker_memory_limit\",\n help=\"the maximal memory used by worker in training\",\n default=\"4096Mi\",\n )\n parser.add_argument(\n \"--worker_pod_priority\",\n help=\"the requested priority of worker pod\")\n parser.add_argument(\n \"--worker_image\", help=\"docker image for worker\", default=None\n )\n parser.add_argument(\"--job_name\", help=\"job name\", required=True)\n parser.add_argument(\n \"--codec_type\",\n default=\"bytes\",\n choices=[\"tf_example\", \"bytes\"],\n help=\"Type of codec(tf_example or bytes)\",\n )\n parser.add_argument(\"--volume_name\",\n help=\"the volume name of network filesytem\")\n parser.add_argument(\"--mount_path\",\n help=\"the mount path in the docker container\")\n parser.add_argument(\n \"--log_level\",\n choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],\n type=str.upper,\n default='WARNING',\n help=\"the logging level\",\n )\n parser.add_argument(\"--image_pull_policy\",\n help=\"the image pull policy of master and worker\")\n return parser.parse_args()\n\n\ndef main():\n args = _parse_args()\n\n # TODO: pass port via flags.\n PORT = 50001\n\n # Initialize logger\n logging.basicConfig(\n format='%(asctime)s %(name)s %(levelname)-8s '\n '[%(filename)s:%(lineno)d] %(message)s',\n )\n # Set level for ROOT logger.\n logging.getLogger().setLevel(args.log_level)\n logger = 
logging.getLogger(__name__)\n\n task_q = _make_task_queue(\n args.train_data_dir, args.record_per_task, args.num_epoch\n )\n model_module = load_user_model(args.model_file)\n model_inst = model_module.model\n build_model(model_inst, model_module.feature_columns())\n optimizer = model_module.optimizer()\n\n server = grpc.server(futures.ThreadPoolExecutor(max_workers=64))\n elasticdl_pb2_grpc.add_MasterServicer_to_server(\n MasterServicer(\n args.grads_to_wait,\n args.minibatch_size,\n optimizer,\n task_q,\n init_var=model_inst.trainable_variables,\n ),\n server,\n )\n server.add_insecure_port(\"[::]:{}\".format(PORT))\n server.start()\n logger.info(\"Server started at port: %d\", PORT)\n\n if args.num_worker:\n master_addr = \"%s:%d\" % (os.getenv(\"MY_POD_IP\", \"localhost\"), PORT)\n worker_command = [\"python\"]\n worker_args = [\n \"-m\",\n \"elasticdl.python.elasticdl.worker.main\",\n \"--model_file\",\n args.model_file,\n \"--master_addr\",\n master_addr,\n \"--codec_type\",\n args.codec_type\n ]\n\n worker_manager = WorkerManager(\n task_q,\n job_name=args.job_name,\n worker_image=args.worker_image,\n command=worker_command,\n args=worker_args,\n namespace=\"default\",\n num_worker=args.num_worker,\n cpu_request=args.worker_cpu_request,\n cpu_limit=args.worker_cpu_limit,\n memory_request=args.worker_memory_request,\n memory_limit=args.worker_memory_limit,\n pod_priority=args.worker_pod_priority,\n mount_path=args.mount_path,\n volume_name=args.volume_name,\n image_pull_policy=args.image_pull_policy,\n restart_policy=\"Never\",\n )\n worker_manager.start_workers()\n\n try:\n while True:\n if task_q.finished():\n break\n time.sleep(30)\n except KeyboardInterrupt:\n logger.warning(\"Server stopping\")\n\n server.stop(0)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "elasticdl/python/elasticdl/master/main.py" } ]
[ { "content": "import logging\nimport time\nimport argparse\nimport os\nimport recordio\n\nimport grpc\nimport tensorflow as tf\n\ntf.enable_eager_execution()\n\nfrom contextlib import closing\nfrom concurrent import futures\nfrom elasticdl.proto import elasticdl_pb2_grpc\nfrom elasticdl.python.elasticdl.master.servicer import MasterServicer\nfrom elasticdl.python.elasticdl.master.task_queue import _TaskQueue\nfrom elasticdl.python.elasticdl.master.k8s_worker_manager import WorkerManager\nfrom elasticdl.python.elasticdl.common.model_helper import load_user_model, build_model\n\n\ndef _make_task_queue(data_dir, record_per_task, num_epoch):\n f_records = {}\n for f in os.listdir(data_dir):\n p = os.path.join(data_dir, f)\n with closing(recordio.Index(p)) as rio:\n f_records[p] = rio.num_records()\n return _TaskQueue(f_records, record_per_task, num_epoch)\n\n\ndef _parse_args():\n parser = argparse.ArgumentParser(description=\"ElasticDL Master\")\n parser.add_argument(\n \"--model_file\",\n help=\"Full file path of user defined neural model\",\n required=True,\n )\n parser.add_argument(\n \"--train_data_dir\",\n help=\"Training data directory. Files should be in RecordIO format\",\n required=True,\n )\n parser.add_argument(\"--record_per_task\", type=int, required=True)\n parser.add_argument(\"--num_epoch\", type=int, required=True)\n parser.add_argument(\n \"--grads_to_wait\",\n type=int,\n help=\"Number of gradients to wait before updating model\",\n required=True,\n )\n parser.add_argument(\n \"--minibatch_size\",\n type=int,\n help=\"Minibatch size used by workers to compute gradients\",\n required=True,\n )\n parser.add_argument(\n \"--num_worker\",\n type=int,\n help=\"the number of workers used in training\",\n default=0,\n )\n parser.add_argument(\n \"--worker_cpu_request\",\n help=\"the minimal cpu required by worker in training\",\n default=\"1000m\",\n )\n parser.add_argument(\n \"--worker_cpu_limit\",\n help=\"the maximal cpu used by worker in training\",\n default=\"1000m\",\n )\n parser.add_argument(\n \"--worker_memory_request\",\n help=\"the minimal memory required by worker in training\",\n default=\"4096Mi\",\n )\n parser.add_argument(\n \"--worker_memory_limit\",\n help=\"the maximal memory used by worker in training\",\n default=\"4096Mi\",\n )\n parser.add_argument(\n \"--worker_pod_priority\",\n help=\"the requested priority of worker pod\")\n parser.add_argument(\n \"--worker_image\", help=\"docker image for worker\", default=None\n )\n parser.add_argument(\"--job_name\", help=\"job name\", required=True)\n parser.add_argument(\n \"--codec_type\",\n default=\"bytes\",\n choices=[\"tf_example\", \"bytes\"],\n help=\"Type of codec(tf_example or bytes)\",\n )\n parser.add_argument(\"--volume_name\",\n help=\"the volume name of network filesytem\")\n parser.add_argument(\"--mount_path\",\n help=\"the mount path in the docker container\")\n parser.add_argument(\n \"--log_level\",\n choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],\n type=str.upper,\n default='WARNING',\n help=\"the logging level\",\n )\n parser.add_argument(\"--image_pull_policy\",\n help=\"the image pull policy of master and worker\")\n return parser.parse_args()\n\n\ndef main():\n args = _parse_args()\n\n # TODO: pass port via flags.\n PORT = 50001\n\n # Initialize logger\n logging.basicConfig(\n format='%(asctime)s %(name)s %(levelname)-8s '\n '[%(filename)s:%(lineno)d] %(message)s',\n )\n # Set level for ROOT logger.\n logging.getLogger().setLevel(args.log_level)\n logger = 
logging.getLogger(__name__)\n\n task_q = _make_task_queue(\n args.train_data_dir, args.record_per_task, args.num_epoch\n )\n model_module = load_user_model(args.model_file)\n model_inst = model_module.model\n build_model(model_inst, model_module.feature_columns())\n optimizer = model_module.optimizer()\n\n server = grpc.server(futures.ThreadPoolExecutor(max_workers=64))\n elasticdl_pb2_grpc.add_MasterServicer_to_server(\n MasterServicer(\n args.grads_to_wait,\n args.minibatch_size,\n optimizer,\n task_q,\n init_var=model_inst.trainable_variables,\n ),\n server,\n )\n server.add_insecure_port(\"[::]:{}\".format(PORT))\n server.start()\n logger.info(\"Server started at port: %d\", PORT)\n\n if args.num_worker:\n master_addr = \"%s:%d\" % (os.getenv(\"MY_POD_IP\", \"localhost\"), PORT)\n worker_command = [\"python\"]\n worker_args = [\n \"-m\",\n \"elasticdl.python.elasticdl.worker.main\",\n \"--model_file\",\n args.model_file,\n \"--master_addr\",\n master_addr,\n \"--codec_type\",\n args.codec_type,\n \"--log_level\",\n args.log_level\n ]\n\n worker_manager = WorkerManager(\n task_q,\n job_name=args.job_name,\n worker_image=args.worker_image,\n command=worker_command,\n args=worker_args,\n namespace=\"default\",\n num_worker=args.num_worker,\n cpu_request=args.worker_cpu_request,\n cpu_limit=args.worker_cpu_limit,\n memory_request=args.worker_memory_request,\n memory_limit=args.worker_memory_limit,\n pod_priority=args.worker_pod_priority,\n mount_path=args.mount_path,\n volume_name=args.volume_name,\n image_pull_policy=args.image_pull_policy,\n restart_policy=\"Never\",\n )\n worker_manager.start_workers()\n\n try:\n while True:\n if task_q.finished():\n break\n time.sleep(30)\n except KeyboardInterrupt:\n logger.warning(\"Server stopping\")\n\n server.stop(0)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "elasticdl/python/elasticdl/master/main.py" } ]
diff --git a/elasticdl/python/elasticdl/master/main.py b/elasticdl/python/elasticdl/master/main.py index 3cc388df8..9e5cc153b 100644 --- a/elasticdl/python/elasticdl/master/main.py +++ b/elasticdl/python/elasticdl/master/main.py @@ -157,7 +157,9 @@ def main(): "--master_addr", master_addr, "--codec_type", - args.codec_type + args.codec_type, + "--log_level", + args.log_level ] worker_manager = WorkerManager(
WeblateOrg__weblate-4665
migrations fail for database name containing "-"

**Describe the bug**
```
Applying memory.0007_use_trigram...Traceback (most recent call last):
  File "/usr/lib/python3.6/site-packages/django/db/backends/utils.py", line 84, in _execute
    return self.cursor.execute(sql, params)
psycopg2.errors.SyntaxError: syntax error at or near "-"
LINE 1: ALTER DATABASE weblate-staging SET pg_trgm.similarity_thresh...
                               ^
```

**To Reproduce**
Set the database name to "weblate-staging".

I worked around this by changing `ALTER DATABASE {} SET` to `ALTER DATABASE \"{}\" SET` in 0007_use_trigram.py and 0008_adjust_similarity.py.

weblate-4.1.1
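For illustration, the workaround amounts to letting Django quote the identifier instead of interpolating it raw. Below is a hypothetical migration snippet (not the actual 0007 code) showing the same idea with `schema_editor.quote_name()`, mirroring the approach the patch below takes for `ALTER ROLE`:

```python
def update_index(apps, schema_editor):
    if schema_editor.connection.vendor != "postgresql":
        return
    # quote_name() wraps the identifier in double quotes, so database names
    # containing "-" (e.g. weblate-staging) no longer break the statement.
    dbname = schema_editor.quote_name(
        schema_editor.connection.settings_dict["NAME"]
    )
    schema_editor.execute(
        "ALTER DATABASE {} SET pg_trgm.similarity_threshold = 0.5".format(dbname)
    )
```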
[ { "content": "# Generated by Django 3.0.5 on 2020-05-12 11:44\n\nfrom django.db import migrations\n\n\ndef update_index(apps, schema_editor):\n if schema_editor.connection.vendor != \"postgresql\":\n return\n # This ensures that extensions are loaded into the session. Without that\n # the next ALTER database fails unless we're running as superuser (which\n # is allowed to set non existing parameters, so missing extension doesn't\n # matter)\n # See https://www.postgresql.org/message-id/6376.1533675236%40sss.pgh.pa.us\n schema_editor.execute(\"SELECT show_limit()\")\n\n schema_editor.execute(\n \"ALTER ROLE {} SET pg_trgm.similarity_threshold = 0.5\".format(\n schema_editor.connection.settings_dict[\"USER\"]\n )\n )\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n (\"memory\", \"0007_use_trigram\"),\n ]\n\n operations = [\n migrations.RunPython(\n update_index, migrations.RunPython.noop, elidable=False, atomic=False\n )\n ]\n", "path": "weblate/memory/migrations/0008_adjust_similarity.py" } ]
[ { "content": "# Generated by Django 3.0.5 on 2020-05-12 11:44\n\nfrom django.db import migrations\n\n\ndef update_index(apps, schema_editor):\n if schema_editor.connection.vendor != \"postgresql\":\n return\n # This ensures that extensions are loaded into the session. Without that\n # the next ALTER database fails unless we're running as superuser (which\n # is allowed to set non existing parameters, so missing extension doesn't\n # matter)\n # See https://www.postgresql.org/message-id/6376.1533675236%40sss.pgh.pa.us\n schema_editor.execute(\"SELECT show_limit()\")\n\n schema_editor.execute(\n \"ALTER ROLE {} SET pg_trgm.similarity_threshold = 0.5\".format(\n schema_editor.quote_name(schema_editor.connection.settings_dict[\"USER\"])\n )\n )\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n (\"memory\", \"0007_use_trigram\"),\n ]\n\n operations = [\n migrations.RunPython(\n update_index, migrations.RunPython.noop, elidable=False, atomic=False\n )\n ]\n", "path": "weblate/memory/migrations/0008_adjust_similarity.py" } ]
diff --git a/weblate/memory/migrations/0008_adjust_similarity.py b/weblate/memory/migrations/0008_adjust_similarity.py index 1cb119c0a701..fd125cca4090 100644 --- a/weblate/memory/migrations/0008_adjust_similarity.py +++ b/weblate/memory/migrations/0008_adjust_similarity.py @@ -15,7 +15,7 @@ def update_index(apps, schema_editor): schema_editor.execute( "ALTER ROLE {} SET pg_trgm.similarity_threshold = 0.5".format( - schema_editor.connection.settings_dict["USER"] + schema_editor.quote_name(schema_editor.connection.settings_dict["USER"]) ) )
wright-group__WrightTools-552
setter for null
Currently, null is not settable on a channel. It can be worked around with `channel.attrs['null']`.
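The fix is essentially a property setter mirroring the existing `null` getter, so that `channel.null = value` writes through to the HDF5 attrs. A minimal, self-contained sketch of the pattern (a plain dict stands in for the h5py attrs mapping; this is not the real WrightTools class):

```python
class Channel:
    def __init__(self):
        self.attrs = {}          # stands in for the h5py attrs mapping

    @property
    def null(self):
        if 'null' not in self.attrs:
            self.attrs['null'] = 0
        return self.attrs['null']

    @null.setter
    def null(self, value):
        self.attrs['null'] = value


channel = Channel()
channel.null = 5                  # instead of channel.attrs['null'] = 5
print(channel.null)               # 5
```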
[ { "content": "\"\"\"Channel class and associated.\"\"\"\n\n\n# --- import --------------------------------------------------------------------------------------\n\n\nimport numpy as np\n\nimport h5py\n\nfrom .. import kit as wt_kit\nfrom .._dataset import Dataset\n\n\n# --- class ---------------------------------------------------------------------------------------\n\n\nclass Channel(Dataset):\n \"\"\"Channel.\"\"\"\n\n class_name = 'Channel'\n\n def __init__(self, parent, id, *, units=None, null=None, signed=None, label=None,\n label_seed=None, **kwargs):\n \"\"\"Construct a channel object.\n\n Parameters\n ----------\n values : array-like\n Values.\n name : string\n Channel name.\n units : string (optional)\n Channel units. Default is None.\n null : number (optional)\n Channel null. Default is None (0).\n signed : booelan (optional)\n Channel signed flag. Default is None (guess).\n label : string.\n Label. Default is None.\n label_seed : list of strings\n Label seed. Default is None.\n **kwargs\n Additional keyword arguments are added to the attrs dictionary\n and to the natural namespace of the object (if possible).\n \"\"\"\n self._parent = parent\n super().__init__(id)\n self.label = label\n self.label_seed = label_seed\n self.units = units\n self.dimensionality = len(self.shape)\n # attrs\n self.attrs.update(kwargs)\n self.attrs['name'] = h5py.h5i.get_name(self.id).decode().split('/')[-1]\n self.attrs['class'] = 'Channel'\n if signed is not None:\n self.attrs['signed'] = signed\n if null is not None:\n self.attrs['null'] = null\n for key, value in self.attrs.items():\n identifier = wt_kit.string2identifier(key)\n if not hasattr(self, identifier):\n setattr(self, identifier, value)\n\n @property\n def minor_extent(self):\n \"\"\"Minimum deviation from null.\"\"\"\n return min((self.max() - self.null, self.null - self.min()))\n\n @property\n def natural_name(self):\n \"\"\"Natural name of the dataset. 
May be different from name.\"\"\"\n try:\n assert self._natural_name is not None\n except (AssertionError, AttributeError):\n self._natural_name = self.attrs['name']\n finally:\n return self._natural_name\n\n @natural_name.setter\n def natural_name(self, value):\n index = wt_kit.get_index(self.parent.channel_names, self.natural_name)\n new = list(self.parent.channel_names)\n new[index] = value\n self.parent.channel_names = new\n self.attrs['name'] = value\n self._natural_name = None\n\n @property\n def null(self):\n if 'null' not in self.attrs.keys():\n self.attrs['null'] = 0\n return self.attrs['null']\n\n @property\n def major_extent(self):\n \"\"\"Maximum deviation from null.\"\"\"\n return max((self.max() - self.null, self.null - self.min()))\n\n @property\n def signed(self):\n if 'signed' not in self.attrs.keys():\n self.attrs['signed'] = False\n return self.attrs['signed']\n\n @signed.setter\n def signed(self, value):\n self.attrs['signed'] = value\n\n def mag(self):\n \"\"\"Channel magnitude (maximum deviation from null).\"\"\"\n return self.major_extent\n\n def normalize(self):\n \"\"\"Normalize a Channel, set `null` to 0 and the mag to 1.\"\"\"\n def f(dataset, s, null, mag):\n dataset[s] -= null\n dataset[s] /= mag\n if self.signed:\n mag = self.mag()\n else:\n mag = self.max()\n self.chunkwise(f, null=self.null, mag=mag)\n self._null = 0\n\n def trim(self, neighborhood, method='ztest', factor=3, replace='nan',\n verbose=True):\n \"\"\"Remove outliers from the dataset.\n\n Identifies outliers by comparing each point to its\n neighbors using a statistical test.\n\n Parameters\n ----------\n neighborhood : list of integers\n Size of the neighborhood in each dimension. Length of the list must\n be equal to the dimensionality of the channel.\n method : {'ztest'} (optional)\n Statistical test used to detect outliers. Default is ztest.\n\n ztest\n Compare point deviation from neighborhood mean to neighborhood\n standard deviation.\n\n factor : number (optional)\n Tolerance factor. Default is 3.\n replace : {'nan', 'mean', 'mask', number} (optional)\n Behavior of outlier replacement. Default is nan.\n\n nan\n Outliers are replaced by numpy nans.\n\n mean\n Outliers are replaced by the mean of its neighborhood.\n\n mask\n Array is masked at outliers.\n\n number\n Array becomes given number.\n\n Returns\n -------\n list of tuples\n Indicies of trimmed outliers.\n\n See Also\n --------\n clip\n Remove pixels outside of a certain range.\n \"\"\"\n raise NotImplementedError\n outliers = []\n means = []\n # find outliers\n for idx in np.ndindex(self.shape):\n slices = []\n for i, di, size in zip(idx, neighborhood, self.shape):\n start = max(0, i - di)\n stop = min(size, i + di + 1)\n slices.append(slice(start, stop, 1))\n neighbors = self[slices]\n mean = np.nanmean(neighbors)\n limit = np.nanstd(neighbors) * factor\n if np.abs(self[idx] - mean) > limit:\n outliers.append(idx)\n means.append(mean)\n # replace outliers\n i = tuple(zip(*outliers))\n if replace == 'nan':\n self[i] = np.nan\n elif replace == 'mean':\n self[i] = means\n elif replace == 'mask':\n self[:] = np.ma.array(self[:])\n self[i] = np.ma.masked\n elif type(replace) in [int, float]:\n self[i] = replace\n else:\n raise KeyError('replace must be one of {nan, mean, mask} or some number')\n # finish\n if verbose:\n print('%i outliers removed' % len(outliers))\n return outliers\n", "path": "WrightTools/data/_channel.py" } ]
[ { "content": "\"\"\"Channel class and associated.\"\"\"\n\n\n# --- import --------------------------------------------------------------------------------------\n\n\nimport numpy as np\n\nimport h5py\n\nfrom .. import kit as wt_kit\nfrom .._dataset import Dataset\n\n\n# --- class ---------------------------------------------------------------------------------------\n\n\nclass Channel(Dataset):\n \"\"\"Channel.\"\"\"\n\n class_name = 'Channel'\n\n def __init__(self, parent, id, *, units=None, null=None, signed=None, label=None,\n label_seed=None, **kwargs):\n \"\"\"Construct a channel object.\n\n Parameters\n ----------\n values : array-like\n Values.\n name : string\n Channel name.\n units : string (optional)\n Channel units. Default is None.\n null : number (optional)\n Channel null. Default is None (0).\n signed : booelan (optional)\n Channel signed flag. Default is None (guess).\n label : string.\n Label. Default is None.\n label_seed : list of strings\n Label seed. Default is None.\n **kwargs\n Additional keyword arguments are added to the attrs dictionary\n and to the natural namespace of the object (if possible).\n \"\"\"\n self._parent = parent\n super().__init__(id)\n self.label = label\n self.label_seed = label_seed\n self.units = units\n self.dimensionality = len(self.shape)\n # attrs\n self.attrs.update(kwargs)\n self.attrs['name'] = h5py.h5i.get_name(self.id).decode().split('/')[-1]\n self.attrs['class'] = 'Channel'\n if signed is not None:\n self.attrs['signed'] = signed\n if null is not None:\n self.attrs['null'] = null\n for key, value in self.attrs.items():\n identifier = wt_kit.string2identifier(key)\n if not hasattr(self, identifier):\n setattr(self, identifier, value)\n\n @property\n def minor_extent(self):\n \"\"\"Minimum deviation from null.\"\"\"\n return min((self.max() - self.null, self.null - self.min()))\n\n @property\n def natural_name(self):\n \"\"\"Natural name of the dataset. 
May be different from name.\"\"\"\n try:\n assert self._natural_name is not None\n except (AssertionError, AttributeError):\n self._natural_name = self.attrs['name']\n finally:\n return self._natural_name\n\n @natural_name.setter\n def natural_name(self, value):\n index = wt_kit.get_index(self.parent.channel_names, self.natural_name)\n new = list(self.parent.channel_names)\n new[index] = value\n self.parent.channel_names = new\n self.attrs['name'] = value\n self._natural_name = None\n\n @property\n def null(self):\n if 'null' not in self.attrs.keys():\n self.attrs['null'] = 0\n return self.attrs['null']\n\n @null.setter\n def null(self, value):\n self.attrs['null'] = value\n\n @property\n def major_extent(self):\n \"\"\"Maximum deviation from null.\"\"\"\n return max((self.max() - self.null, self.null - self.min()))\n\n @property\n def signed(self):\n if 'signed' not in self.attrs.keys():\n self.attrs['signed'] = False\n return self.attrs['signed']\n\n @signed.setter\n def signed(self, value):\n self.attrs['signed'] = value\n\n def mag(self):\n \"\"\"Channel magnitude (maximum deviation from null).\"\"\"\n return self.major_extent\n\n def normalize(self):\n \"\"\"Normalize a Channel, set `null` to 0 and the mag to 1.\"\"\"\n def f(dataset, s, null, mag):\n dataset[s] -= null\n dataset[s] /= mag\n if self.signed:\n mag = self.mag()\n else:\n mag = self.max()\n self.chunkwise(f, null=self.null, mag=mag)\n self._null = 0\n\n def trim(self, neighborhood, method='ztest', factor=3, replace='nan',\n verbose=True):\n \"\"\"Remove outliers from the dataset.\n\n Identifies outliers by comparing each point to its\n neighbors using a statistical test.\n\n Parameters\n ----------\n neighborhood : list of integers\n Size of the neighborhood in each dimension. Length of the list must\n be equal to the dimensionality of the channel.\n method : {'ztest'} (optional)\n Statistical test used to detect outliers. Default is ztest.\n\n ztest\n Compare point deviation from neighborhood mean to neighborhood\n standard deviation.\n\n factor : number (optional)\n Tolerance factor. Default is 3.\n replace : {'nan', 'mean', 'mask', number} (optional)\n Behavior of outlier replacement. Default is nan.\n\n nan\n Outliers are replaced by numpy nans.\n\n mean\n Outliers are replaced by the mean of its neighborhood.\n\n mask\n Array is masked at outliers.\n\n number\n Array becomes given number.\n\n Returns\n -------\n list of tuples\n Indicies of trimmed outliers.\n\n See Also\n --------\n clip\n Remove pixels outside of a certain range.\n \"\"\"\n raise NotImplementedError\n outliers = []\n means = []\n # find outliers\n for idx in np.ndindex(self.shape):\n slices = []\n for i, di, size in zip(idx, neighborhood, self.shape):\n start = max(0, i - di)\n stop = min(size, i + di + 1)\n slices.append(slice(start, stop, 1))\n neighbors = self[slices]\n mean = np.nanmean(neighbors)\n limit = np.nanstd(neighbors) * factor\n if np.abs(self[idx] - mean) > limit:\n outliers.append(idx)\n means.append(mean)\n # replace outliers\n i = tuple(zip(*outliers))\n if replace == 'nan':\n self[i] = np.nan\n elif replace == 'mean':\n self[i] = means\n elif replace == 'mask':\n self[:] = np.ma.array(self[:])\n self[i] = np.ma.masked\n elif type(replace) in [int, float]:\n self[i] = replace\n else:\n raise KeyError('replace must be one of {nan, mean, mask} or some number')\n # finish\n if verbose:\n print('%i outliers removed' % len(outliers))\n return outliers\n", "path": "WrightTools/data/_channel.py" } ]
diff --git a/WrightTools/data/_channel.py b/WrightTools/data/_channel.py index 5302ada5b..f5f32892c 100644 --- a/WrightTools/data/_channel.py +++ b/WrightTools/data/_channel.py @@ -93,6 +93,10 @@ def null(self): self.attrs['null'] = 0 return self.attrs['null'] + @null.setter + def null(self, value): + self.attrs['null'] = value + @property def major_extent(self): """Maximum deviation from null.""" diff --git a/tests/data/channel/null.py b/tests/data/channel/null.py new file mode 100644 index 000000000..b45ca661e --- /dev/null +++ b/tests/data/channel/null.py @@ -0,0 +1,28 @@ +"""Tests to do with null.""" + + +# --- import -------------------------------------------------------------------------------------- + + +import WrightTools as wt + +from WrightTools import datasets + + +# --- test ---------------------------------------------------------------------------------------- + + +def test_setter(): + p = datasets.BrunoldrRaman.LDS821_514nm_80mW + data = wt.data.from_BrunoldrRaman(p) + assert data.signal.null == 0 + data.signal.null = 5 + assert data.signal.null == 5 + data.close() + + +# --- run ----------------------------------------------------------------------------------------- + + +if __name__ == '__main__': + test_setter()
encode__uvicorn-943
Docs: workers = 1 when WEB_CONCURRENCY is not set Hello, thanks for this great ASGI server 🙂 I have an application that must use only one worker (it's not thread-safe), and so I wanted to confirm that, by default, uvicorn only uses one worker. But I had to read the source code https://github.com/encode/uvicorn/blob/afb2d565c8dae859bcef4c76b3c6dc3f3077314d/uvicorn/config.py#L177 ...to confirm that when `WEB_CONCURRENCY` is not defined, and `workers` is not specified, 1 is used. Would you accept a PR adding that information to the docs (there are 4-5 places where this can be added)?
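For reference, the default the docs should state follows from how the worker count is resolved. A rough paraphrase of the linked line (not a verbatim copy of `uvicorn/config.py`):

```python
import os

def resolve_workers(workers=None):
    # Mirrors the linked config line: an explicit value wins, otherwise
    # $WEB_CONCURRENCY is used, otherwise a single worker.
    return workers or int(os.environ.get("WEB_CONCURRENCY", 1))

os.environ.pop("WEB_CONCURRENCY", None)
print(resolve_workers())      # 1  -> the default the docs should mention
os.environ["WEB_CONCURRENCY"] = "2"
print(resolve_workers())      # 2
print(resolve_workers(4))     # 4  -> explicit --workers wins
```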
[ { "content": "import logging\nimport platform\nimport ssl\nimport sys\nimport typing\n\nimport click\n\nimport uvicorn\nfrom uvicorn.config import (\n HTTP_PROTOCOLS,\n INTERFACES,\n LIFESPAN,\n LOG_LEVELS,\n LOGGING_CONFIG,\n LOOP_SETUPS,\n SSL_PROTOCOL_VERSION,\n WS_PROTOCOLS,\n Config,\n)\nfrom uvicorn.server import Server, ServerState # noqa: F401 # Used to be defined here.\nfrom uvicorn.supervisors import ChangeReload, Multiprocess\n\nLEVEL_CHOICES = click.Choice(LOG_LEVELS.keys())\nHTTP_CHOICES = click.Choice(HTTP_PROTOCOLS.keys())\nWS_CHOICES = click.Choice(WS_PROTOCOLS.keys())\nLIFESPAN_CHOICES = click.Choice(LIFESPAN.keys())\nLOOP_CHOICES = click.Choice([key for key in LOOP_SETUPS.keys() if key != \"none\"])\nINTERFACE_CHOICES = click.Choice(INTERFACES)\n\nlogger = logging.getLogger(\"uvicorn.error\")\n\n\ndef print_version(ctx, param, value):\n if not value or ctx.resilient_parsing:\n return\n click.echo(\n \"Running uvicorn %s with %s %s on %s\"\n % (\n uvicorn.__version__,\n platform.python_implementation(),\n platform.python_version(),\n platform.system(),\n )\n )\n ctx.exit()\n\n\[email protected]()\[email protected](\"app\")\[email protected](\n \"--host\",\n type=str,\n default=\"127.0.0.1\",\n help=\"Bind socket to this host.\",\n show_default=True,\n)\[email protected](\n \"--port\",\n type=int,\n default=8000,\n help=\"Bind socket to this port.\",\n show_default=True,\n)\[email protected](\"--uds\", type=str, default=None, help=\"Bind to a UNIX domain socket.\")\[email protected](\n \"--fd\", type=int, default=None, help=\"Bind to socket from this file descriptor.\"\n)\[email protected](\n \"--debug\", is_flag=True, default=False, help=\"Enable debug mode.\", hidden=True\n)\[email protected](\"--reload\", is_flag=True, default=False, help=\"Enable auto-reload.\")\[email protected](\n \"--reload-dir\",\n \"reload_dirs\",\n multiple=True,\n help=\"Set reload directories explicitly, instead of using the current working\"\n \" directory.\",\n)\[email protected](\n \"--reload-delay\",\n type=float,\n default=0.25,\n show_default=True,\n help=\"Delay between previous and next check if application needs to be.\"\n \" Defaults to 0.25s.\",\n)\[email protected](\n \"--workers\",\n default=None,\n type=int,\n help=\"Number of worker processes. Defaults to the $WEB_CONCURRENCY environment\"\n \" variable if available. Not valid with --reload.\",\n)\[email protected](\n \"--loop\",\n type=LOOP_CHOICES,\n default=\"auto\",\n help=\"Event loop implementation.\",\n show_default=True,\n)\[email protected](\n \"--http\",\n type=HTTP_CHOICES,\n default=\"auto\",\n help=\"HTTP protocol implementation.\",\n show_default=True,\n)\[email protected](\n \"--ws\",\n type=WS_CHOICES,\n default=\"auto\",\n help=\"WebSocket protocol implementation.\",\n show_default=True,\n)\[email protected](\n \"--lifespan\",\n type=LIFESPAN_CHOICES,\n default=\"auto\",\n help=\"Lifespan implementation.\",\n show_default=True,\n)\[email protected](\n \"--interface\",\n type=INTERFACE_CHOICES,\n default=\"auto\",\n help=\"Select ASGI3, ASGI2, or WSGI as the application interface.\",\n show_default=True,\n)\[email protected](\n \"--env-file\",\n type=click.Path(exists=True),\n default=None,\n help=\"Environment configuration file.\",\n show_default=True,\n)\[email protected](\n \"--log-config\",\n type=click.Path(exists=True),\n default=None,\n help=\"Logging configuration file. 
Supported formats: .ini, .json, .yaml.\",\n show_default=True,\n)\[email protected](\n \"--log-level\",\n type=LEVEL_CHOICES,\n default=None,\n help=\"Log level. [default: info]\",\n show_default=True,\n)\[email protected](\n \"--access-log/--no-access-log\",\n is_flag=True,\n default=True,\n help=\"Enable/Disable access log.\",\n)\[email protected](\n \"--use-colors/--no-use-colors\",\n is_flag=True,\n default=None,\n help=\"Enable/Disable colorized logging.\",\n)\[email protected](\n \"--proxy-headers/--no-proxy-headers\",\n is_flag=True,\n default=True,\n help=\"Enable/Disable X-Forwarded-Proto, X-Forwarded-For, X-Forwarded-Port to \"\n \"populate remote address info.\",\n)\[email protected](\n \"--forwarded-allow-ips\",\n type=str,\n default=None,\n help=\"Comma seperated list of IPs to trust with proxy headers. Defaults to\"\n \" the $FORWARDED_ALLOW_IPS environment variable if available, or '127.0.0.1'.\",\n)\[email protected](\n \"--root-path\",\n type=str,\n default=\"\",\n help=\"Set the ASGI 'root_path' for applications submounted below a given URL path.\",\n)\[email protected](\n \"--limit-concurrency\",\n type=int,\n default=None,\n help=\"Maximum number of concurrent connections or tasks to allow, before issuing\"\n \" HTTP 503 responses.\",\n)\[email protected](\n \"--backlog\",\n type=int,\n default=2048,\n help=\"Maximum number of connections to hold in backlog\",\n)\[email protected](\n \"--limit-max-requests\",\n type=int,\n default=None,\n help=\"Maximum number of requests to service before terminating the process.\",\n)\[email protected](\n \"--timeout-keep-alive\",\n type=int,\n default=5,\n help=\"Close Keep-Alive connections if no new data is received within this timeout.\",\n show_default=True,\n)\[email protected](\n \"--ssl-keyfile\", type=str, default=None, help=\"SSL key file\", show_default=True\n)\[email protected](\n \"--ssl-certfile\",\n type=str,\n default=None,\n help=\"SSL certificate file\",\n show_default=True,\n)\[email protected](\n \"--ssl-keyfile-password\",\n type=str,\n default=None,\n help=\"SSL keyfile password\",\n show_default=True,\n)\[email protected](\n \"--ssl-version\",\n type=int,\n default=SSL_PROTOCOL_VERSION,\n help=\"SSL version to use (see stdlib ssl module's)\",\n show_default=True,\n)\[email protected](\n \"--ssl-cert-reqs\",\n type=int,\n default=ssl.CERT_NONE,\n help=\"Whether client certificate is required (see stdlib ssl module's)\",\n show_default=True,\n)\[email protected](\n \"--ssl-ca-certs\",\n type=str,\n default=None,\n help=\"CA certificates file\",\n show_default=True,\n)\[email protected](\n \"--ssl-ciphers\",\n type=str,\n default=\"TLSv1\",\n help=\"Ciphers to use (see stdlib ssl module's)\",\n show_default=True,\n)\[email protected](\n \"--header\",\n \"headers\",\n multiple=True,\n help=\"Specify custom default HTTP response headers as a Name:Value pair\",\n)\[email protected](\n \"--version\",\n is_flag=True,\n callback=print_version,\n expose_value=False,\n is_eager=True,\n help=\"Display the uvicorn version and exit.\",\n)\[email protected](\n \"--app-dir\",\n \"app_dir\",\n default=\".\",\n show_default=True,\n help=\"Look for APP in the specified directory, by adding this to the PYTHONPATH.\"\n \" Defaults to the current working directory.\",\n)\[email protected](\n \"--factory\",\n is_flag=True,\n default=False,\n help=\"Treat APP as an application factory, i.e. 
a () -> <ASGI app> callable.\",\n show_default=True,\n)\ndef main(\n app,\n host: str,\n port: int,\n uds: str,\n fd: int,\n loop: str,\n http: str,\n ws: str,\n lifespan: str,\n interface: str,\n debug: bool,\n reload: bool,\n reload_dirs: typing.List[str],\n reload_delay: float,\n workers: int,\n env_file: str,\n log_config: str,\n log_level: str,\n access_log: bool,\n proxy_headers: bool,\n forwarded_allow_ips: str,\n root_path: str,\n limit_concurrency: int,\n backlog: int,\n limit_max_requests: int,\n timeout_keep_alive: int,\n ssl_keyfile: str,\n ssl_certfile: str,\n ssl_keyfile_password: str,\n ssl_version: int,\n ssl_cert_reqs: int,\n ssl_ca_certs: str,\n ssl_ciphers: str,\n headers: typing.List[str],\n use_colors: bool,\n app_dir: str,\n factory: bool,\n):\n sys.path.insert(0, app_dir)\n\n kwargs = {\n \"app\": app,\n \"host\": host,\n \"port\": port,\n \"uds\": uds,\n \"fd\": fd,\n \"loop\": loop,\n \"http\": http,\n \"ws\": ws,\n \"lifespan\": lifespan,\n \"env_file\": env_file,\n \"log_config\": LOGGING_CONFIG if log_config is None else log_config,\n \"log_level\": log_level,\n \"access_log\": access_log,\n \"interface\": interface,\n \"debug\": debug,\n \"reload\": reload,\n \"reload_dirs\": reload_dirs if reload_dirs else None,\n \"reload_delay\": reload_delay,\n \"workers\": workers,\n \"proxy_headers\": proxy_headers,\n \"forwarded_allow_ips\": forwarded_allow_ips,\n \"root_path\": root_path,\n \"limit_concurrency\": limit_concurrency,\n \"backlog\": backlog,\n \"limit_max_requests\": limit_max_requests,\n \"timeout_keep_alive\": timeout_keep_alive,\n \"ssl_keyfile\": ssl_keyfile,\n \"ssl_certfile\": ssl_certfile,\n \"ssl_keyfile_password\": ssl_keyfile_password,\n \"ssl_version\": ssl_version,\n \"ssl_cert_reqs\": ssl_cert_reqs,\n \"ssl_ca_certs\": ssl_ca_certs,\n \"ssl_ciphers\": ssl_ciphers,\n \"headers\": [header.split(\":\", 1) for header in headers],\n \"use_colors\": use_colors,\n \"factory\": factory,\n }\n run(**kwargs)\n\n\ndef run(app, **kwargs):\n config = Config(app, **kwargs)\n server = Server(config=config)\n\n if (config.reload or config.workers > 1) and not isinstance(app, str):\n logger = logging.getLogger(\"uvicorn.error\")\n logger.warning(\n \"You must pass the application as an import string to enable 'reload' or \"\n \"'workers'.\"\n )\n sys.exit(1)\n\n if config.should_reload:\n sock = config.bind_socket()\n supervisor = ChangeReload(config, target=server.run, sockets=[sock])\n supervisor.run()\n elif config.workers > 1:\n sock = config.bind_socket()\n supervisor = Multiprocess(config, target=server.run, sockets=[sock])\n supervisor.run()\n else:\n server.run()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "uvicorn/main.py" } ]
[ { "content": "import logging\nimport platform\nimport ssl\nimport sys\nimport typing\n\nimport click\n\nimport uvicorn\nfrom uvicorn.config import (\n HTTP_PROTOCOLS,\n INTERFACES,\n LIFESPAN,\n LOG_LEVELS,\n LOGGING_CONFIG,\n LOOP_SETUPS,\n SSL_PROTOCOL_VERSION,\n WS_PROTOCOLS,\n Config,\n)\nfrom uvicorn.server import Server, ServerState # noqa: F401 # Used to be defined here.\nfrom uvicorn.supervisors import ChangeReload, Multiprocess\n\nLEVEL_CHOICES = click.Choice(LOG_LEVELS.keys())\nHTTP_CHOICES = click.Choice(HTTP_PROTOCOLS.keys())\nWS_CHOICES = click.Choice(WS_PROTOCOLS.keys())\nLIFESPAN_CHOICES = click.Choice(LIFESPAN.keys())\nLOOP_CHOICES = click.Choice([key for key in LOOP_SETUPS.keys() if key != \"none\"])\nINTERFACE_CHOICES = click.Choice(INTERFACES)\n\nlogger = logging.getLogger(\"uvicorn.error\")\n\n\ndef print_version(ctx, param, value):\n if not value or ctx.resilient_parsing:\n return\n click.echo(\n \"Running uvicorn %s with %s %s on %s\"\n % (\n uvicorn.__version__,\n platform.python_implementation(),\n platform.python_version(),\n platform.system(),\n )\n )\n ctx.exit()\n\n\[email protected]()\[email protected](\"app\")\[email protected](\n \"--host\",\n type=str,\n default=\"127.0.0.1\",\n help=\"Bind socket to this host.\",\n show_default=True,\n)\[email protected](\n \"--port\",\n type=int,\n default=8000,\n help=\"Bind socket to this port.\",\n show_default=True,\n)\[email protected](\"--uds\", type=str, default=None, help=\"Bind to a UNIX domain socket.\")\[email protected](\n \"--fd\", type=int, default=None, help=\"Bind to socket from this file descriptor.\"\n)\[email protected](\n \"--debug\", is_flag=True, default=False, help=\"Enable debug mode.\", hidden=True\n)\[email protected](\"--reload\", is_flag=True, default=False, help=\"Enable auto-reload.\")\[email protected](\n \"--reload-dir\",\n \"reload_dirs\",\n multiple=True,\n help=\"Set reload directories explicitly, instead of using the current working\"\n \" directory.\",\n)\[email protected](\n \"--reload-delay\",\n type=float,\n default=0.25,\n show_default=True,\n help=\"Delay between previous and next check if application needs to be.\"\n \" Defaults to 0.25s.\",\n)\[email protected](\n \"--workers\",\n default=None,\n type=int,\n help=\"Number of worker processes. Defaults to the $WEB_CONCURRENCY environment\"\n \" variable if available, or 1. Not valid with --reload.\",\n)\[email protected](\n \"--loop\",\n type=LOOP_CHOICES,\n default=\"auto\",\n help=\"Event loop implementation.\",\n show_default=True,\n)\[email protected](\n \"--http\",\n type=HTTP_CHOICES,\n default=\"auto\",\n help=\"HTTP protocol implementation.\",\n show_default=True,\n)\[email protected](\n \"--ws\",\n type=WS_CHOICES,\n default=\"auto\",\n help=\"WebSocket protocol implementation.\",\n show_default=True,\n)\[email protected](\n \"--lifespan\",\n type=LIFESPAN_CHOICES,\n default=\"auto\",\n help=\"Lifespan implementation.\",\n show_default=True,\n)\[email protected](\n \"--interface\",\n type=INTERFACE_CHOICES,\n default=\"auto\",\n help=\"Select ASGI3, ASGI2, or WSGI as the application interface.\",\n show_default=True,\n)\[email protected](\n \"--env-file\",\n type=click.Path(exists=True),\n default=None,\n help=\"Environment configuration file.\",\n show_default=True,\n)\[email protected](\n \"--log-config\",\n type=click.Path(exists=True),\n default=None,\n help=\"Logging configuration file. 
Supported formats: .ini, .json, .yaml.\",\n show_default=True,\n)\[email protected](\n \"--log-level\",\n type=LEVEL_CHOICES,\n default=None,\n help=\"Log level. [default: info]\",\n show_default=True,\n)\[email protected](\n \"--access-log/--no-access-log\",\n is_flag=True,\n default=True,\n help=\"Enable/Disable access log.\",\n)\[email protected](\n \"--use-colors/--no-use-colors\",\n is_flag=True,\n default=None,\n help=\"Enable/Disable colorized logging.\",\n)\[email protected](\n \"--proxy-headers/--no-proxy-headers\",\n is_flag=True,\n default=True,\n help=\"Enable/Disable X-Forwarded-Proto, X-Forwarded-For, X-Forwarded-Port to \"\n \"populate remote address info.\",\n)\[email protected](\n \"--forwarded-allow-ips\",\n type=str,\n default=None,\n help=\"Comma seperated list of IPs to trust with proxy headers. Defaults to\"\n \" the $FORWARDED_ALLOW_IPS environment variable if available, or '127.0.0.1'.\",\n)\[email protected](\n \"--root-path\",\n type=str,\n default=\"\",\n help=\"Set the ASGI 'root_path' for applications submounted below a given URL path.\",\n)\[email protected](\n \"--limit-concurrency\",\n type=int,\n default=None,\n help=\"Maximum number of concurrent connections or tasks to allow, before issuing\"\n \" HTTP 503 responses.\",\n)\[email protected](\n \"--backlog\",\n type=int,\n default=2048,\n help=\"Maximum number of connections to hold in backlog\",\n)\[email protected](\n \"--limit-max-requests\",\n type=int,\n default=None,\n help=\"Maximum number of requests to service before terminating the process.\",\n)\[email protected](\n \"--timeout-keep-alive\",\n type=int,\n default=5,\n help=\"Close Keep-Alive connections if no new data is received within this timeout.\",\n show_default=True,\n)\[email protected](\n \"--ssl-keyfile\", type=str, default=None, help=\"SSL key file\", show_default=True\n)\[email protected](\n \"--ssl-certfile\",\n type=str,\n default=None,\n help=\"SSL certificate file\",\n show_default=True,\n)\[email protected](\n \"--ssl-keyfile-password\",\n type=str,\n default=None,\n help=\"SSL keyfile password\",\n show_default=True,\n)\[email protected](\n \"--ssl-version\",\n type=int,\n default=SSL_PROTOCOL_VERSION,\n help=\"SSL version to use (see stdlib ssl module's)\",\n show_default=True,\n)\[email protected](\n \"--ssl-cert-reqs\",\n type=int,\n default=ssl.CERT_NONE,\n help=\"Whether client certificate is required (see stdlib ssl module's)\",\n show_default=True,\n)\[email protected](\n \"--ssl-ca-certs\",\n type=str,\n default=None,\n help=\"CA certificates file\",\n show_default=True,\n)\[email protected](\n \"--ssl-ciphers\",\n type=str,\n default=\"TLSv1\",\n help=\"Ciphers to use (see stdlib ssl module's)\",\n show_default=True,\n)\[email protected](\n \"--header\",\n \"headers\",\n multiple=True,\n help=\"Specify custom default HTTP response headers as a Name:Value pair\",\n)\[email protected](\n \"--version\",\n is_flag=True,\n callback=print_version,\n expose_value=False,\n is_eager=True,\n help=\"Display the uvicorn version and exit.\",\n)\[email protected](\n \"--app-dir\",\n \"app_dir\",\n default=\".\",\n show_default=True,\n help=\"Look for APP in the specified directory, by adding this to the PYTHONPATH.\"\n \" Defaults to the current working directory.\",\n)\[email protected](\n \"--factory\",\n is_flag=True,\n default=False,\n help=\"Treat APP as an application factory, i.e. 
a () -> <ASGI app> callable.\",\n show_default=True,\n)\ndef main(\n app,\n host: str,\n port: int,\n uds: str,\n fd: int,\n loop: str,\n http: str,\n ws: str,\n lifespan: str,\n interface: str,\n debug: bool,\n reload: bool,\n reload_dirs: typing.List[str],\n reload_delay: float,\n workers: int,\n env_file: str,\n log_config: str,\n log_level: str,\n access_log: bool,\n proxy_headers: bool,\n forwarded_allow_ips: str,\n root_path: str,\n limit_concurrency: int,\n backlog: int,\n limit_max_requests: int,\n timeout_keep_alive: int,\n ssl_keyfile: str,\n ssl_certfile: str,\n ssl_keyfile_password: str,\n ssl_version: int,\n ssl_cert_reqs: int,\n ssl_ca_certs: str,\n ssl_ciphers: str,\n headers: typing.List[str],\n use_colors: bool,\n app_dir: str,\n factory: bool,\n):\n sys.path.insert(0, app_dir)\n\n kwargs = {\n \"app\": app,\n \"host\": host,\n \"port\": port,\n \"uds\": uds,\n \"fd\": fd,\n \"loop\": loop,\n \"http\": http,\n \"ws\": ws,\n \"lifespan\": lifespan,\n \"env_file\": env_file,\n \"log_config\": LOGGING_CONFIG if log_config is None else log_config,\n \"log_level\": log_level,\n \"access_log\": access_log,\n \"interface\": interface,\n \"debug\": debug,\n \"reload\": reload,\n \"reload_dirs\": reload_dirs if reload_dirs else None,\n \"reload_delay\": reload_delay,\n \"workers\": workers,\n \"proxy_headers\": proxy_headers,\n \"forwarded_allow_ips\": forwarded_allow_ips,\n \"root_path\": root_path,\n \"limit_concurrency\": limit_concurrency,\n \"backlog\": backlog,\n \"limit_max_requests\": limit_max_requests,\n \"timeout_keep_alive\": timeout_keep_alive,\n \"ssl_keyfile\": ssl_keyfile,\n \"ssl_certfile\": ssl_certfile,\n \"ssl_keyfile_password\": ssl_keyfile_password,\n \"ssl_version\": ssl_version,\n \"ssl_cert_reqs\": ssl_cert_reqs,\n \"ssl_ca_certs\": ssl_ca_certs,\n \"ssl_ciphers\": ssl_ciphers,\n \"headers\": [header.split(\":\", 1) for header in headers],\n \"use_colors\": use_colors,\n \"factory\": factory,\n }\n run(**kwargs)\n\n\ndef run(app, **kwargs):\n config = Config(app, **kwargs)\n server = Server(config=config)\n\n if (config.reload or config.workers > 1) and not isinstance(app, str):\n logger = logging.getLogger(\"uvicorn.error\")\n logger.warning(\n \"You must pass the application as an import string to enable 'reload' or \"\n \"'workers'.\"\n )\n sys.exit(1)\n\n if config.should_reload:\n sock = config.bind_socket()\n supervisor = ChangeReload(config, target=server.run, sockets=[sock])\n supervisor.run()\n elif config.workers > 1:\n sock = config.bind_socket()\n supervisor = Multiprocess(config, target=server.run, sockets=[sock])\n supervisor.run()\n else:\n server.run()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "uvicorn/main.py" } ]
diff --git a/docs/deployment.md b/docs/deployment.md index cec0d8ef8..8aabe1b18 100644 --- a/docs/deployment.md +++ b/docs/deployment.md @@ -45,7 +45,7 @@ Options: --workers INTEGER Number of worker processes. Defaults to the $WEB_CONCURRENCY environment variable if - available. Not valid with --reload. + available, or 1. Not valid with --reload. --loop [auto|asyncio|uvloop] Event loop implementation. [default: auto] --http [auto|h11|httptools] HTTP protocol implementation. [default: diff --git a/docs/index.md b/docs/index.md index be1a096ca..ae587f2ee 100644 --- a/docs/index.md +++ b/docs/index.md @@ -115,7 +115,7 @@ Options: --workers INTEGER Number of worker processes. Defaults to the $WEB_CONCURRENCY environment variable if - available. Not valid with --reload. + available, or 1. Not valid with --reload. --loop [auto|asyncio|uvloop] Event loop implementation. [default: auto] --http [auto|h11|httptools] HTTP protocol implementation. [default: diff --git a/docs/settings.md b/docs/settings.md index 2a717ab93..8b8398f83 100644 --- a/docs/settings.md +++ b/docs/settings.md @@ -24,7 +24,7 @@ equivalent keyword arguments, eg. `uvicorn.run("example:app", port=5000, reload= ## Production -* `--workers <int>` - Use multiple worker processes. Defaults to the value of the `$WEB_CONCURRENCY` environment variable. +* `--workers <int>` - Use multiple worker processes. Defaults to the `$WEB_CONCURRENCY` environment variable if available, or 1. ## Logging diff --git a/uvicorn/main.py b/uvicorn/main.py index 6bfc631f0..d7e866ec4 100644 --- a/uvicorn/main.py +++ b/uvicorn/main.py @@ -90,7 +90,7 @@ def print_version(ctx, param, value): default=None, type=int, help="Number of worker processes. Defaults to the $WEB_CONCURRENCY environment" - " variable if available. Not valid with --reload.", + " variable if available, or 1. Not valid with --reload.", ) @click.option( "--loop",
plotly__dash-2175
[BUG] NoUpdate.is_no_update can't handle ndarray

**Describe your context**
Currently running a project using dash for data display; callbacks pass ndarrays of certain sections of data to the view.
```
dash                       2.6.0
dash-bootstrap-components  1.2.0
dash-core-components       2.0.0
dash-daq                   0.5.0
dash-extensions            0.1.5
dash-html-components       2.0.0
dash-table                 5.0.0
```

**Describe the bug**
If the callback passes an ndarray, `NoUpdate.is_no_update` on line 441 of _callback.py raises a ValueError. This issue was not seen in previous versions.
```
[2022-07-31 16:27:03,570] ERROR in app: Exception on /_dash-update-component [POST]
Traceback (most recent call last):
  File "...\venv\lib\site-packages\flask\app.py", line 2077, in wsgi_app
    response = self.full_dispatch_request()
  File "...\venv\lib\site-packages\flask\app.py", line 1525, in full_dispatch_request
    rv = self.handle_user_exception(e)
  File "...\venv\lib\site-packages\flask\app.py", line 1523, in full_dispatch_request
    rv = self.dispatch_request()
  File "...\venv\lib\site-packages\flask\app.py", line 1509, in dispatch_request
    return self.ensure_sync(self.view_functions[rule.endpoint])(**req.view_args)
  File "...\venv\lib\site-packages\dash\dash.py", line 1260, in dispatch
    ctx.run(
  File "...\venv\lib\site-packages\dash\_callback.py", line 441, in add_context
    if NoUpdate.is_no_update(output_value):
ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()
```

A workaround is to wrap the ndarray in a list, like this:
```
return [myndArr]
```
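The ambiguity arises because `is_no_update` falls through to a bare `obj == {...}` comparison: with a NumPy array that comparison can return an array instead of a bool, and truth-testing an array raises the error above. A minimal sketch of the failure mode and of the `isinstance` guard used in the fixed file below (the exact result of `ndarray == dict` varies by NumPy version; the array truth-test is the part being illustrated):

```python
import numpy as np

SENTINEL = {"_dash_no_update": "_dash_no_update"}
arr = np.asarray([1, 2, 3])

# An elementwise comparison yields an array, and truth-testing it raises.
mask = arr == np.asarray([1, 0, 3])    # array([ True, False,  True])
try:
    if mask:                           # ValueError: truth value ... is ambiguous
        pass
except ValueError as exc:
    print(exc)

# Guarding the dict comparison with an isinstance check (as in the fixed file
# below) keeps arbitrary return values such as ndarrays out of the `==` test.
def is_no_update(obj):
    return isinstance(obj, dict) and obj == SENTINEL

print(is_no_update(arr))        # False, no ValueError
print(is_no_update(SENTINEL))   # True
```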
[ { "content": "import collections\nfrom functools import wraps\n\nimport flask\n\nfrom .dependencies import (\n handle_callback_args,\n handle_grouped_callback_args,\n Output,\n)\nfrom .exceptions import (\n PreventUpdate,\n WildcardInLongCallback,\n DuplicateCallback,\n MissingLongCallbackManagerError,\n LongCallbackError,\n)\n\nfrom ._grouping import (\n flatten_grouping,\n make_grouping_by_index,\n grouping_len,\n)\nfrom ._utils import (\n create_callback_id,\n stringify_id,\n to_json,\n coerce_to_list,\n AttributeDict,\n)\n\nfrom . import _validate\nfrom .long_callback.managers import BaseLongCallbackManager\nfrom ._callback_context import context_value\n\n\nclass NoUpdate:\n def to_plotly_json(self): # pylint: disable=no-self-use\n return {\"_dash_no_update\": \"_dash_no_update\"}\n\n @staticmethod\n def is_no_update(obj):\n return isinstance(obj, NoUpdate) or obj == {\n \"_dash_no_update\": \"_dash_no_update\"\n }\n\n\nGLOBAL_CALLBACK_LIST = []\nGLOBAL_CALLBACK_MAP = {}\nGLOBAL_INLINE_SCRIPTS = []\n\n\n# pylint: disable=too-many-locals\ndef callback(\n *_args,\n background=False,\n interval=1000,\n progress=None,\n progress_default=None,\n running=None,\n cancel=None,\n manager=None,\n cache_args_to_ignore=None,\n **_kwargs,\n):\n \"\"\"\n Normally used as a decorator, `@dash.callback` provides a server-side\n callback relating the values of one or more `Output` items to one or\n more `Input` items which will trigger the callback when they change,\n and optionally `State` items which provide additional information but\n do not trigger the callback directly.\n\n `@dash.callback` is an alternative to `@app.callback` (where `app = dash.Dash()`)\n introduced in Dash 2.0.\n It allows you to register callbacks without defining or importing the `app`\n object. The call signature is identical and it can be used instead of `app.callback`\n in all cases.\n\n The last, optional argument `prevent_initial_call` causes the callback\n not to fire when its outputs are first added to the page. Defaults to\n `False` and unlike `app.callback` is not configurable at the app level.\n\n :Keyword Arguments:\n :param background:\n Mark the callback as a long callback to execute in a manager for\n callbacks that take a long time without locking up the Dash app\n or timing out.\n :param manager:\n A long callback manager instance. Currently, an instance of one of\n `DiskcacheManager` or `CeleryManager`.\n Defaults to the `background_callback_manager` instance provided to the\n `dash.Dash constructor`.\n - A diskcache manager (`DiskcacheManager`) that runs callback\n logic in a separate process and stores the results to disk using the\n diskcache library. This is the easiest backend to use for local\n development.\n - A Celery manager (`CeleryManager`) that runs callback logic\n in a celery worker and returns results to the Dash app through a Celery\n broker like RabbitMQ or Redis.\n :param running:\n A list of 3-element tuples. The first element of each tuple should be\n an `Output` dependency object referencing a property of a component in\n the app layout. The second element is the value that the property\n should be set to while the callback is running, and the third element\n is the value the property should be set to when the callback completes.\n :param cancel:\n A list of `Input` dependency objects that reference a property of a\n component in the app's layout. 
When the value of this property changes\n while a callback is running, the callback is canceled.\n Note that the value of the property is not significant, any change in\n value will result in the cancellation of the running job (if any).\n :param progress:\n An `Output` dependency grouping that references properties of\n components in the app's layout. When provided, the decorated function\n will be called with an extra argument as the first argument to the\n function. This argument, is a function handle that the decorated\n function should call in order to provide updates to the app on its\n current progress. This function accepts a single argument, which\n correspond to the grouping of properties specified in the provided\n `Output` dependency grouping\n :param progress_default:\n A grouping of values that should be assigned to the components\n specified by the `progress` argument when the callback is not in\n progress. If `progress_default` is not provided, all the dependency\n properties specified in `progress` will be set to `None` when the\n callback is not running.\n :param cache_args_to_ignore:\n Arguments to ignore when caching is enabled. If callback is configured\n with keyword arguments (Input/State provided in a dict),\n this should be a list of argument names as strings. Otherwise,\n this should be a list of argument indices as integers.\n :param interval:\n Time to wait between the long callback update requests.\n \"\"\"\n\n long_spec = None\n\n config_prevent_initial_callbacks = _kwargs.pop(\n \"config_prevent_initial_callbacks\", False\n )\n callback_map = _kwargs.pop(\"callback_map\", GLOBAL_CALLBACK_MAP)\n callback_list = _kwargs.pop(\"callback_list\", GLOBAL_CALLBACK_LIST)\n\n if background:\n long_spec = {\n \"interval\": interval,\n }\n\n if manager:\n long_spec[\"manager\"] = manager\n\n if progress:\n long_spec[\"progress\"] = coerce_to_list(progress)\n validate_long_inputs(long_spec[\"progress\"])\n\n if progress_default:\n long_spec[\"progressDefault\"] = coerce_to_list(progress_default)\n\n if not len(long_spec[\"progress\"]) == len(long_spec[\"progressDefault\"]):\n raise Exception(\n \"Progress and progress default needs to be of same length\"\n )\n\n if running:\n long_spec[\"running\"] = coerce_to_list(running)\n validate_long_inputs(x[0] for x in long_spec[\"running\"])\n\n if cancel:\n cancel_inputs = coerce_to_list(cancel)\n validate_long_inputs(cancel_inputs)\n\n cancels_output = [Output(c.component_id, \"id\") for c in cancel_inputs]\n\n try:\n\n @callback(cancels_output, cancel_inputs, prevent_initial_call=True)\n def cancel_call(*_):\n job_ids = flask.request.args.getlist(\"cancelJob\")\n executor = (\n manager or context_value.get().background_callback_manager\n )\n if job_ids:\n for job_id in job_ids:\n executor.terminate_job(job_id)\n return NoUpdate()\n\n except DuplicateCallback:\n pass # Already a callback to cancel, will get the proper jobs from the store.\n\n long_spec[\"cancel\"] = [c.to_dict() for c in cancel_inputs]\n\n if cache_args_to_ignore:\n long_spec[\"cache_args_to_ignore\"] = cache_args_to_ignore\n\n return register_callback(\n callback_list,\n callback_map,\n config_prevent_initial_callbacks,\n *_args,\n **_kwargs,\n long=long_spec,\n )\n\n\ndef validate_long_inputs(deps):\n for dep in deps:\n if dep.has_wildcard():\n raise WildcardInLongCallback(\n f\"\"\"\n long callbacks does not support dependencies with\n pattern-matching ids\n Received: {repr(dep)}\\n\"\"\"\n )\n\n\ndef clientside_callback(clientside_function, *args, 
**kwargs):\n return register_clientside_callback(\n GLOBAL_CALLBACK_LIST,\n GLOBAL_CALLBACK_MAP,\n False,\n GLOBAL_INLINE_SCRIPTS,\n clientside_function,\n *args,\n **kwargs,\n )\n\n\ndef insert_callback(\n callback_list,\n callback_map,\n config_prevent_initial_callbacks,\n output,\n outputs_indices,\n inputs,\n state,\n inputs_state_indices,\n prevent_initial_call,\n long=None,\n):\n if prevent_initial_call is None:\n prevent_initial_call = config_prevent_initial_callbacks\n\n callback_id = create_callback_id(output)\n callback_spec = {\n \"output\": callback_id,\n \"inputs\": [c.to_dict() for c in inputs],\n \"state\": [c.to_dict() for c in state],\n \"clientside_function\": None,\n \"prevent_initial_call\": prevent_initial_call,\n \"long\": long\n and {\n \"interval\": long[\"interval\"],\n },\n }\n\n callback_map[callback_id] = {\n \"inputs\": callback_spec[\"inputs\"],\n \"state\": callback_spec[\"state\"],\n \"outputs_indices\": outputs_indices,\n \"inputs_state_indices\": inputs_state_indices,\n \"long\": long,\n \"output\": output,\n \"raw_inputs\": inputs,\n }\n callback_list.append(callback_spec)\n\n return callback_id\n\n\n# pylint: disable=R0912, R0915\ndef register_callback( # pylint: disable=R0914\n callback_list, callback_map, config_prevent_initial_callbacks, *_args, **_kwargs\n):\n (\n output,\n flat_inputs,\n flat_state,\n inputs_state_indices,\n prevent_initial_call,\n ) = handle_grouped_callback_args(_args, _kwargs)\n if isinstance(output, Output):\n # Insert callback with scalar (non-multi) Output\n insert_output = output\n multi = False\n else:\n # Insert callback as multi Output\n insert_output = flatten_grouping(output)\n multi = True\n\n long = _kwargs.get(\"long\")\n\n output_indices = make_grouping_by_index(output, list(range(grouping_len(output))))\n callback_id = insert_callback(\n callback_list,\n callback_map,\n config_prevent_initial_callbacks,\n insert_output,\n output_indices,\n flat_inputs,\n flat_state,\n inputs_state_indices,\n prevent_initial_call,\n long=long,\n )\n\n # pylint: disable=too-many-locals\n def wrap_func(func):\n\n if long is not None:\n long_key = BaseLongCallbackManager.register_func(\n func, long.get(\"progress\") is not None\n )\n\n @wraps(func)\n def add_context(*args, **kwargs):\n output_spec = kwargs.pop(\"outputs_list\")\n app_callback_manager = kwargs.pop(\"long_callback_manager\", None)\n callback_ctx = kwargs.pop(\"callback_context\", {})\n callback_manager = long and long.get(\"manager\", app_callback_manager)\n _validate.validate_output_spec(insert_output, output_spec, Output)\n\n context_value.set(callback_ctx)\n\n func_args, func_kwargs = _validate.validate_and_group_input_args(\n args, inputs_state_indices\n )\n\n response = {\"multi\": True}\n\n if long is not None:\n if not callback_manager:\n raise MissingLongCallbackManagerError(\n \"Running `long` callbacks requires a manager to be installed.\\n\"\n \"Available managers:\\n\"\n \"- Diskcache (`pip install dash[diskcache]`) to run callbacks in a separate Process\"\n \" and store results on the local filesystem.\\n\"\n \"- Celery (`pip install dash[celery]`) to run callbacks in a celery worker\"\n \" and store results on redis.\\n\"\n )\n\n progress_outputs = long.get(\"progress\")\n cache_key = flask.request.args.get(\"cacheKey\")\n job_id = flask.request.args.get(\"job\")\n old_job = flask.request.args.getlist(\"oldJob\")\n\n current_key = callback_manager.build_cache_key(\n func,\n # Inputs provided as dict is kwargs.\n func_args if func_args else func_kwargs,\n 
long.get(\"cache_args_to_ignore\", []),\n )\n\n if old_job:\n for job in old_job:\n callback_manager.terminate_job(job)\n\n if not cache_key:\n cache_key = current_key\n\n job_fn = callback_manager.func_registry.get(long_key)\n\n job = callback_manager.call_job_fn(\n cache_key,\n job_fn,\n args,\n AttributeDict(\n args_grouping=callback_ctx.args_grouping,\n using_args_grouping=callback_ctx.using_args_grouping,\n outputs_grouping=callback_ctx.outputs_grouping,\n using_outputs_grouping=callback_ctx.using_outputs_grouping,\n inputs_list=callback_ctx.inputs_list,\n states_list=callback_ctx.states_list,\n outputs_list=callback_ctx.outputs_list,\n input_values=callback_ctx.input_values,\n state_values=callback_ctx.state_values,\n triggered_inputs=callback_ctx.triggered_inputs,\n ),\n )\n\n data = {\n \"cacheKey\": cache_key,\n \"job\": job,\n }\n\n running = long.get(\"running\")\n\n if running:\n data[\"running\"] = {str(r[0]): r[1] for r in running}\n data[\"runningOff\"] = {str(r[0]): r[2] for r in running}\n cancel = long.get(\"cancel\")\n if cancel:\n data[\"cancel\"] = cancel\n\n progress_default = long.get(\"progressDefault\")\n if progress_default:\n data[\"progressDefault\"] = {\n str(o): x\n for o, x in zip(progress_outputs, progress_default)\n }\n return to_json(data)\n if progress_outputs:\n # Get the progress before the result as it would be erased after the results.\n progress = callback_manager.get_progress(cache_key)\n if progress:\n response[\"progress\"] = {\n str(x): progress[i] for i, x in enumerate(progress_outputs)\n }\n\n output_value = callback_manager.get_result(cache_key, job_id)\n # Must get job_running after get_result since get_results terminates it.\n job_running = callback_manager.job_running(job_id)\n if not job_running and output_value is callback_manager.UNDEFINED:\n # Job canceled -> no output to close the loop.\n output_value = NoUpdate()\n\n elif (\n isinstance(output_value, dict)\n and \"long_callback_error\" in output_value\n ):\n error = output_value.get(\"long_callback_error\")\n raise LongCallbackError(\n f\"An error occurred inside a long callback: {error['msg']}\\n{error['tb']}\"\n )\n\n if job_running and output_value is not callback_manager.UNDEFINED:\n # cached results.\n callback_manager.terminate_job(job_id)\n\n if multi and isinstance(output_value, (list, tuple)):\n output_value = [\n NoUpdate() if NoUpdate.is_no_update(r) else r\n for r in output_value\n ]\n\n if output_value is callback_manager.UNDEFINED:\n return to_json(response)\n else:\n # don't touch the comment on the next line - used by debugger\n output_value = func(*func_args, **func_kwargs) # %% callback invoked %%\n\n if NoUpdate.is_no_update(output_value):\n raise PreventUpdate\n\n if not multi:\n output_value, output_spec = [output_value], [output_spec]\n flat_output_values = output_value\n else:\n if isinstance(output_value, (list, tuple)):\n # For multi-output, allow top-level collection to be\n # list or tuple\n output_value = list(output_value)\n\n # Flatten grouping and validate grouping structure\n flat_output_values = flatten_grouping(output_value, output)\n\n _validate.validate_multi_return(\n output_spec, flat_output_values, callback_id\n )\n\n component_ids = collections.defaultdict(dict)\n has_update = False\n for val, spec in zip(flat_output_values, output_spec):\n if isinstance(val, NoUpdate):\n continue\n for vali, speci in (\n zip(val, spec) if isinstance(spec, list) else [[val, spec]]\n ):\n if not isinstance(vali, NoUpdate):\n has_update = True\n id_str = 
stringify_id(speci[\"id\"])\n component_ids[id_str][speci[\"property\"]] = vali\n\n if not has_update:\n raise PreventUpdate\n\n response[\"response\"] = component_ids\n\n try:\n jsonResponse = to_json(response)\n except TypeError:\n _validate.fail_callback_output(output_value, output)\n\n return jsonResponse\n\n callback_map[callback_id][\"callback\"] = add_context\n\n return func\n\n return wrap_func\n\n\n_inline_clientside_template = \"\"\"\nvar clientside = window.dash_clientside = window.dash_clientside || {{}};\nvar ns = clientside[\"{namespace}\"] = clientside[\"{namespace}\"] || {{}};\nns[\"{function_name}\"] = {clientside_function};\n\"\"\"\n\n\ndef register_clientside_callback(\n callback_list,\n callback_map,\n config_prevent_initial_callbacks,\n inline_scripts,\n clientside_function,\n *args,\n **kwargs,\n):\n output, inputs, state, prevent_initial_call = handle_callback_args(args, kwargs)\n insert_callback(\n callback_list,\n callback_map,\n config_prevent_initial_callbacks,\n output,\n None,\n inputs,\n state,\n None,\n prevent_initial_call,\n )\n\n # If JS source is explicitly given, create a namespace and function\n # name, then inject the code.\n if isinstance(clientside_function, str):\n\n out0 = output\n if isinstance(output, (list, tuple)):\n out0 = output[0]\n\n namespace = f\"_dashprivate_{out0.component_id}\"\n function_name = out0.component_property\n\n inline_scripts.append(\n _inline_clientside_template.format(\n namespace=namespace.replace('\"', '\\\\\"'),\n function_name=function_name.replace('\"', '\\\\\"'),\n clientside_function=clientside_function,\n )\n )\n\n # Callback is stored in an external asset.\n else:\n namespace = clientside_function.namespace\n function_name = clientside_function.function_name\n\n callback_list[-1][\"clientside_function\"] = {\n \"namespace\": namespace,\n \"function_name\": function_name,\n }\n", "path": "dash/_callback.py" } ]
[ { "content": "import collections\nfrom functools import wraps\n\nimport flask\n\nfrom .dependencies import (\n handle_callback_args,\n handle_grouped_callback_args,\n Output,\n)\nfrom .exceptions import (\n PreventUpdate,\n WildcardInLongCallback,\n DuplicateCallback,\n MissingLongCallbackManagerError,\n LongCallbackError,\n)\n\nfrom ._grouping import (\n flatten_grouping,\n make_grouping_by_index,\n grouping_len,\n)\nfrom ._utils import (\n create_callback_id,\n stringify_id,\n to_json,\n coerce_to_list,\n AttributeDict,\n)\n\nfrom . import _validate\nfrom .long_callback.managers import BaseLongCallbackManager\nfrom ._callback_context import context_value\n\n\nclass NoUpdate:\n def to_plotly_json(self): # pylint: disable=no-self-use\n return {\"_dash_no_update\": \"_dash_no_update\"}\n\n @staticmethod\n def is_no_update(obj):\n return isinstance(obj, NoUpdate) or (\n isinstance(obj, dict) and obj == {\"_dash_no_update\": \"_dash_no_update\"}\n )\n\n\nGLOBAL_CALLBACK_LIST = []\nGLOBAL_CALLBACK_MAP = {}\nGLOBAL_INLINE_SCRIPTS = []\n\n\n# pylint: disable=too-many-locals\ndef callback(\n *_args,\n background=False,\n interval=1000,\n progress=None,\n progress_default=None,\n running=None,\n cancel=None,\n manager=None,\n cache_args_to_ignore=None,\n **_kwargs,\n):\n \"\"\"\n Normally used as a decorator, `@dash.callback` provides a server-side\n callback relating the values of one or more `Output` items to one or\n more `Input` items which will trigger the callback when they change,\n and optionally `State` items which provide additional information but\n do not trigger the callback directly.\n\n `@dash.callback` is an alternative to `@app.callback` (where `app = dash.Dash()`)\n introduced in Dash 2.0.\n It allows you to register callbacks without defining or importing the `app`\n object. The call signature is identical and it can be used instead of `app.callback`\n in all cases.\n\n The last, optional argument `prevent_initial_call` causes the callback\n not to fire when its outputs are first added to the page. Defaults to\n `False` and unlike `app.callback` is not configurable at the app level.\n\n :Keyword Arguments:\n :param background:\n Mark the callback as a long callback to execute in a manager for\n callbacks that take a long time without locking up the Dash app\n or timing out.\n :param manager:\n A long callback manager instance. Currently, an instance of one of\n `DiskcacheManager` or `CeleryManager`.\n Defaults to the `background_callback_manager` instance provided to the\n `dash.Dash constructor`.\n - A diskcache manager (`DiskcacheManager`) that runs callback\n logic in a separate process and stores the results to disk using the\n diskcache library. This is the easiest backend to use for local\n development.\n - A Celery manager (`CeleryManager`) that runs callback logic\n in a celery worker and returns results to the Dash app through a Celery\n broker like RabbitMQ or Redis.\n :param running:\n A list of 3-element tuples. The first element of each tuple should be\n an `Output` dependency object referencing a property of a component in\n the app layout. The second element is the value that the property\n should be set to while the callback is running, and the third element\n is the value the property should be set to when the callback completes.\n :param cancel:\n A list of `Input` dependency objects that reference a property of a\n component in the app's layout. 
When the value of this property changes\n while a callback is running, the callback is canceled.\n Note that the value of the property is not significant, any change in\n value will result in the cancellation of the running job (if any).\n :param progress:\n An `Output` dependency grouping that references properties of\n components in the app's layout. When provided, the decorated function\n will be called with an extra argument as the first argument to the\n function. This argument, is a function handle that the decorated\n function should call in order to provide updates to the app on its\n current progress. This function accepts a single argument, which\n correspond to the grouping of properties specified in the provided\n `Output` dependency grouping\n :param progress_default:\n A grouping of values that should be assigned to the components\n specified by the `progress` argument when the callback is not in\n progress. If `progress_default` is not provided, all the dependency\n properties specified in `progress` will be set to `None` when the\n callback is not running.\n :param cache_args_to_ignore:\n Arguments to ignore when caching is enabled. If callback is configured\n with keyword arguments (Input/State provided in a dict),\n this should be a list of argument names as strings. Otherwise,\n this should be a list of argument indices as integers.\n :param interval:\n Time to wait between the long callback update requests.\n \"\"\"\n\n long_spec = None\n\n config_prevent_initial_callbacks = _kwargs.pop(\n \"config_prevent_initial_callbacks\", False\n )\n callback_map = _kwargs.pop(\"callback_map\", GLOBAL_CALLBACK_MAP)\n callback_list = _kwargs.pop(\"callback_list\", GLOBAL_CALLBACK_LIST)\n\n if background:\n long_spec = {\n \"interval\": interval,\n }\n\n if manager:\n long_spec[\"manager\"] = manager\n\n if progress:\n long_spec[\"progress\"] = coerce_to_list(progress)\n validate_long_inputs(long_spec[\"progress\"])\n\n if progress_default:\n long_spec[\"progressDefault\"] = coerce_to_list(progress_default)\n\n if not len(long_spec[\"progress\"]) == len(long_spec[\"progressDefault\"]):\n raise Exception(\n \"Progress and progress default needs to be of same length\"\n )\n\n if running:\n long_spec[\"running\"] = coerce_to_list(running)\n validate_long_inputs(x[0] for x in long_spec[\"running\"])\n\n if cancel:\n cancel_inputs = coerce_to_list(cancel)\n validate_long_inputs(cancel_inputs)\n\n cancels_output = [Output(c.component_id, \"id\") for c in cancel_inputs]\n\n try:\n\n @callback(cancels_output, cancel_inputs, prevent_initial_call=True)\n def cancel_call(*_):\n job_ids = flask.request.args.getlist(\"cancelJob\")\n executor = (\n manager or context_value.get().background_callback_manager\n )\n if job_ids:\n for job_id in job_ids:\n executor.terminate_job(job_id)\n return NoUpdate()\n\n except DuplicateCallback:\n pass # Already a callback to cancel, will get the proper jobs from the store.\n\n long_spec[\"cancel\"] = [c.to_dict() for c in cancel_inputs]\n\n if cache_args_to_ignore:\n long_spec[\"cache_args_to_ignore\"] = cache_args_to_ignore\n\n return register_callback(\n callback_list,\n callback_map,\n config_prevent_initial_callbacks,\n *_args,\n **_kwargs,\n long=long_spec,\n )\n\n\ndef validate_long_inputs(deps):\n for dep in deps:\n if dep.has_wildcard():\n raise WildcardInLongCallback(\n f\"\"\"\n long callbacks does not support dependencies with\n pattern-matching ids\n Received: {repr(dep)}\\n\"\"\"\n )\n\n\ndef clientside_callback(clientside_function, *args, 
**kwargs):\n return register_clientside_callback(\n GLOBAL_CALLBACK_LIST,\n GLOBAL_CALLBACK_MAP,\n False,\n GLOBAL_INLINE_SCRIPTS,\n clientside_function,\n *args,\n **kwargs,\n )\n\n\ndef insert_callback(\n callback_list,\n callback_map,\n config_prevent_initial_callbacks,\n output,\n outputs_indices,\n inputs,\n state,\n inputs_state_indices,\n prevent_initial_call,\n long=None,\n):\n if prevent_initial_call is None:\n prevent_initial_call = config_prevent_initial_callbacks\n\n callback_id = create_callback_id(output)\n callback_spec = {\n \"output\": callback_id,\n \"inputs\": [c.to_dict() for c in inputs],\n \"state\": [c.to_dict() for c in state],\n \"clientside_function\": None,\n \"prevent_initial_call\": prevent_initial_call,\n \"long\": long\n and {\n \"interval\": long[\"interval\"],\n },\n }\n\n callback_map[callback_id] = {\n \"inputs\": callback_spec[\"inputs\"],\n \"state\": callback_spec[\"state\"],\n \"outputs_indices\": outputs_indices,\n \"inputs_state_indices\": inputs_state_indices,\n \"long\": long,\n \"output\": output,\n \"raw_inputs\": inputs,\n }\n callback_list.append(callback_spec)\n\n return callback_id\n\n\n# pylint: disable=R0912, R0915\ndef register_callback( # pylint: disable=R0914\n callback_list, callback_map, config_prevent_initial_callbacks, *_args, **_kwargs\n):\n (\n output,\n flat_inputs,\n flat_state,\n inputs_state_indices,\n prevent_initial_call,\n ) = handle_grouped_callback_args(_args, _kwargs)\n if isinstance(output, Output):\n # Insert callback with scalar (non-multi) Output\n insert_output = output\n multi = False\n else:\n # Insert callback as multi Output\n insert_output = flatten_grouping(output)\n multi = True\n\n long = _kwargs.get(\"long\")\n\n output_indices = make_grouping_by_index(output, list(range(grouping_len(output))))\n callback_id = insert_callback(\n callback_list,\n callback_map,\n config_prevent_initial_callbacks,\n insert_output,\n output_indices,\n flat_inputs,\n flat_state,\n inputs_state_indices,\n prevent_initial_call,\n long=long,\n )\n\n # pylint: disable=too-many-locals\n def wrap_func(func):\n\n if long is not None:\n long_key = BaseLongCallbackManager.register_func(\n func, long.get(\"progress\") is not None\n )\n\n @wraps(func)\n def add_context(*args, **kwargs):\n output_spec = kwargs.pop(\"outputs_list\")\n app_callback_manager = kwargs.pop(\"long_callback_manager\", None)\n callback_ctx = kwargs.pop(\"callback_context\", {})\n callback_manager = long and long.get(\"manager\", app_callback_manager)\n _validate.validate_output_spec(insert_output, output_spec, Output)\n\n context_value.set(callback_ctx)\n\n func_args, func_kwargs = _validate.validate_and_group_input_args(\n args, inputs_state_indices\n )\n\n response = {\"multi\": True}\n\n if long is not None:\n if not callback_manager:\n raise MissingLongCallbackManagerError(\n \"Running `long` callbacks requires a manager to be installed.\\n\"\n \"Available managers:\\n\"\n \"- Diskcache (`pip install dash[diskcache]`) to run callbacks in a separate Process\"\n \" and store results on the local filesystem.\\n\"\n \"- Celery (`pip install dash[celery]`) to run callbacks in a celery worker\"\n \" and store results on redis.\\n\"\n )\n\n progress_outputs = long.get(\"progress\")\n cache_key = flask.request.args.get(\"cacheKey\")\n job_id = flask.request.args.get(\"job\")\n old_job = flask.request.args.getlist(\"oldJob\")\n\n current_key = callback_manager.build_cache_key(\n func,\n # Inputs provided as dict is kwargs.\n func_args if func_args else func_kwargs,\n 
long.get(\"cache_args_to_ignore\", []),\n )\n\n if old_job:\n for job in old_job:\n callback_manager.terminate_job(job)\n\n if not cache_key:\n cache_key = current_key\n\n job_fn = callback_manager.func_registry.get(long_key)\n\n job = callback_manager.call_job_fn(\n cache_key,\n job_fn,\n args,\n AttributeDict(\n args_grouping=callback_ctx.args_grouping,\n using_args_grouping=callback_ctx.using_args_grouping,\n outputs_grouping=callback_ctx.outputs_grouping,\n using_outputs_grouping=callback_ctx.using_outputs_grouping,\n inputs_list=callback_ctx.inputs_list,\n states_list=callback_ctx.states_list,\n outputs_list=callback_ctx.outputs_list,\n input_values=callback_ctx.input_values,\n state_values=callback_ctx.state_values,\n triggered_inputs=callback_ctx.triggered_inputs,\n ),\n )\n\n data = {\n \"cacheKey\": cache_key,\n \"job\": job,\n }\n\n running = long.get(\"running\")\n\n if running:\n data[\"running\"] = {str(r[0]): r[1] for r in running}\n data[\"runningOff\"] = {str(r[0]): r[2] for r in running}\n cancel = long.get(\"cancel\")\n if cancel:\n data[\"cancel\"] = cancel\n\n progress_default = long.get(\"progressDefault\")\n if progress_default:\n data[\"progressDefault\"] = {\n str(o): x\n for o, x in zip(progress_outputs, progress_default)\n }\n return to_json(data)\n if progress_outputs:\n # Get the progress before the result as it would be erased after the results.\n progress = callback_manager.get_progress(cache_key)\n if progress:\n response[\"progress\"] = {\n str(x): progress[i] for i, x in enumerate(progress_outputs)\n }\n\n output_value = callback_manager.get_result(cache_key, job_id)\n # Must get job_running after get_result since get_results terminates it.\n job_running = callback_manager.job_running(job_id)\n if not job_running and output_value is callback_manager.UNDEFINED:\n # Job canceled -> no output to close the loop.\n output_value = NoUpdate()\n\n elif (\n isinstance(output_value, dict)\n and \"long_callback_error\" in output_value\n ):\n error = output_value.get(\"long_callback_error\")\n raise LongCallbackError(\n f\"An error occurred inside a long callback: {error['msg']}\\n{error['tb']}\"\n )\n\n if job_running and output_value is not callback_manager.UNDEFINED:\n # cached results.\n callback_manager.terminate_job(job_id)\n\n if multi and isinstance(output_value, (list, tuple)):\n output_value = [\n NoUpdate() if NoUpdate.is_no_update(r) else r\n for r in output_value\n ]\n\n if output_value is callback_manager.UNDEFINED:\n return to_json(response)\n else:\n # don't touch the comment on the next line - used by debugger\n output_value = func(*func_args, **func_kwargs) # %% callback invoked %%\n\n if NoUpdate.is_no_update(output_value):\n raise PreventUpdate\n\n if not multi:\n output_value, output_spec = [output_value], [output_spec]\n flat_output_values = output_value\n else:\n if isinstance(output_value, (list, tuple)):\n # For multi-output, allow top-level collection to be\n # list or tuple\n output_value = list(output_value)\n\n # Flatten grouping and validate grouping structure\n flat_output_values = flatten_grouping(output_value, output)\n\n _validate.validate_multi_return(\n output_spec, flat_output_values, callback_id\n )\n\n component_ids = collections.defaultdict(dict)\n has_update = False\n for val, spec in zip(flat_output_values, output_spec):\n if isinstance(val, NoUpdate):\n continue\n for vali, speci in (\n zip(val, spec) if isinstance(spec, list) else [[val, spec]]\n ):\n if not isinstance(vali, NoUpdate):\n has_update = True\n id_str = 
stringify_id(speci[\"id\"])\n component_ids[id_str][speci[\"property\"]] = vali\n\n if not has_update:\n raise PreventUpdate\n\n response[\"response\"] = component_ids\n\n try:\n jsonResponse = to_json(response)\n except TypeError:\n _validate.fail_callback_output(output_value, output)\n\n return jsonResponse\n\n callback_map[callback_id][\"callback\"] = add_context\n\n return func\n\n return wrap_func\n\n\n_inline_clientside_template = \"\"\"\nvar clientside = window.dash_clientside = window.dash_clientside || {{}};\nvar ns = clientside[\"{namespace}\"] = clientside[\"{namespace}\"] || {{}};\nns[\"{function_name}\"] = {clientside_function};\n\"\"\"\n\n\ndef register_clientside_callback(\n callback_list,\n callback_map,\n config_prevent_initial_callbacks,\n inline_scripts,\n clientside_function,\n *args,\n **kwargs,\n):\n output, inputs, state, prevent_initial_call = handle_callback_args(args, kwargs)\n insert_callback(\n callback_list,\n callback_map,\n config_prevent_initial_callbacks,\n output,\n None,\n inputs,\n state,\n None,\n prevent_initial_call,\n )\n\n # If JS source is explicitly given, create a namespace and function\n # name, then inject the code.\n if isinstance(clientside_function, str):\n\n out0 = output\n if isinstance(output, (list, tuple)):\n out0 = output[0]\n\n namespace = f\"_dashprivate_{out0.component_id}\"\n function_name = out0.component_property\n\n inline_scripts.append(\n _inline_clientside_template.format(\n namespace=namespace.replace('\"', '\\\\\"'),\n function_name=function_name.replace('\"', '\\\\\"'),\n clientside_function=clientside_function,\n )\n )\n\n # Callback is stored in an external asset.\n else:\n namespace = clientside_function.namespace\n function_name = clientside_function.function_name\n\n callback_list[-1][\"clientside_function\"] = {\n \"namespace\": namespace,\n \"function_name\": function_name,\n }\n", "path": "dash/_callback.py" } ]
diff --git a/CHANGELOG.md b/CHANGELOG.md index 1a73465b95..ae4c6516dc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,9 @@ This project adheres to [Semantic Versioning](https://semver.org/). ## [UNRELEASED] +### Fixed + +- [#2175](https://github.com/plotly/dash/pull/2175) Fix [#2173](https://github.com/plotly/dash/issues/2173) callback output of ndarray and no_update check. - [#2146](https://github.com/plotly/dash/pull/2146) Remove leftover debug console.log statement. - [#2168](https://github.com/plotly/dash/pull/2168) Reverts [#2126](https://github.com/plotly/dash/pull/2126) (supporting redirect from root when using pages) until the new bugs introduced by that PR are fixed. diff --git a/dash/_callback.py b/dash/_callback.py index 6f48ca63c1..b23f14c6c3 100644 --- a/dash/_callback.py +++ b/dash/_callback.py @@ -40,9 +40,9 @@ def to_plotly_json(self): # pylint: disable=no-self-use @staticmethod def is_no_update(obj): - return isinstance(obj, NoUpdate) or obj == { - "_dash_no_update": "_dash_no_update" - } + return isinstance(obj, NoUpdate) or ( + isinstance(obj, dict) and obj == {"_dash_no_update": "_dash_no_update"} + ) GLOBAL_CALLBACK_LIST = [] diff --git a/tests/integration/callbacks/test_basic_callback.py b/tests/integration/callbacks/test_basic_callback.py index bd1cb479bf..709122dc6b 100644 --- a/tests/integration/callbacks/test_basic_callback.py +++ b/tests/integration/callbacks/test_basic_callback.py @@ -4,6 +4,7 @@ import pytest import time +import numpy as np import werkzeug from dash_test_components import ( @@ -765,3 +766,19 @@ def update_output(value): return f"returning {value}" assert update_output("my-value") == "returning my-value" + + +def test_cbsc018_callback_ndarray_output(dash_duo): + app = Dash(__name__) + app.layout = html.Div([dcc.Store(id="output"), html.Button("click", id="clicker")]) + + @app.callback( + Output("output", "data"), + Input("clicker", "n_clicks"), + ) + def on_click(_): + return np.array([[1, 2, 3], [4, 5, 6]], np.int32) + + dash_duo.start_server(app) + + assert dash_duo.get_logs() == []
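For context on the one-line guard in the diff above, here is a minimal, standalone sketch (not part of the patch; `no_update_token` and `arr` are illustrative names, and the exact NumPy behaviour can vary slightly by version) of why comparing an ndarray against the sentinel dict breaks a plain truth test, and why the added `isinstance` check avoids it.

```python
import numpy as np

no_update_token = {"_dash_no_update": "_dash_no_update"}
arr = np.array([[1, 2, 3], [4, 5, 6]], np.int32)

# Pre-fix style of check: the dict comparison does not reduce to a single
# bool for an ndarray -- NumPy typically broadcasts it elementwise -- so a
# later `if is_no_update(output_value):` raises
# "ValueError: The truth value of an array ... is ambiguous".
unsafe = arr == no_update_token
print(type(unsafe))  # typically an ndarray of booleans, not a plain bool

# Post-fix style of check: guard on the type first, so non-dict outputs
# short-circuit to a plain False and the truth test stays safe.
safe = isinstance(arr, dict) and arr == no_update_token
print(safe)  # False
```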
wright-group__WrightTools-878
pcov TypeError in kit._leastsq
In kit._leastsq, if the `if` statement on line 62 is not satisfied, the subsequent `else` statement makes the `pcov` data type float, triggering "TypeError: 'int' object is not subscriptable" in lines 72-73:

72: try:
73:     error.append(np.absolute(pcov[i][i]) ** 0.5)

Line 74 picks up index-out-of-bounds errors; I am not sure whether it was meant to catch the type error.

74: except IndexError:
75:     error.append(0.00)

The error is bypassed if I put a 2D array into line 68, but I have not spent the time considering what this array should look like.
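A minimal, self-contained reproduction of the failure mode described above (illustrative only; it mirrors lines 68-75 of `_leastsq.py` rather than importing them):

```python
import numpy as np

pcov = np.inf  # what the `else` branch on line 68 assigns

try:
    err = np.absolute(pcov[0][0]) ** 0.5
except IndexError:  # the handler on lines 74-75 only expects this
    err = 0.00
except TypeError as exc:
    # This is the branch that actually fires: a scalar float cannot be
    # subscripted, so the IndexError handler never gets a chance.
    print("unhandled in the original code:", exc)
```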
[ { "content": "\"\"\"Least-square fitting tools.\"\"\"\n\n\n# --- import --------------------------------------------------------------------------------------\n\n\nfrom ._utilities import Timer\n\nimport numpy as np\n\nfrom scipy import optimize as scipy_optimize\n\n\n# --- define --------------------------------------------------------------------------------------\n\n\n__all__ = [\"leastsqfitter\"]\n\n\n# --- functions -----------------------------------------------------------------------------------\n\n\ndef leastsqfitter(p0, datax, datay, function, verbose=False, cov_verbose=False):\n \"\"\"Conveniently call scipy.optmize.leastsq().\n\n Returns fit parameters and their errors.\n\n Parameters\n ----------\n p0 : list\n list of guess parameters to pass to function\n datax : array\n array of independent values\n datay : array\n array of dependent values\n function : function\n function object to fit data to. Must be of the callable form function(p, x)\n verbose : bool\n toggles printing of fit time, fit params, and fit param errors\n cov_verbose : bool\n toggles printing of covarience matrix\n\n Returns\n -------\n pfit_leastsq : list\n list of fit parameters. s.t. the error between datay and function(p, datax) is minimized\n perr_leastsq : list\n list of fit parameter errors (1 std)\n \"\"\"\n timer = Timer(verbose=False)\n with timer:\n # define error function\n def errfunc(p, x, y):\n return y - function(p, x)\n\n # run optimization\n pfit_leastsq, pcov, infodict, errmsg, success = scipy_optimize.leastsq(\n errfunc, p0, args=(datax, datay), full_output=1, epsfcn=0.0001\n )\n # calculate covarience matrix\n # original idea https://stackoverflow.com/a/21844726\n if (len(datay) > len(p0)) and pcov is not None:\n s_sq = (errfunc(pfit_leastsq, datax, datay) ** 2).sum() / (len(datay) - len(p0))\n pcov = pcov * s_sq\n if cov_verbose:\n print(pcov)\n else:\n pcov = np.inf\n # calculate and write errors\n error = []\n for i in range(len(pfit_leastsq)):\n try:\n error.append(np.absolute(pcov[i][i]) ** 0.5)\n except IndexError:\n error.append(0.00)\n perr_leastsq = np.array(error)\n # exit\n if verbose:\n print(\"fit params: \", pfit_leastsq)\n print(\"fit params error: \", perr_leastsq)\n print(\"fitting done in %f seconds\" % timer.interval)\n return pfit_leastsq, perr_leastsq\n", "path": "WrightTools/kit/_leastsq.py" } ]
[ { "content": "\"\"\"Least-square fitting tools.\"\"\"\n\n\n# --- import --------------------------------------------------------------------------------------\n\n\nfrom ._utilities import Timer\n\nimport numpy as np\n\nfrom scipy import optimize as scipy_optimize\n\n\n# --- define --------------------------------------------------------------------------------------\n\n\n__all__ = [\"leastsqfitter\"]\n\n\n# --- functions -----------------------------------------------------------------------------------\n\n\ndef leastsqfitter(p0, datax, datay, function, verbose=False, cov_verbose=False):\n \"\"\"Conveniently call scipy.optmize.leastsq().\n\n Returns fit parameters and their errors.\n\n Parameters\n ----------\n p0 : list\n list of guess parameters to pass to function\n datax : array\n array of independent values\n datay : array\n array of dependent values\n function : function\n function object to fit data to. Must be of the callable form function(p, x)\n verbose : bool\n toggles printing of fit time, fit params, and fit param errors\n cov_verbose : bool\n toggles printing of covarience matrix\n\n Returns\n -------\n pfit_leastsq : list\n list of fit parameters. s.t. the error between datay and function(p, datax) is minimized\n perr_leastsq : list\n list of fit parameter errors (1 std)\n \"\"\"\n timer = Timer(verbose=False)\n with timer:\n # define error function\n def errfunc(p, x, y):\n return y - function(p, x)\n\n # run optimization\n pfit_leastsq, pcov, infodict, errmsg, success = scipy_optimize.leastsq(\n errfunc, p0, args=(datax, datay), full_output=1, epsfcn=0.0001\n )\n # calculate covarience matrix\n # original idea https://stackoverflow.com/a/21844726\n if (len(datay) > len(p0)) and pcov is not None:\n s_sq = (errfunc(pfit_leastsq, datax, datay) ** 2).sum() / (len(datay) - len(p0))\n pcov = pcov * s_sq\n if cov_verbose:\n print(pcov)\n else:\n pcov = np.array(np.inf)\n # calculate and write errors\n error = []\n for i in range(len(pfit_leastsq)):\n try:\n error.append(np.absolute(pcov[i][i]) ** 0.5)\n except IndexError:\n error.append(0.00)\n perr_leastsq = np.array(error)\n # exit\n if verbose:\n print(\"fit params: \", pfit_leastsq)\n print(\"fit params error: \", perr_leastsq)\n print(\"fitting done in %f seconds\" % timer.interval)\n return pfit_leastsq, perr_leastsq\n", "path": "WrightTools/kit/_leastsq.py" } ]
diff --git a/WrightTools/kit/_leastsq.py b/WrightTools/kit/_leastsq.py index 1c9393579..d448ba367 100644 --- a/WrightTools/kit/_leastsq.py +++ b/WrightTools/kit/_leastsq.py @@ -65,7 +65,7 @@ def errfunc(p, x, y): if cov_verbose: print(pcov) else: - pcov = np.inf + pcov = np.array(np.inf) # calculate and write errors error = [] for i in range(len(pfit_leastsq)): diff --git a/tests/kit/leastsqfitter.py b/tests/kit/leastsqfitter.py new file mode 100644 index 000000000..184eaba2b --- /dev/null +++ b/tests/kit/leastsqfitter.py @@ -0,0 +1,19 @@ +import numpy as np +import WrightTools as wt + + +def test_leastsq(): + x = np.linspace(0, 10) + y = np.linspace(10, 0) + fit, cov = wt.kit.leastsqfitter([0, 0], x, y, lambda p, x: p[0] * x + p[1]) + assert np.allclose(fit, [-1, 10]) + assert np.allclose(cov, [0, 0]) + + +def test_leastsq_no_corr(): + x = np.linspace(0, 10) + y = np.linspace(10, 0) + # The third parameter does not determine output, this caused an exception in wt <= 3.2.1 + fit, cov = wt.kit.leastsqfitter([0, 0, 0], x, y, lambda p, x: p[0] * x + p[1]) + assert np.allclose(fit, [-1, 10, 0]) + assert np.allclose(cov, [0, 0, 0])
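Why the single-line change in the diff above is sufficient (a sketch of the NumPy behaviour it relies on, not code from the repository): indexing a 0-d array raises IndexError, which the existing handler already converts to an error estimate of 0.0.

```python
import numpy as np

pcov = np.array(np.inf)  # the patched assignment

try:
    err = np.absolute(pcov[0][0]) ** 0.5
except IndexError:
    err = 0.00  # now reachable: indexing a 0-d array raises IndexError
print(err)      # 0.0
```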
spacetelescope__jwql-569
Write tests for bokeh templating software
With the merge of #459, bokeh templating will be implemented for `jwql`. We should address the test coverage for this software.
[ { "content": "\"\"\"\nThis is a minimal example demonstrating how to create a Bokeh app using\nthe ``bokeh-templating`` package and the associated YAML template files.\n\nAuthor\n-------\n\n - Graham Kanarek\n\nDependencies\n------------\n\n The user must have PyYAML, Bokeh, and the ``bokeh-templating``\n packages installed.\n\"\"\"\n\nimport os\nimport numpy as np\n\nfrom jwql.bokeh_templating import BokehTemplate\n\nfile_dir = os.path.dirname(os.path.realpath(__file__))\n\n\nclass TestBokehApp(BokehTemplate):\n \"\"\"This is a minimal ``BokehTemplate`` app.\"\"\"\n\n def pre_init(self):\n \"\"\"Before creating the Bokeh interface (by parsing the interface\n file), we must initialize our ``a`` and ``b`` variables, and set\n the path to the interface file.\n \"\"\"\n\n self.a, self.b = 4, 2\n\n self.format_string = None\n self.interface_file = os.path.join(file_dir, \"example_interface.yaml\")\n\n # No post-initialization tasks are required.\n post_init = None\n\n @property\n def x(self):\n \"\"\"The x-value of the Lissajous curves.\"\"\"\n return 4. * np.sin(self.a * np.linspace(0, 2 * np.pi, 500))\n\n @property\n def y(self):\n \"\"\"The y-value of the Lissajous curves.\"\"\"\n return 3. * np.sin(self.b * np.linspace(0, 2 * np.pi, 500))\n\n def controller(self, attr, old, new):\n \"\"\"This is the controller function which is used to update the\n curves when the sliders are adjusted. Note the use of the\n ``self.refs`` dictionary for accessing the Bokeh object\n attributes.\"\"\"\n self.a = self.refs[\"a_slider\"].value\n self.b = self.refs[\"b_slider\"].value\n\n self.refs[\"figure_source\"].data = {'x': self.x, 'y': self.y}\n\n\nTestBokehApp()\n", "path": "jwql/bokeh_templating/example/main.py" } ]
[ { "content": "\"\"\"\nThis is a minimal example demonstrating how to create a Bokeh app using\nthe ``bokeh-templating`` package and the associated YAML template files.\n\nAuthor\n-------\n\n - Graham Kanarek\n\nDependencies\n------------\n\n The user must have PyYAML, Bokeh, and the ``bokeh-templating``\n packages installed.\n\"\"\"\n\nimport os\nimport numpy as np\n\nfrom jwql.bokeh_templating import BokehTemplate\n\nfile_dir = os.path.dirname(os.path.realpath(__file__))\n\n\nclass TestBokehApp(BokehTemplate):\n \"\"\"This is a minimal ``BokehTemplate`` app.\"\"\"\n \n _embed = True\n\n def pre_init(self):\n \"\"\"Before creating the Bokeh interface (by parsing the interface\n file), we must initialize our ``a`` and ``b`` variables, and set\n the path to the interface file.\n \"\"\"\n\n self.a, self.b = 4, 2\n\n self.format_string = None\n self.interface_file = os.path.join(file_dir, \"example_interface.yaml\")\n\n # No post-initialization tasks are required.\n post_init = None\n\n @property\n def x(self):\n \"\"\"The x-value of the Lissajous curves.\"\"\"\n return 4. * np.sin(self.a * np.linspace(0, 2 * np.pi, 500))\n\n @property\n def y(self):\n \"\"\"The y-value of the Lissajous curves.\"\"\"\n return 3. * np.sin(self.b * np.linspace(0, 2 * np.pi, 500))\n\n def controller(self, attr, old, new):\n \"\"\"This is the controller function which is used to update the\n curves when the sliders are adjusted. Note the use of the\n ``self.refs`` dictionary for accessing the Bokeh object\n attributes.\"\"\"\n self.a = self.refs[\"a_slider\"].value\n self.b = self.refs[\"b_slider\"].value\n\n self.refs[\"figure_source\"].data = {'x': self.x, 'y': self.y}\n\n\nTestBokehApp()\n", "path": "jwql/bokeh_templating/example/main.py" } ]
diff --git a/jwql/bokeh_templating/example/example_interface.yaml b/jwql/bokeh_templating/example/example_interface.yaml index 7e6a32e3e..4aec297c7 100644 --- a/jwql/bokeh_templating/example/example_interface.yaml +++ b/jwql/bokeh_templating/example/example_interface.yaml @@ -1,24 +1,15 @@ -- !ColumnDataSource: &dummy # This is a dummy ColumnDataSource used to trigger the controller method whenever a slider is changed. - data: - value: [] - on_change: ['data', !self.controller ] -- !CustomJS: &callback # This callback changes the value of the dummy ColumnDataSource data to trigger the controller method. - ref: "callback" - args: - source: *dummy - code: "\n source.data = { value: [cb_obj.value] }\n" - !Slider: &a_slider # a slider for the a value ref: "a_slider" title: "A" value: 4 range: !!python/tuple [1, 20, 0.1] - callback: *callback + on_change: ['value', !self.controller ] - !Slider: &b_slider # a slider for the b value ref: "b_slider" title: "B" value: 2 range: !!python/tuple [1, 20, 0.1] - callback: *callback + on_change: ['value', !self.controller ] - !ColumnDataSource: &figure_source # the ColumnDataSource for the figure ref: "figure_source" data: diff --git a/jwql/bokeh_templating/example/main.py b/jwql/bokeh_templating/example/main.py index 3aa0ac856..770b4d4c3 100644 --- a/jwql/bokeh_templating/example/main.py +++ b/jwql/bokeh_templating/example/main.py @@ -24,6 +24,8 @@ class TestBokehApp(BokehTemplate): """This is a minimal ``BokehTemplate`` app.""" + + _embed = True def pre_init(self): """Before creating the Bokeh interface (by parsing the interface diff --git a/jwql/tests/test_bokeh_templating.py b/jwql/tests/test_bokeh_templating.py new file mode 100644 index 000000000..c513f9e10 --- /dev/null +++ b/jwql/tests/test_bokeh_templating.py @@ -0,0 +1,62 @@ +"""Tests for the ``bokeh_templating`` module. +Authors +------- + - Graham Kanarek +Use +--- + These tests can be run via the command line (omit the -s to + suppress verbose output to stdout): + :: + pytest -s test_bokeh_templating.py +""" + +import os +import numpy as np +from jwql.bokeh_templating import BokehTemplate +file_dir = os.path.dirname(os.path.realpath(__file__)) + + +class TestTemplate(BokehTemplate): + """ + A minimal BokehTemplate app for testing purposes. This is adapted from + the example included in the ``bokeh_template`` package. + """ + + _embed = True + + def pre_init(self): + """ + Before creating the Bokeh interface (by parsing the interface + file), we must initialize our ``a`` and ``b`` variables, and set + the path to the interface file. + """ + + self.a, self.b = 4, 2 + + self.format_string = None + self.interface_file = os.path.join(file_dir, "test_bokeh_tempating_interface.yaml") + + # No post-initialization tasks are required. + post_init = None + + @property + def x(self): + """The x-value of the Lissajous curves.""" + return 4. * np.sin(self.a * np.linspace(0, 2 * np.pi, 500)) + + @property + def y(self): + """The y-value of the Lissajous curves.""" + return 3. * np.sin(self.b * np.linspace(0, 2 * np.pi, 500)) + + def controller(self, attr, old, new): + """ + This is the controller function which is used to update the + curves when the sliders are adjusted. Note the use of the + ``self.refs`` dictionary for accessing the Bokeh object + attributes. 
+ """ + self.a = self.refs["a_slider"].value + self.b = self.refs["b_slider"].value + + self.refs["figure_source"].data = {'x': self.x, 'y': self.y} diff --git a/jwql/tests/test_bokeh_templating_interface.yaml b/jwql/tests/test_bokeh_templating_interface.yaml new file mode 100644 index 000000000..4aec297c7 --- /dev/null +++ b/jwql/tests/test_bokeh_templating_interface.yaml @@ -0,0 +1,26 @@ +- !Slider: &a_slider # a slider for the a value + ref: "a_slider" + title: "A" + value: 4 + range: !!python/tuple [1, 20, 0.1] + on_change: ['value', !self.controller ] +- !Slider: &b_slider # a slider for the b value + ref: "b_slider" + title: "B" + value: 2 + range: !!python/tuple [1, 20, 0.1] + on_change: ['value', !self.controller ] +- !ColumnDataSource: &figure_source # the ColumnDataSource for the figure + ref: "figure_source" + data: + x: !self.x + y: !self.y +- !Figure: &the_figure # the Figure itself, which includes a single line element. + ref: 'the_figure' + elements: + - {'kind': 'line', 'source': *figure_source, 'line_color': 'orange', 'line_width': 2} +- !Document: # the Bokeh document layout: a single column with the figure and two sliders + - !column: + - *the_figure # note the use of YAML anchors to add the Bokeh objects to the Document layout directly. + - *a_slider + - *b_slider \ No newline at end of file
scipy__scipy-10353
BUG: interpolate.NearestNDInterpolator with pandas
interpolate.NearestNDInterpolator does not work as expected when used with a selected (sliced) pandas DataFrame. This is due to the index being maintained when making selections in pandas.

### Reproducing code example:
```
import numpy as np
import pandas as pd
from scipy import interpolate

df = pd.DataFrame(np.array([[0, 0, 0, 0, 1, 0, 1],
                            [0, 0, 0, 0, 0, 1, 1],
                            [0, 0, 0, 0, 1, 1, 2]]).T,
                  columns=['x', 'y', 'z'])
df_select = df[3:]
NI = interpolate.NearestNDInterpolator((df_select.x, df_select.y), df_select.z)
print(NI([0.1, 0.9], [0.1, 0.9]))
```
I expect [0, 2] to be output, but the output is [NaN, 0] as a pandas.Series. This is due to the index being maintained when making selections in pandas. Specifically, `df_select.z` has index [3, 4, 5, 6], but `self.tree.query(xi)` (line 81 in scipy/interpolate/ndgriddata.py) returns indices that assume the index starts from zero. So `self.tree.query(xi)` returns [0, 3], and therefore `self.values[i]` (line 82 in scipy/interpolate/ndgriddata.py) uses an invalid index.

### Note
In the case of
```
df_select = df[3:].reset_index()
```
or
```
NI = interpolate.NearestNDInterpolator((df_select.x, df_select.y), np.array(df_select.z))
```
it works as expected. Also, this bug does not occur in interpolate.LinearNDInterpolator.

### Scipy/Numpy/Python version information:
```
1.3.0
1.16.4
sys.version_info(major=3, minor=6, micro=8, releaselevel='final', serial=0)
```
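The index mismatch described above can be seen without scipy at all. This is an illustrative sketch (the variable names are made up): KDTree-style queries return positional indices, while a sliced pandas Series keeps its original labels.

```python
import numpy as np
import pandas as pd

z = pd.Series([0, 0, 0, 0, 1, 1, 2])[3:]  # slicing keeps labels 3..6
positional = [0, 3]                        # the kind of indices cKDTree.query returns

# Label-based lookup: depending on the pandas version this yields NaN for the
# missing label 0 or raises a KeyError -- never the intended values.
try:
    print(z[positional])
except KeyError as exc:
    print("KeyError:", exc)

# Positional lookup on a plain array gives the expected [0, 2].
print(np.asarray(z)[positional])
```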
[ { "content": "\"\"\"\nConvenience interface to N-D interpolation\n\n.. versionadded:: 0.9\n\n\"\"\"\nfrom __future__ import division, print_function, absolute_import\n\nimport numpy as np\nfrom .interpnd import LinearNDInterpolator, NDInterpolatorBase, \\\n CloughTocher2DInterpolator, _ndim_coords_from_arrays\nfrom scipy.spatial import cKDTree\n\n__all__ = ['griddata', 'NearestNDInterpolator', 'LinearNDInterpolator',\n 'CloughTocher2DInterpolator']\n\n#------------------------------------------------------------------------------\n# Nearest-neighbour interpolation\n#------------------------------------------------------------------------------\n\n\nclass NearestNDInterpolator(NDInterpolatorBase):\n \"\"\"\n NearestNDInterpolator(x, y)\n\n Nearest-neighbour interpolation in N dimensions.\n\n .. versionadded:: 0.9\n\n Methods\n -------\n __call__\n\n Parameters\n ----------\n x : (Npoints, Ndims) ndarray of floats\n Data point coordinates.\n y : (Npoints,) ndarray of float or complex\n Data values.\n rescale : boolean, optional\n Rescale points to unit cube before performing interpolation.\n This is useful if some of the input dimensions have\n incommensurable units and differ by many orders of magnitude.\n\n .. versionadded:: 0.14.0\n tree_options : dict, optional\n Options passed to the underlying ``cKDTree``.\n\n .. versionadded:: 0.17.0\n\n\n Notes\n -----\n Uses ``scipy.spatial.cKDTree``\n\n \"\"\"\n\n def __init__(self, x, y, rescale=False, tree_options=None):\n NDInterpolatorBase.__init__(self, x, y, rescale=rescale,\n need_contiguous=False,\n need_values=False)\n if tree_options is None:\n tree_options = dict()\n self.tree = cKDTree(self.points, **tree_options)\n self.values = y\n\n def __call__(self, *args):\n \"\"\"\n Evaluate interpolator at given points.\n\n Parameters\n ----------\n xi : ndarray of float, shape (..., ndim)\n Points where to interpolate data at.\n\n \"\"\"\n xi = _ndim_coords_from_arrays(args, ndim=self.points.shape[1])\n xi = self._check_call_shape(xi)\n xi = self._scale_x(xi)\n dist, i = self.tree.query(xi)\n return self.values[i]\n\n\n#------------------------------------------------------------------------------\n# Convenience interface function\n#------------------------------------------------------------------------------\n\ndef griddata(points, values, xi, method='linear', fill_value=np.nan,\n rescale=False):\n \"\"\"\n Interpolate unstructured D-dimensional data.\n\n Parameters\n ----------\n points : 2-D ndarray of floats with shape (n, D), or length D tuple of 1-D ndarrays with shape (n,).\n Data point coordinates. \n values : ndarray of float or complex, shape (n,)\n Data values.\n xi : 2-D ndarray of floats with shape (m, D), or length D tuple of ndarrays broadcastable to the same shape.\n Points at which to interpolate data.\n method : {'linear', 'nearest', 'cubic'}, optional\n Method of interpolation. One of\n\n ``nearest``\n return the value at the data point closest to\n the point of interpolation. See `NearestNDInterpolator` for\n more details.\n\n ``linear``\n tessellate the input point set to n-dimensional\n simplices, and interpolate linearly on each simplex. See\n `LinearNDInterpolator` for more details.\n\n ``cubic`` (1-D)\n return the value determined from a cubic\n spline.\n\n ``cubic`` (2-D)\n return the value determined from a\n piecewise cubic, continuously differentiable (C1), and\n approximately curvature-minimizing polynomial surface. 
See\n `CloughTocher2DInterpolator` for more details.\n fill_value : float, optional\n Value used to fill in for requested points outside of the\n convex hull of the input points. If not provided, then the\n default is ``nan``. This option has no effect for the\n 'nearest' method.\n rescale : bool, optional\n Rescale points to unit cube before performing interpolation.\n This is useful if some of the input dimensions have\n incommensurable units and differ by many orders of magnitude.\n\n .. versionadded:: 0.14.0\n \n Returns\n -------\n ndarray\n Array of interpolated values.\n\n Notes\n -----\n\n .. versionadded:: 0.9\n\n Examples\n --------\n\n Suppose we want to interpolate the 2-D function\n\n >>> def func(x, y):\n ... return x*(1-x)*np.cos(4*np.pi*x) * np.sin(4*np.pi*y**2)**2\n\n on a grid in [0, 1]x[0, 1]\n\n >>> grid_x, grid_y = np.mgrid[0:1:100j, 0:1:200j]\n\n but we only know its values at 1000 data points:\n\n >>> points = np.random.rand(1000, 2)\n >>> values = func(points[:,0], points[:,1])\n\n This can be done with `griddata` -- below we try out all of the\n interpolation methods:\n\n >>> from scipy.interpolate import griddata\n >>> grid_z0 = griddata(points, values, (grid_x, grid_y), method='nearest')\n >>> grid_z1 = griddata(points, values, (grid_x, grid_y), method='linear')\n >>> grid_z2 = griddata(points, values, (grid_x, grid_y), method='cubic')\n\n One can see that the exact result is reproduced by all of the\n methods to some degree, but for this smooth function the piecewise\n cubic interpolant gives the best results:\n\n >>> import matplotlib.pyplot as plt\n >>> plt.subplot(221)\n >>> plt.imshow(func(grid_x, grid_y).T, extent=(0,1,0,1), origin='lower')\n >>> plt.plot(points[:,0], points[:,1], 'k.', ms=1)\n >>> plt.title('Original')\n >>> plt.subplot(222)\n >>> plt.imshow(grid_z0.T, extent=(0,1,0,1), origin='lower')\n >>> plt.title('Nearest')\n >>> plt.subplot(223)\n >>> plt.imshow(grid_z1.T, extent=(0,1,0,1), origin='lower')\n >>> plt.title('Linear')\n >>> plt.subplot(224)\n >>> plt.imshow(grid_z2.T, extent=(0,1,0,1), origin='lower')\n >>> plt.title('Cubic')\n >>> plt.gcf().set_size_inches(6, 6)\n >>> plt.show()\n\n \"\"\"\n\n points = _ndim_coords_from_arrays(points)\n\n if points.ndim < 2:\n ndim = points.ndim\n else:\n ndim = points.shape[-1]\n\n if ndim == 1 and method in ('nearest', 'linear', 'cubic'):\n from .interpolate import interp1d\n points = points.ravel()\n if isinstance(xi, tuple):\n if len(xi) != 1:\n raise ValueError(\"invalid number of dimensions in xi\")\n xi, = xi\n # Sort points/values together, necessary as input for interp1d\n idx = np.argsort(points)\n points = points[idx]\n values = values[idx]\n if method == 'nearest':\n fill_value = 'extrapolate'\n ip = interp1d(points, values, kind=method, axis=0, bounds_error=False,\n fill_value=fill_value)\n return ip(xi)\n elif method == 'nearest':\n ip = NearestNDInterpolator(points, values, rescale=rescale)\n return ip(xi)\n elif method == 'linear':\n ip = LinearNDInterpolator(points, values, fill_value=fill_value,\n rescale=rescale)\n return ip(xi)\n elif method == 'cubic' and ndim == 2:\n ip = CloughTocher2DInterpolator(points, values, fill_value=fill_value,\n rescale=rescale)\n return ip(xi)\n else:\n raise ValueError(\"Unknown interpolation method %r for \"\n \"%d dimensional data\" % (method, ndim))\n", "path": "scipy/interpolate/ndgriddata.py" } ]
[ { "content": "\"\"\"\nConvenience interface to N-D interpolation\n\n.. versionadded:: 0.9\n\n\"\"\"\nfrom __future__ import division, print_function, absolute_import\n\nimport numpy as np\nfrom .interpnd import LinearNDInterpolator, NDInterpolatorBase, \\\n CloughTocher2DInterpolator, _ndim_coords_from_arrays\nfrom scipy.spatial import cKDTree\n\n__all__ = ['griddata', 'NearestNDInterpolator', 'LinearNDInterpolator',\n 'CloughTocher2DInterpolator']\n\n#------------------------------------------------------------------------------\n# Nearest-neighbour interpolation\n#------------------------------------------------------------------------------\n\n\nclass NearestNDInterpolator(NDInterpolatorBase):\n \"\"\"\n NearestNDInterpolator(x, y)\n\n Nearest-neighbour interpolation in N dimensions.\n\n .. versionadded:: 0.9\n\n Methods\n -------\n __call__\n\n Parameters\n ----------\n x : (Npoints, Ndims) ndarray of floats\n Data point coordinates.\n y : (Npoints,) ndarray of float or complex\n Data values.\n rescale : boolean, optional\n Rescale points to unit cube before performing interpolation.\n This is useful if some of the input dimensions have\n incommensurable units and differ by many orders of magnitude.\n\n .. versionadded:: 0.14.0\n tree_options : dict, optional\n Options passed to the underlying ``cKDTree``.\n\n .. versionadded:: 0.17.0\n\n\n Notes\n -----\n Uses ``scipy.spatial.cKDTree``\n\n \"\"\"\n\n def __init__(self, x, y, rescale=False, tree_options=None):\n NDInterpolatorBase.__init__(self, x, y, rescale=rescale,\n need_contiguous=False,\n need_values=False)\n if tree_options is None:\n tree_options = dict()\n self.tree = cKDTree(self.points, **tree_options)\n self.values = np.asarray(y)\n\n def __call__(self, *args):\n \"\"\"\n Evaluate interpolator at given points.\n\n Parameters\n ----------\n xi : ndarray of float, shape (..., ndim)\n Points where to interpolate data at.\n\n \"\"\"\n xi = _ndim_coords_from_arrays(args, ndim=self.points.shape[1])\n xi = self._check_call_shape(xi)\n xi = self._scale_x(xi)\n dist, i = self.tree.query(xi)\n return self.values[i]\n\n\n#------------------------------------------------------------------------------\n# Convenience interface function\n#------------------------------------------------------------------------------\n\ndef griddata(points, values, xi, method='linear', fill_value=np.nan,\n rescale=False):\n \"\"\"\n Interpolate unstructured D-dimensional data.\n\n Parameters\n ----------\n points : 2-D ndarray of floats with shape (n, D), or length D tuple of 1-D ndarrays with shape (n,).\n Data point coordinates. \n values : ndarray of float or complex, shape (n,)\n Data values.\n xi : 2-D ndarray of floats with shape (m, D), or length D tuple of ndarrays broadcastable to the same shape.\n Points at which to interpolate data.\n method : {'linear', 'nearest', 'cubic'}, optional\n Method of interpolation. One of\n\n ``nearest``\n return the value at the data point closest to\n the point of interpolation. See `NearestNDInterpolator` for\n more details.\n\n ``linear``\n tessellate the input point set to n-dimensional\n simplices, and interpolate linearly on each simplex. See\n `LinearNDInterpolator` for more details.\n\n ``cubic`` (1-D)\n return the value determined from a cubic\n spline.\n\n ``cubic`` (2-D)\n return the value determined from a\n piecewise cubic, continuously differentiable (C1), and\n approximately curvature-minimizing polynomial surface. 
See\n `CloughTocher2DInterpolator` for more details.\n fill_value : float, optional\n Value used to fill in for requested points outside of the\n convex hull of the input points. If not provided, then the\n default is ``nan``. This option has no effect for the\n 'nearest' method.\n rescale : bool, optional\n Rescale points to unit cube before performing interpolation.\n This is useful if some of the input dimensions have\n incommensurable units and differ by many orders of magnitude.\n\n .. versionadded:: 0.14.0\n \n Returns\n -------\n ndarray\n Array of interpolated values.\n\n Notes\n -----\n\n .. versionadded:: 0.9\n\n Examples\n --------\n\n Suppose we want to interpolate the 2-D function\n\n >>> def func(x, y):\n ... return x*(1-x)*np.cos(4*np.pi*x) * np.sin(4*np.pi*y**2)**2\n\n on a grid in [0, 1]x[0, 1]\n\n >>> grid_x, grid_y = np.mgrid[0:1:100j, 0:1:200j]\n\n but we only know its values at 1000 data points:\n\n >>> points = np.random.rand(1000, 2)\n >>> values = func(points[:,0], points[:,1])\n\n This can be done with `griddata` -- below we try out all of the\n interpolation methods:\n\n >>> from scipy.interpolate import griddata\n >>> grid_z0 = griddata(points, values, (grid_x, grid_y), method='nearest')\n >>> grid_z1 = griddata(points, values, (grid_x, grid_y), method='linear')\n >>> grid_z2 = griddata(points, values, (grid_x, grid_y), method='cubic')\n\n One can see that the exact result is reproduced by all of the\n methods to some degree, but for this smooth function the piecewise\n cubic interpolant gives the best results:\n\n >>> import matplotlib.pyplot as plt\n >>> plt.subplot(221)\n >>> plt.imshow(func(grid_x, grid_y).T, extent=(0,1,0,1), origin='lower')\n >>> plt.plot(points[:,0], points[:,1], 'k.', ms=1)\n >>> plt.title('Original')\n >>> plt.subplot(222)\n >>> plt.imshow(grid_z0.T, extent=(0,1,0,1), origin='lower')\n >>> plt.title('Nearest')\n >>> plt.subplot(223)\n >>> plt.imshow(grid_z1.T, extent=(0,1,0,1), origin='lower')\n >>> plt.title('Linear')\n >>> plt.subplot(224)\n >>> plt.imshow(grid_z2.T, extent=(0,1,0,1), origin='lower')\n >>> plt.title('Cubic')\n >>> plt.gcf().set_size_inches(6, 6)\n >>> plt.show()\n\n \"\"\"\n\n points = _ndim_coords_from_arrays(points)\n\n if points.ndim < 2:\n ndim = points.ndim\n else:\n ndim = points.shape[-1]\n\n if ndim == 1 and method in ('nearest', 'linear', 'cubic'):\n from .interpolate import interp1d\n points = points.ravel()\n if isinstance(xi, tuple):\n if len(xi) != 1:\n raise ValueError(\"invalid number of dimensions in xi\")\n xi, = xi\n # Sort points/values together, necessary as input for interp1d\n idx = np.argsort(points)\n points = points[idx]\n values = values[idx]\n if method == 'nearest':\n fill_value = 'extrapolate'\n ip = interp1d(points, values, kind=method, axis=0, bounds_error=False,\n fill_value=fill_value)\n return ip(xi)\n elif method == 'nearest':\n ip = NearestNDInterpolator(points, values, rescale=rescale)\n return ip(xi)\n elif method == 'linear':\n ip = LinearNDInterpolator(points, values, fill_value=fill_value,\n rescale=rescale)\n return ip(xi)\n elif method == 'cubic' and ndim == 2:\n ip = CloughTocher2DInterpolator(points, values, fill_value=fill_value,\n rescale=rescale)\n return ip(xi)\n else:\n raise ValueError(\"Unknown interpolation method %r for \"\n \"%d dimensional data\" % (method, ndim))\n", "path": "scipy/interpolate/ndgriddata.py" } ]
diff --git a/scipy/interpolate/ndgriddata.py b/scipy/interpolate/ndgriddata.py index 023c439f8451..e7f9fddd7e44 100644 --- a/scipy/interpolate/ndgriddata.py +++ b/scipy/interpolate/ndgriddata.py @@ -62,7 +62,7 @@ def __init__(self, x, y, rescale=False, tree_options=None): if tree_options is None: tree_options = dict() self.tree = cKDTree(self.points, **tree_options) - self.values = y + self.values = np.asarray(y) def __call__(self, *args): """ diff --git a/scipy/interpolate/tests/test_ndgriddata.py b/scipy/interpolate/tests/test_ndgriddata.py index df4d552beace..eb3c0fa6b168 100644 --- a/scipy/interpolate/tests/test_ndgriddata.py +++ b/scipy/interpolate/tests/test_ndgriddata.py @@ -175,3 +175,17 @@ def test_nearest_options(): nndi_o = NearestNDInterpolator(x, y, tree_options=opts) assert_allclose(nndi(x), nndi_o(x), atol=1e-14) + +def test_nearest_list_argument(): + nd = np.array([[0, 0, 0, 0, 1, 0, 1], + [0, 0, 0, 0, 0, 1, 1], + [0, 0, 0, 0, 1, 1, 2]]) + d = nd[:, 3:] + + # z is np.array + NI = NearestNDInterpolator((d[0], d[1]), d[2]) + assert_array_equal(NI([0.1, 0.9], [0.1, 0.9]), [0, 2]) + + # z is list + NI = NearestNDInterpolator((d[0], d[1]), list(d[2])) + assert_array_equal(NI([0.1, 0.9], [0.1, 0.9]), [0, 2])
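With the `np.asarray` conversion from the patch above in place, the reporter's original snippet behaves as expected. The sketch below simply mirrors that snippet and assumes a scipy build that includes the fix.

```python
import numpy as np
import pandas as pd
from scipy.interpolate import NearestNDInterpolator

df = pd.DataFrame(
    np.array([[0, 0, 0, 0, 1, 0, 1],
              [0, 0, 0, 0, 0, 1, 1],
              [0, 0, 0, 0, 1, 1, 2]]).T,
    columns=["x", "y", "z"],
)
df_select = df[3:]
ni = NearestNDInterpolator((df_select.x, df_select.y), df_select.z)
print(ni([0.1, 0.9], [0.1, 0.9]))  # expected: [0 2]
```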
ibis-project__ibis-8397
feat: SQL Server Hierarchical Column Support

### Is your feature request related to a problem?

I am trying ibis with SQL Server on the Adventure Works SQL Server sample OLTP DB, as it has complex datatypes: https://learn.microsoft.com/en-us/sql/samples/adventureworks-install-configure?view=sql-server-ver16&tabs=ssms

I discovered an issue when trying to read the schema of [HumanResources].[Employee]: it has columns of the [hierarchyid](https://learn.microsoft.com/en-us/sql/relational-databases/hierarchical-data-sql-server?view=sql-server-ver16) datatype.

Sample to simulate the issue. The DDL of the table is:

```
CREATE TABLE [dbo].[SimpleDemo]
(
    [Node] hierarchyid NOT NULL,
    [Level] AS ([Node].[GetLevel]()),
    [Location] nvarchar(30) NOT NULL,
    [LocationType] nvarchar(9) NULL
);

INSERT INTO [dbo].[SimpleDemo]
(
    [Node],
    [Location],
    [LocationType]
)
VALUES
    ('/', 'Earth', 'Planet'),
    ('/1/', 'Europe', 'Continent'),
    ('/2/', 'South America', 'Continent'),
    ('/1/1/', 'France', 'Country'),
    ('/1/1/1/', 'Paris', 'City'),
    ('/1/2/1/', 'Madrid', 'City'),
    ('/1/2/', 'Spain', 'Country'),
    ('/3/', 'Antarctica', 'Continent'),
    ('/2/1/', 'Brazil', 'Country'),
    ('/2/1/1/', 'Brasilia', 'City'),
    ('/2/1/2/', 'Bahia', 'State'),
    ('/2/1/2/1/', 'Salvador', 'City'),
    ('/3/1/', 'McMurdo Station', 'City');
```

Notice the `Node` & `Level` columns, which are related to the [hierarchyid](https://learn.microsoft.com/en-us/sql/relational-databases/hierarchical-data-sql-server?view=sql-server-ver16) datatype.

When I try to get the table info

```
src_con.table(name='SimpleDemo', schema='dbo', database='AW_OLTP')
```

I get an error:

```
ProgrammingError: (pyodbc.ProgrammingError) ('42000', "[42000] [Microsoft][ODBC Driver 18 for SQL Server][SQL Server]The request for procedure 'SimpleDemo' failed because 'SimpleDemo' is a table object. (2809) (SQLExecDirectW)")
[SQL: EXEC sp_describe_first_result_set @tsql = ?]
[parameters: ('dbo.[SimpleDemo]',)]
(Background on this error at: https://sqlalche.me/e/20/f405)
```

As per the error, when I check

```
### /usr/local/lib/python3.10/dist-packages/ibis/backends/base/sql/alchemy/__init__.py
nulltype_cols = frozenset(
    col.name for col in table.c if isinstance(col.type, sa.types.NullType)
)
if not nulltype_cols:
    return table
return self._handle_failed_column_type_inference(table, nulltype_cols)
```

### Describe the solution you'd like

I see that we are trying to handle it. If you can do a simple fix that would be great, as

```
select CAST(Node AS nvarchar(100)) AS Node, Level, Location, LocationType from [dbo].[SimpleDemo]
```

- Node is a VARCHAR that needs a CAST
- Level is an INTEGER that also can be CAST

### What version of ibis are you running?

8.0.0

### What backend(s) are you using, if any?

SQL Server

### Code of Conduct

- [X] I agree to follow this project's Code of Conduct
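The CAST workaround suggested above can also be sketched from the Python side. This is only an illustration: the connection parameters are placeholders, and the use of the raw-SQL escape hatch (`con.sql`) against the MSSQL backend is an assumption, not confirmed behaviour of ibis 8.0.

```python
import ibis

# Placeholder credentials -- adjust for a real Adventure Works instance.
con = ibis.mssql.connect(
    host="localhost", user="sa", password="...", database="AW_OLTP"
)

# Wrap the hierarchyid column in a CAST so every column of the result set
# maps to a type ibis understands (nvarchar -> string, int -> int32, ...).
simple_demo = con.sql(
    """
    SELECT CAST([Node] AS nvarchar(100)) AS Node,
           CAST([Level] AS int) AS [Level],
           [Location],
           [LocationType]
    FROM [dbo].[SimpleDemo]
    """
)
print(simple_demo.schema())
```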
[ { "content": "from __future__ import annotations\n\nfrom functools import partial\nfrom typing import NoReturn\n\nimport sqlglot as sg\nimport sqlglot.expressions as sge\n\nimport ibis.common.exceptions as com\nimport ibis.expr.datatypes as dt\nfrom ibis.common.collections import FrozenDict\nfrom ibis.formats import TypeMapper\n\ntypecode = sge.DataType.Type\n\n_from_sqlglot_types = {\n typecode.BIGDECIMAL: partial(dt.Decimal, 76, 38),\n typecode.BIGINT: dt.Int64,\n typecode.BINARY: dt.Binary,\n typecode.BOOLEAN: dt.Boolean,\n typecode.CHAR: dt.String,\n typecode.DATE: dt.Date,\n typecode.DOUBLE: dt.Float64,\n typecode.ENUM: dt.String,\n typecode.ENUM8: dt.String,\n typecode.ENUM16: dt.String,\n typecode.FLOAT: dt.Float32,\n typecode.FIXEDSTRING: dt.String,\n typecode.GEOMETRY: partial(dt.GeoSpatial, geotype=\"geometry\"),\n typecode.GEOGRAPHY: partial(dt.GeoSpatial, geotype=\"geography\"),\n typecode.HSTORE: partial(dt.Map, dt.string, dt.string),\n typecode.INET: dt.INET,\n typecode.INT128: partial(dt.Decimal, 38, 0),\n typecode.INT256: partial(dt.Decimal, 76, 0),\n typecode.INT: dt.Int32,\n typecode.IPADDRESS: dt.INET,\n typecode.JSON: dt.JSON,\n typecode.JSONB: dt.JSON,\n typecode.LONGBLOB: dt.Binary,\n typecode.LONGTEXT: dt.String,\n typecode.MEDIUMBLOB: dt.Binary,\n typecode.MEDIUMTEXT: dt.String,\n typecode.MONEY: dt.Int64,\n typecode.NCHAR: dt.String,\n typecode.UUID: dt.UUID,\n typecode.NULL: dt.Null,\n typecode.NVARCHAR: dt.String,\n typecode.OBJECT: partial(dt.Map, dt.string, dt.json),\n typecode.SMALLINT: dt.Int16,\n typecode.SMALLMONEY: dt.Int32,\n typecode.TEXT: dt.String,\n typecode.TIME: dt.Time,\n typecode.TIMETZ: dt.Time,\n typecode.TINYBLOB: dt.Binary,\n typecode.TINYINT: dt.Int8,\n typecode.TINYTEXT: dt.String,\n typecode.UBIGINT: dt.UInt64,\n typecode.UINT: dt.UInt32,\n typecode.USMALLINT: dt.UInt16,\n typecode.UTINYINT: dt.UInt8,\n typecode.UUID: dt.UUID,\n typecode.VARBINARY: dt.Binary,\n typecode.VARCHAR: dt.String,\n typecode.VARIANT: dt.JSON,\n typecode.UNIQUEIDENTIFIER: dt.UUID,\n typecode.SET: partial(dt.Array, dt.string),\n #############################\n # Unsupported sqlglot types #\n #############################\n # BIT = auto() # mysql\n # BIGSERIAL = auto()\n # DATETIME64 = auto() # clickhouse\n # ENUM = auto()\n # INT4RANGE = auto()\n # INT4MULTIRANGE = auto()\n # INT8RANGE = auto()\n # INT8MULTIRANGE = auto()\n # NUMRANGE = auto()\n # NUMMULTIRANGE = auto()\n # TSRANGE = auto()\n # TSMULTIRANGE = auto()\n # TSTZRANGE = auto()\n # TSTZMULTIRANGE = auto()\n # DATERANGE = auto()\n # DATEMULTIRANGE = auto()\n # HLLSKETCH = auto()\n # IMAGE = auto()\n # IPPREFIX = auto()\n # ROWVERSION = auto()\n # SERIAL = auto()\n # SET = auto()\n # SMALLSERIAL = auto()\n # SUPER = auto()\n # TIMESTAMPLTZ = auto()\n # UNKNOWN = auto() # Sentinel value, useful for type annotation\n # UINT128 = auto()\n # UINT256 = auto()\n # USERDEFINED = \"USER-DEFINED\"\n # XML = auto()\n}\n\n_to_sqlglot_types = {\n dt.Null: typecode.NULL,\n dt.Boolean: typecode.BOOLEAN,\n dt.Int8: typecode.TINYINT,\n dt.Int16: typecode.SMALLINT,\n dt.Int32: typecode.INT,\n dt.Int64: typecode.BIGINT,\n dt.UInt8: typecode.UTINYINT,\n dt.UInt16: typecode.USMALLINT,\n dt.UInt32: typecode.UINT,\n dt.UInt64: typecode.UBIGINT,\n dt.Float16: typecode.FLOAT,\n dt.Float32: typecode.FLOAT,\n dt.Float64: typecode.DOUBLE,\n dt.String: typecode.VARCHAR,\n dt.Binary: typecode.VARBINARY,\n dt.JSON: typecode.JSON,\n dt.INET: typecode.INET,\n dt.UUID: typecode.UUID,\n dt.MACADDR: typecode.VARCHAR,\n dt.Date: 
typecode.DATE,\n dt.Time: typecode.TIME,\n}\n\n\nclass SqlglotType(TypeMapper):\n dialect: str | None = None\n \"\"\"The dialect this parser is for.\"\"\"\n\n default_nullable = True\n \"\"\"Default nullability when not specified.\"\"\"\n\n default_decimal_precision: int | None = None\n \"\"\"Default decimal precision when not specified.\"\"\"\n\n default_decimal_scale: int | None = None\n \"\"\"Default decimal scale when not specified.\"\"\"\n\n default_temporal_scale: int | None = None\n \"\"\"Default temporal scale when not specified.\"\"\"\n\n default_interval_precision: str | None = None\n \"\"\"Default interval precision when not specified.\"\"\"\n\n unknown_type_strings: dict[str, dt.DataType] = {}\n \"\"\"String to ibis datatype mapping to use when converting unknown types.\"\"\"\n\n @classmethod\n def to_ibis(cls, typ: sge.DataType, nullable: bool | None = None) -> dt.DataType:\n \"\"\"Convert a sqlglot type to an ibis type.\"\"\"\n typecode = typ.this\n\n # broken sqlglot thing\n if isinstance(typecode, sge.Interval):\n typ = sge.DataType(\n this=sge.DataType.Type.INTERVAL,\n expressions=[sge.IntervalSpan(this=typecode.unit)],\n )\n typecode = typ.this\n\n if method := getattr(cls, f\"_from_sqlglot_{typecode.name}\", None):\n dtype = method(*typ.expressions)\n else:\n dtype = _from_sqlglot_types[typecode](nullable=cls.default_nullable)\n\n if nullable is not None:\n return dtype.copy(nullable=nullable)\n else:\n return dtype\n\n @classmethod\n def from_ibis(cls, dtype: dt.DataType) -> sge.DataType:\n \"\"\"Convert a sqlglot type to an ibis type.\"\"\"\n\n if method := getattr(cls, f\"_from_ibis_{dtype.name}\", None):\n return method(dtype)\n else:\n return sge.DataType(this=_to_sqlglot_types[type(dtype)])\n\n @classmethod\n def from_string(cls, text: str, nullable: bool | None = None) -> dt.DataType:\n if dtype := cls.unknown_type_strings.get(text.lower()):\n return dtype\n\n sgtype = sg.parse_one(text, into=sge.DataType, read=cls.dialect)\n return cls.to_ibis(sgtype, nullable=nullable)\n\n @classmethod\n def to_string(cls, dtype: dt.DataType) -> str:\n return cls.from_ibis(dtype).sql(dialect=cls.dialect)\n\n @classmethod\n def _from_sqlglot_ARRAY(cls, value_type: sge.DataType) -> dt.Array:\n return dt.Array(cls.to_ibis(value_type), nullable=cls.default_nullable)\n\n @classmethod\n def _from_sqlglot_MAP(\n cls, key_type: sge.DataType, value_type: sge.DataType\n ) -> dt.Map:\n return dt.Map(\n cls.to_ibis(key_type),\n cls.to_ibis(value_type),\n nullable=cls.default_nullable,\n )\n\n @classmethod\n def _from_sqlglot_STRUCT(cls, *fields: sge.ColumnDef) -> dt.Struct:\n types = {}\n for i, field in enumerate(fields):\n if isinstance(field, sge.ColumnDef):\n types[field.name] = cls.to_ibis(field.args[\"kind\"])\n else:\n types[f\"f{i:d}\"] = cls.from_string(str(field))\n return dt.Struct(types, nullable=cls.default_nullable)\n\n @classmethod\n def _from_sqlglot_TIMESTAMP(cls, scale=None) -> dt.Timestamp:\n return dt.Timestamp(\n scale=cls.default_temporal_scale if scale is None else int(scale.this.this),\n nullable=cls.default_nullable,\n )\n\n @classmethod\n def _from_sqlglot_TIMESTAMPTZ(cls, scale=None) -> dt.Timestamp:\n return dt.Timestamp(\n timezone=\"UTC\",\n scale=cls.default_temporal_scale if scale is None else int(scale.this.this),\n nullable=cls.default_nullable,\n )\n\n @classmethod\n def _from_sqlglot_TIMESTAMPLTZ(cls, scale=None) -> dt.Timestamp:\n return dt.Timestamp(\n timezone=\"UTC\",\n scale=cls.default_temporal_scale if scale is None else int(scale.this.this),\n 
nullable=cls.default_nullable,\n )\n\n @classmethod\n def _from_sqlglot_INTERVAL(\n cls, precision_or_span: sge.IntervalSpan | None = None\n ) -> dt.Interval:\n nullable = cls.default_nullable\n if precision_or_span is None:\n precision_or_span = cls.default_interval_precision\n\n if isinstance(precision_or_span, str):\n return dt.Interval(precision_or_span, nullable=nullable)\n elif isinstance(precision_or_span, sge.IntervalSpan):\n if (expression := precision_or_span.expression) is not None:\n unit = expression.this\n else:\n unit = precision_or_span.this.this\n return dt.Interval(unit=unit, nullable=nullable)\n elif isinstance(precision_or_span, sge.Var):\n return dt.Interval(unit=precision_or_span.this, nullable=nullable)\n elif precision_or_span is None:\n raise com.IbisTypeError(\"Interval precision is None\")\n else:\n raise com.IbisTypeError(precision_or_span)\n\n @classmethod\n def _from_sqlglot_DECIMAL(\n cls,\n precision: sge.DataTypeParam | None = None,\n scale: sge.DataTypeParam | None = None,\n ) -> dt.Decimal:\n if precision is None:\n precision = cls.default_decimal_precision\n else:\n precision = int(precision.this.this)\n\n if scale is None:\n scale = cls.default_decimal_scale\n else:\n scale = int(scale.this.this)\n\n return dt.Decimal(precision, scale, nullable=cls.default_nullable)\n\n @classmethod\n def _from_sqlglot_GEOMETRY(\n cls, arg: sge.DataTypeParam | None = None\n ) -> sge.DataType:\n if arg is not None:\n return getattr(dt, str(arg))(nullable=cls.default_nullable)\n return dt.GeoSpatial(geotype=\"geometry\", nullable=cls.default_nullable)\n\n @classmethod\n def _from_sqlglot_GEOGRAPHY(cls) -> sge.DataType:\n return dt.GeoSpatial(geotype=\"geography\", nullable=cls.default_nullable)\n\n @classmethod\n def _from_ibis_Interval(cls, dtype: dt.Interval) -> sge.DataType:\n assert dtype.unit is not None, \"interval unit cannot be None\"\n return sge.DataType(\n this=typecode.INTERVAL,\n expressions=[sge.Var(this=dtype.unit.name)],\n )\n\n @classmethod\n def _from_ibis_Array(cls, dtype: dt.Array) -> sge.DataType:\n value_type = cls.from_ibis(dtype.value_type)\n return sge.DataType(this=typecode.ARRAY, expressions=[value_type], nested=True)\n\n @classmethod\n def _from_ibis_Map(cls, dtype: dt.Map) -> sge.DataType:\n key_type = cls.from_ibis(dtype.key_type)\n value_type = cls.from_ibis(dtype.value_type)\n return sge.DataType(\n this=typecode.MAP, expressions=[key_type, value_type], nested=True\n )\n\n @classmethod\n def _from_ibis_Struct(cls, dtype: dt.Struct) -> sge.DataType:\n fields = [\n sge.ColumnDef(this=str(name), kind=cls.from_ibis(field))\n for name, field in dtype.items()\n ]\n return sge.DataType(this=typecode.STRUCT, expressions=fields, nested=True)\n\n @classmethod\n def _from_ibis_Decimal(cls, dtype: dt.Decimal) -> sge.DataType:\n if (precision := dtype.precision) is None:\n precision = cls.default_decimal_precision\n\n if (scale := dtype.scale) is None:\n scale = cls.default_decimal_scale\n\n expressions = []\n\n if precision is not None:\n expressions.append(sge.DataTypeParam(this=sge.Literal.number(precision)))\n\n if scale is not None:\n if precision is None:\n raise com.IbisTypeError(\n \"Decimal scale cannot be specified without precision\"\n )\n expressions.append(sge.DataTypeParam(this=sge.Literal.number(scale)))\n\n return sge.DataType(this=typecode.DECIMAL, expressions=expressions or None)\n\n @classmethod\n def _from_ibis_Timestamp(cls, dtype: dt.Timestamp) -> sge.DataType:\n code = typecode.TIMESTAMP if dtype.timezone is None else 
typecode.TIMESTAMPTZ\n if dtype.scale is not None:\n scale = sge.DataTypeParam(this=sge.Literal.number(dtype.scale))\n return sge.DataType(this=code, expressions=[scale])\n else:\n return sge.DataType(this=code)\n\n @classmethod\n def _from_ibis_GeoSpatial(cls, dtype: dt.GeoSpatial):\n if (geotype := dtype.geotype) is not None:\n return sge.DataType(this=getattr(typecode, geotype.upper()))\n return sge.DataType(this=typecode.GEOMETRY)\n\n _from_ibis_Point = (\n _from_ibis_LineString\n ) = (\n _from_ibis_Polygon\n ) = (\n _from_ibis_MultiLineString\n ) = _from_ibis_MultiPoint = _from_ibis_MultiPolygon = _from_ibis_GeoSpatial\n\n\nclass PostgresType(SqlglotType):\n dialect = \"postgres\"\n default_interval_precision = \"s\"\n default_temporal_scale = 6\n\n unknown_type_strings = FrozenDict(\n {\n \"vector\": dt.unknown,\n \"tsvector\": dt.unknown,\n \"line\": dt.linestring,\n \"line[]\": dt.Array(dt.linestring),\n \"polygon\": dt.polygon,\n \"polygon[]\": dt.Array(dt.polygon),\n \"point\": dt.point,\n \"point[]\": dt.Array(dt.point),\n \"macaddr\": dt.macaddr,\n \"macaddr[]\": dt.Array(dt.macaddr),\n \"macaddr8\": dt.macaddr,\n \"macaddr8[]\": dt.Array(dt.macaddr),\n }\n )\n\n @classmethod\n def _from_ibis_Map(cls, dtype: dt.Map) -> sge.DataType:\n if not dtype.key_type.is_string():\n raise com.IbisTypeError(\"Postgres only supports string keys in maps\")\n if not dtype.value_type.is_string():\n raise com.IbisTypeError(\"Postgres only supports string values in maps\")\n return sge.DataType(this=typecode.HSTORE)\n\n\nclass RisingWaveType(PostgresType):\n dialect = \"risingwave\"\n\n @classmethod\n def _from_ibis_Timestamp(cls, dtype: dt.Timestamp) -> sge.DataType:\n if dtype.timezone is not None:\n return sge.DataType(this=typecode.TIMESTAMPTZ)\n return sge.DataType(this=typecode.TIMESTAMP)\n\n @classmethod\n def _from_ibis_Decimal(cls, dtype: dt.Decimal) -> sge.DataType:\n return sge.DataType(this=typecode.DECIMAL)\n\n @classmethod\n def _from_ibis_UUID(cls, dtype: dt.UUID) -> sge.DataType:\n return sge.DataType(this=typecode.VARCHAR)\n\n\nclass DataFusionType(PostgresType):\n unknown_type_strings = {\n \"utf8\": dt.string,\n \"float64\": dt.float64,\n }\n\n\nclass MySQLType(SqlglotType):\n dialect = \"mysql\"\n # these are mysql's defaults, see\n # https://dev.mysql.com/doc/refman/8.0/en/fixed-point-types.html\n default_decimal_precision = 10\n default_decimal_scale = 0\n\n unknown_type_strings = FrozenDict(\n {\n \"year(4)\": dt.int8,\n \"inet6\": dt.inet,\n }\n )\n\n @classmethod\n def _from_sqlglot_BIT(cls, nbits: sge.DataTypeParam) -> dt.Integer:\n nbits = int(nbits.this.this)\n if nbits > 32:\n return dt.Int64(nullable=cls.default_nullable)\n elif nbits > 16:\n return dt.Int32(nullable=cls.default_nullable)\n elif nbits > 8:\n return dt.Int16(nullable=cls.default_nullable)\n else:\n return dt.Int8(nullable=cls.default_nullable)\n\n @classmethod\n def _from_sqlglot_DATETIME(cls) -> dt.Timestamp:\n return dt.Timestamp(nullable=cls.default_nullable)\n\n @classmethod\n def _from_sqlglot_TIMESTAMP(cls) -> dt.Timestamp:\n return dt.Timestamp(timezone=\"UTC\", nullable=cls.default_nullable)\n\n @classmethod\n def _from_ibis_String(cls, dtype: dt.String) -> sge.DataType:\n return sge.DataType(this=typecode.TEXT)\n\n\nclass DuckDBType(SqlglotType):\n dialect = \"duckdb\"\n default_decimal_precision = 18\n default_decimal_scale = 3\n default_interval_precision = \"us\"\n\n unknown_type_strings = FrozenDict({\"wkb_blob\": dt.binary})\n\n @classmethod\n def _from_sqlglot_TIMESTAMP(cls) -> 
dt.Timestamp:\n return dt.Timestamp(scale=6, nullable=cls.default_nullable)\n\n @classmethod\n def _from_sqlglot_TIMESTAMPTZ(cls) -> dt.Timestamp:\n return dt.Timestamp(scale=6, timezone=\"UTC\", nullable=cls.default_nullable)\n\n @classmethod\n def _from_sqlglot_TIMESTAMP_S(cls) -> dt.Timestamp:\n return dt.Timestamp(scale=0, nullable=cls.default_nullable)\n\n @classmethod\n def _from_sqlglot_TIMESTAMP_MS(cls) -> dt.Timestamp:\n return dt.Timestamp(scale=3, nullable=cls.default_nullable)\n\n @classmethod\n def _from_sqlglot_TIMESTAMP_NS(cls) -> dt.Timestamp:\n return dt.Timestamp(scale=9, nullable=cls.default_nullable)\n\n @classmethod\n def _from_ibis_GeoSpatial(cls, dtype: dt.GeoSpatial):\n assert (\n dtype.geotype == \"geometry\"\n ), \"DuckDB only supports geometry types; geography types are not supported\"\n return sge.DataType(this=typecode.GEOMETRY)\n\n\nclass TrinoType(SqlglotType):\n dialect = \"trino\"\n default_decimal_precision = 18\n default_decimal_scale = 3\n default_temporal_scale = 3\n\n unknown_type_strings = FrozenDict(\n {\n \"interval year to month\": dt.Interval(\"M\"),\n \"interval day to second\": dt.Interval(\"ms\"),\n }\n )\n\n @classmethod\n def _from_ibis_Interval(cls, dtype: dt.Interval) -> sge.DataType:\n assert dtype.unit is not None, \"interval unit cannot be None\"\n if (short := dtype.unit.short) in (\"Y\", \"Q\", \"M\"):\n return sge.DataType(\n this=typecode.INTERVAL,\n expressions=[\n sge.IntervalSpan(\n this=sge.Var(this=\"YEAR\"), expression=sge.Var(this=\"MONTH\")\n )\n ],\n )\n elif short in (\"D\", \"h\", \"m\", \"s\", \"ms\", \"us\", \"ns\"):\n return sge.DataType(\n this=typecode.INTERVAL,\n expressions=[\n sge.IntervalSpan(\n this=sge.Var(this=\"DAY\"), expression=sge.Var(this=\"SECOND\")\n )\n ],\n )\n else:\n raise NotImplementedError(\n f\"Trino does not support {dtype.unit.name} intervals\"\n )\n\n @classmethod\n def _from_sqlglot_UBIGINT(cls):\n return dt.Decimal(precision=19, scale=0, nullable=cls.default_nullable)\n\n @classmethod\n def _from_ibis_UInt64(cls, dtype):\n return sge.DataType(\n this=typecode.DECIMAL,\n expressions=[\n sge.DataTypeParam(this=sge.convert(19)),\n sge.DataTypeParam(this=sge.convert(0)),\n ],\n )\n\n @classmethod\n def _from_sqlglot_UINT(cls):\n return dt.Int64(nullable=cls.default_nullable)\n\n @classmethod\n def _from_ibis_UInt32(cls, dtype):\n return sge.DataType(this=typecode.BIGINT)\n\n @classmethod\n def _from_sqlglot_USMALLINT(cls):\n return dt.Int32(nullable=cls.default_nullable)\n\n @classmethod\n def _from_ibis_UInt16(cls, dtype):\n return sge.DataType(this=typecode.INT)\n\n @classmethod\n def _from_sqlglot_UTINYINT(cls):\n return dt.Int16(nullable=cls.default_nullable)\n\n @classmethod\n def _from_ibis_UInt8(cls, dtype):\n return sge.DataType(this=typecode.SMALLINT)\n\n\nclass DruidType(SqlglotType):\n # druid doesn't have a sophisticated type system and hive is close enough\n dialect = \"hive\"\n unknown_type_strings = FrozenDict({\"complex<json>\": dt.json})\n\n\nclass OracleType(SqlglotType):\n dialect = \"oracle\"\n\n default_decimal_precision = 38\n default_decimal_scale = 9\n\n default_temporal_scale = 9\n\n unknown_type_strings = FrozenDict({\"raw\": dt.binary})\n\n @classmethod\n def _from_sqlglot_FLOAT(cls) -> dt.Float64:\n return dt.Float64(nullable=cls.default_nullable)\n\n @classmethod\n def _from_sqlglot_DECIMAL(cls, precision=None, scale=None) -> dt.Decimal:\n if scale is None or int(scale.this.this) == 0:\n return dt.Int64(nullable=cls.default_nullable)\n else:\n return 
super()._from_sqlglot_DECIMAL(precision, scale)\n\n @classmethod\n def _from_ibis_String(cls, dtype: dt.String) -> sge.DataType:\n nullable = \" NOT NULL\" if not dtype.nullable else \"\"\n return \"VARCHAR2(4000)\" + nullable\n\n\nclass SnowflakeType(SqlglotType):\n dialect = \"snowflake\"\n\n default_decimal_precision = 38\n default_decimal_scale = 9\n\n default_temporal_scale = 9\n\n @classmethod\n def _from_sqlglot_FLOAT(cls) -> dt.Float64:\n return dt.Float64(nullable=cls.default_nullable)\n\n @classmethod\n def _from_sqlglot_DECIMAL(cls, precision=None, scale=None) -> dt.Decimal:\n if scale is None or int(scale.this.this) == 0:\n return dt.Int64(nullable=cls.default_nullable)\n else:\n return super()._from_sqlglot_DECIMAL(precision, scale)\n\n @classmethod\n def _from_sqlglot_ARRAY(cls, value_type=None) -> dt.Array:\n assert value_type is None\n return dt.Array(dt.json, nullable=cls.default_nullable)\n\n @classmethod\n def _from_ibis_JSON(cls, dtype: dt.JSON) -> sge.DataType:\n return sge.DataType(this=sge.DataType.Type.VARIANT)\n\n @classmethod\n def _from_ibis_Array(cls, dtype: dt.Array) -> sge.DataType:\n return sge.DataType(this=sge.DataType.Type.ARRAY, nested=True)\n\n @classmethod\n def _from_ibis_Map(cls, dtype: dt.Map) -> sge.DataType:\n return sge.DataType(this=sge.DataType.Type.OBJECT, nested=True)\n\n @classmethod\n def _from_ibis_Struct(cls, dtype: dt.Struct) -> sge.DataType:\n return sge.DataType(this=sge.DataType.Type.OBJECT, nested=True)\n\n\nclass SQLiteType(SqlglotType):\n dialect = \"sqlite\"\n\n @classmethod\n def _from_sqlglot_INT(cls) -> dt.Int64:\n return dt.Int64(nullable=cls.default_nullable)\n\n @classmethod\n def _from_sqlglot_FLOAT(cls) -> dt.Float64:\n return dt.Float64(nullable=cls.default_nullable)\n\n @classmethod\n def _from_ibis_Array(cls, dtype: dt.Array) -> NoReturn:\n raise com.UnsupportedBackendType(\"Array types aren't supported in SQLite\")\n\n @classmethod\n def _from_ibis_Map(cls, dtype: dt.Map) -> NoReturn:\n raise com.UnsupportedBackendType(\"Map types aren't supported in SQLite\")\n\n @classmethod\n def _from_ibis_Struct(cls, dtype: dt.Struct) -> sge.DataType:\n raise com.UnsupportedBackendType(\"Struct types aren't supported in SQLite\")\n\n\nclass ImpalaType(SqlglotType):\n dialect = \"impala\"\n\n default_decimal_precision = 9\n default_decimal_scale = 0\n\n @classmethod\n def _from_ibis_Array(cls, dtype: dt.Array) -> NoReturn:\n raise com.UnsupportedBackendType(\"Array types aren't supported in Impala\")\n\n @classmethod\n def _from_ibis_Map(cls, dtype: dt.Map) -> NoReturn:\n raise com.UnsupportedBackendType(\"Map types aren't supported in Impala\")\n\n @classmethod\n def _from_ibis_Struct(cls, dtype: dt.Struct) -> sge.DataType:\n raise com.UnsupportedBackendType(\"Struct types aren't supported in Impala\")\n\n\nclass PySparkType(SqlglotType):\n dialect = \"spark\"\n\n default_decimal_precision = 38\n default_decimal_scale = 18\n\n\nclass BigQueryType(SqlglotType):\n dialect = \"bigquery\"\n\n default_decimal_precision = 38\n default_decimal_scale = 9\n\n @classmethod\n def _from_sqlglot_NUMERIC(cls) -> dt.Decimal:\n return dt.Decimal(\n cls.default_decimal_precision,\n cls.default_decimal_scale,\n nullable=cls.default_nullable,\n )\n\n @classmethod\n def _from_sqlglot_BIGNUMERIC(cls) -> dt.Decimal:\n return dt.Decimal(76, 38, nullable=cls.default_nullable)\n\n @classmethod\n def _from_sqlglot_DATETIME(cls) -> dt.Timestamp:\n return dt.Timestamp(timezone=None, nullable=cls.default_nullable)\n\n @classmethod\n def 
_from_sqlglot_TIMESTAMP(cls) -> dt.Timestamp:\n return dt.Timestamp(timezone=\"UTC\", nullable=cls.default_nullable)\n\n @classmethod\n def _from_sqlglot_GEOGRAPHY(cls) -> dt.GeoSpatial:\n return dt.GeoSpatial(\n geotype=\"geography\", srid=4326, nullable=cls.default_nullable\n )\n\n @classmethod\n def _from_sqlglot_TINYINT(cls) -> dt.Int64:\n return dt.Int64(nullable=cls.default_nullable)\n\n _from_sqlglot_UINT = (\n _from_sqlglot_USMALLINT\n ) = (\n _from_sqlglot_UTINYINT\n ) = _from_sqlglot_INT = _from_sqlglot_SMALLINT = _from_sqlglot_TINYINT\n\n @classmethod\n def _from_sqlglot_UBIGINT(cls) -> NoReturn:\n raise com.UnsupportedBackendType(\n \"Unsigned BIGINT isn't representable in BigQuery INT64\"\n )\n\n @classmethod\n def _from_sqlglot_FLOAT(cls) -> dt.Float64:\n return dt.Float64(nullable=cls.default_nullable)\n\n @classmethod\n def _from_sqlglot_MAP(cls) -> NoReturn:\n raise com.UnsupportedBackendType(\"Maps are not supported in BigQuery\")\n\n @classmethod\n def _from_ibis_Map(cls, dtype: dt.Map) -> NoReturn:\n raise com.UnsupportedBackendType(\"Maps are not supported in BigQuery\")\n\n @classmethod\n def _from_ibis_Timestamp(cls, dtype: dt.Timestamp) -> sge.DataType:\n if dtype.timezone is None:\n return sge.DataType(this=sge.DataType.Type.DATETIME)\n elif dtype.timezone == \"UTC\":\n return sge.DataType(this=sge.DataType.Type.TIMESTAMPTZ)\n else:\n raise com.UnsupportedBackendType(\n \"BigQuery does not support timestamps with timezones other than 'UTC'\"\n )\n\n @classmethod\n def _from_ibis_Decimal(cls, dtype: dt.Decimal) -> sge.DataType:\n precision = dtype.precision\n scale = dtype.scale\n if (precision, scale) == (76, 38):\n return sge.DataType(this=sge.DataType.Type.BIGDECIMAL)\n elif (precision, scale) in ((38, 9), (None, None)):\n return sge.DataType(this=sge.DataType.Type.DECIMAL)\n else:\n raise com.UnsupportedBackendType(\n \"BigQuery only supports decimal types with precision of 38 and \"\n f\"scale of 9 (NUMERIC) or precision of 76 and scale of 38 (BIGNUMERIC). \"\n f\"Current precision: {dtype.precision}. 
Current scale: {dtype.scale}\"\n )\n\n @classmethod\n def _from_ibis_UInt64(cls, dtype: dt.UInt64) -> NoReturn:\n raise com.UnsupportedBackendType(\n f\"Conversion from {dtype} to BigQuery integer type (Int64) is lossy\"\n )\n\n @classmethod\n def _from_ibis_UInt32(cls, dtype: dt.UInt32) -> sge.DataType:\n return sge.DataType(this=sge.DataType.Type.BIGINT)\n\n _from_ibis_UInt8 = _from_ibis_UInt16 = _from_ibis_UInt32\n\n @classmethod\n def _from_ibis_GeoSpatial(cls, dtype: dt.GeoSpatial) -> sge.DataType:\n if (dtype.geotype, dtype.srid) == (\"geography\", 4326):\n return sge.DataType(this=sge.DataType.Type.GEOGRAPHY)\n else:\n raise com.UnsupportedBackendType(\n \"BigQuery geography uses points on WGS84 reference ellipsoid.\"\n f\"Current geotype: {dtype.geotype}, Current srid: {dtype.srid}\"\n )\n\n\nclass BigQueryUDFType(BigQueryType):\n @classmethod\n def _from_ibis_Int64(cls, dtype: dt.Int64) -> NoReturn:\n raise com.UnsupportedBackendType(\n \"int64 is not a supported input or output type in BigQuery UDFs; use float64 instead\"\n )\n\n\nclass ExasolType(SqlglotType):\n dialect = \"exasol\"\n\n default_temporal_scale = 3\n\n default_decimal_precision = 18\n default_decimal_scale = 0\n\n @classmethod\n def _from_ibis_String(cls, dtype: dt.String) -> sge.DataType:\n return sge.DataType(\n this=sge.DataType.Type.VARCHAR,\n expressions=[sge.DataTypeParam(this=sge.convert(2_000_000))],\n )\n\n @classmethod\n def _from_sqlglot_DECIMAL(\n cls,\n precision: sge.DataTypeParam | None = None,\n scale: sge.DataTypeParam | None = None,\n ) -> dt.Decimal:\n if precision is None:\n precision = cls.default_decimal_precision\n else:\n precision = int(precision.this.this)\n\n if scale is None:\n scale = cls.default_decimal_scale\n else:\n scale = int(scale.this.this)\n\n if not scale:\n if 0 < precision <= 3:\n return dt.Int8(nullable=cls.default_nullable)\n elif 3 < precision <= 9:\n return dt.Int16(nullable=cls.default_nullable)\n elif 9 < precision <= 18:\n return dt.Int32(nullable=cls.default_nullable)\n elif 18 < precision <= 36:\n return dt.Int64(nullable=cls.default_nullable)\n else:\n raise com.UnsupportedBackendType(\n \"Decimal precision is too large; Exasol supports precision up to 36.\"\n )\n return dt.Decimal(precision, scale, nullable=cls.default_nullable)\n\n @classmethod\n def _from_ibis_Array(cls, dtype: dt.Array) -> NoReturn:\n raise com.UnsupportedBackendType(\"Arrays not supported in Exasol\")\n\n @classmethod\n def _from_ibis_Map(cls, dtype: dt.Map) -> NoReturn:\n raise com.UnsupportedBackendType(\"Maps not supported in Exasol\")\n\n @classmethod\n def _from_ibis_Struct(cls, dtype: dt.Struct) -> NoReturn:\n raise com.UnsupportedBackendType(\"Structs not supported in Exasol\")\n\n @classmethod\n def _from_ibis_Timestamp(cls, dtype: dt.Timestamp) -> sge.DataType:\n code = typecode.TIMESTAMP if dtype.timezone is None else typecode.TIMESTAMPTZ\n return sge.DataType(this=code)\n\n @classmethod\n def _from_sqlglot_ARRAY(cls, value_type: sge.DataType) -> NoReturn:\n raise com.UnsupportedBackendType(\"Arrays not supported in Exasol\")\n\n @classmethod\n def _from_sqlglot_MAP(cls, key: sge.DataType, value: sge.DataType) -> NoReturn:\n raise com.UnsupportedBackendType(\"Maps not supported in Exasol\")\n\n @classmethod\n def _from_sqlglot_STRUCT(cls, *cols: sge.ColumnDef) -> NoReturn:\n raise com.UnsupportedBackendType(\"Structs not supported in Exasol\")\n\n\nclass MSSQLType(SqlglotType):\n dialect = \"mssql\"\n\n @classmethod\n def _from_sqlglot_BIT(cls):\n return 
dt.Boolean(nullable=cls.default_nullable)\n\n @classmethod\n def _from_sqlglot_IMAGE(cls):\n return dt.Binary(nullable=cls.default_nullable)\n\n @classmethod\n def _from_sqlglot_DATETIME(cls, n=None):\n return dt.Timestamp(\n scale=n if n is None else int(n.this.this), nullable=cls.default_nullable\n )\n\n @classmethod\n def _from_sqlglot_TIMESTAMP(cls):\n return dt.Binary(nullable=False)\n\n @classmethod\n def _from_ibis_String(cls, dtype: dt.String) -> sge.DataType:\n return sge.DataType(\n this=typecode.VARCHAR,\n expressions=[sge.DataTypeParam(this=sge.Var(this=\"max\"))],\n )\n\n @classmethod\n def _from_ibis_Array(cls, dtype: dt.String) -> sge.DataType:\n raise com.UnsupportedBackendType(\"SQL Server does not support arrays\")\n\n @classmethod\n def _from_ibis_Map(cls, dtype: dt.String) -> sge.DataType:\n raise com.UnsupportedBackendType(\"SQL Server does not support \")\n\n @classmethod\n def _from_ibis_Struct(cls, dtype: dt.String) -> sge.DataType:\n raise com.UnsupportedBackendType(\"SQL Server does not support structs\")\n\n @classmethod\n def _from_sqlglot_ARRAY(cls) -> sge.DataType:\n raise com.UnsupportedBackendType(\"SQL Server does not support arrays\")\n\n @classmethod\n def _from_sqlglot_MAP(cls) -> sge.DataType:\n raise com.UnsupportedBackendType(\"SQL Server does not support map\")\n\n @classmethod\n def _from_sqlglot_STRUCT(cls) -> sge.DataType:\n raise com.UnsupportedBackendType(\"SQL Server does not support structs\")\n\n\nclass ClickHouseType(SqlglotType):\n dialect = \"clickhouse\"\n default_decimal_precision = None\n default_decimal_scale = None\n default_nullable = False\n\n unknown_type_strings = FrozenDict(\n {\n \"ipv4\": dt.INET(nullable=default_nullable),\n \"ipv6\": dt.INET(nullable=default_nullable),\n \"object('json')\": dt.JSON(nullable=default_nullable),\n \"array(null)\": dt.Array(dt.null, nullable=default_nullable),\n \"array(nothing)\": dt.Array(dt.null, nullable=default_nullable),\n }\n )\n\n @classmethod\n def from_ibis(cls, dtype: dt.DataType) -> sge.DataType:\n \"\"\"Convert a sqlglot type to an ibis type.\"\"\"\n typ = super().from_ibis(dtype)\n if dtype.nullable and not dtype.is_map():\n # map cannot be nullable in clickhouse\n return sge.DataType(this=typecode.NULLABLE, expressions=[typ])\n else:\n return typ\n\n @classmethod\n def _from_sqlglot_NULLABLE(cls, inner_type: sge.DataType) -> dt.DataType:\n return cls.to_ibis(inner_type, nullable=True)\n\n @classmethod\n def _from_sqlglot_DATETIME(\n cls, timezone: sge.DataTypeParam | None = None\n ) -> dt.Timestamp:\n return dt.Timestamp(\n scale=0,\n timezone=None if timezone is None else timezone.this.this,\n nullable=cls.default_nullable,\n )\n\n @classmethod\n def _from_sqlglot_DATETIME64(\n cls,\n scale: sge.DataTypeSize | None = None,\n timezone: sge.Literal | None = None,\n ) -> dt.Timestamp:\n return dt.Timestamp(\n timezone=None if timezone is None else timezone.this.this,\n scale=int(scale.this.this),\n nullable=cls.default_nullable,\n )\n\n @classmethod\n def _from_sqlglot_LOWCARDINALITY(cls, inner_type: sge.DataType) -> dt.DataType:\n return cls.to_ibis(inner_type)\n\n @classmethod\n def _from_sqlglot_NESTED(cls, *fields: sge.DataType) -> dt.Struct:\n fields = {\n field.name: dt.Array(\n cls.to_ibis(field.args[\"kind\"]), nullable=cls.default_nullable\n )\n for field in fields\n }\n return dt.Struct(fields, nullable=cls.default_nullable)\n\n @classmethod\n def _from_ibis_Timestamp(cls, dtype: dt.Timestamp) -> sge.DataType:\n if dtype.timezone is None:\n timezone = None\n else:\n 
timezone = sge.DataTypeParam(this=sge.Literal.string(dtype.timezone))\n\n if dtype.scale is None:\n return sge.DataType(this=typecode.DATETIME, expressions=[timezone])\n else:\n scale = sge.DataTypeParam(this=sge.Literal.number(dtype.scale))\n return sge.DataType(this=typecode.DATETIME64, expressions=[scale, timezone])\n\n @classmethod\n def _from_ibis_Map(cls, dtype: dt.Map) -> sge.DataType:\n # key cannot be nullable in clickhouse\n key_type = cls.from_ibis(dtype.key_type.copy(nullable=False))\n value_type = cls.from_ibis(dtype.value_type)\n return sge.DataType(this=typecode.MAP, expressions=[key_type, value_type])\n\n\nclass FlinkType(SqlglotType):\n dialect = \"flink\"\n default_decimal_precision = 38\n default_decimal_scale = 18\n\n @classmethod\n def _from_ibis_Binary(cls, dtype: dt.Binary) -> sge.DataType:\n return sge.DataType(this=sge.DataType.Type.VARBINARY)\n", "path": "ibis/backends/base/sqlglot/datatypes.py" } ]
[ { "content": "from __future__ import annotations\n\nfrom functools import partial\nfrom typing import NoReturn\n\nimport sqlglot as sg\nimport sqlglot.expressions as sge\n\nimport ibis.common.exceptions as com\nimport ibis.expr.datatypes as dt\nfrom ibis.common.collections import FrozenDict\nfrom ibis.formats import TypeMapper\n\ntypecode = sge.DataType.Type\n\n_from_sqlglot_types = {\n typecode.BIGDECIMAL: partial(dt.Decimal, 76, 38),\n typecode.BIGINT: dt.Int64,\n typecode.BINARY: dt.Binary,\n typecode.BOOLEAN: dt.Boolean,\n typecode.CHAR: dt.String,\n typecode.DATE: dt.Date,\n typecode.DOUBLE: dt.Float64,\n typecode.ENUM: dt.String,\n typecode.ENUM8: dt.String,\n typecode.ENUM16: dt.String,\n typecode.FLOAT: dt.Float32,\n typecode.FIXEDSTRING: dt.String,\n typecode.GEOMETRY: partial(dt.GeoSpatial, geotype=\"geometry\"),\n typecode.GEOGRAPHY: partial(dt.GeoSpatial, geotype=\"geography\"),\n typecode.HSTORE: partial(dt.Map, dt.string, dt.string),\n typecode.INET: dt.INET,\n typecode.INT128: partial(dt.Decimal, 38, 0),\n typecode.INT256: partial(dt.Decimal, 76, 0),\n typecode.INT: dt.Int32,\n typecode.IPADDRESS: dt.INET,\n typecode.JSON: dt.JSON,\n typecode.JSONB: dt.JSON,\n typecode.LONGBLOB: dt.Binary,\n typecode.LONGTEXT: dt.String,\n typecode.MEDIUMBLOB: dt.Binary,\n typecode.MEDIUMTEXT: dt.String,\n typecode.MONEY: dt.Int64,\n typecode.NCHAR: dt.String,\n typecode.UUID: dt.UUID,\n typecode.NULL: dt.Null,\n typecode.NVARCHAR: dt.String,\n typecode.OBJECT: partial(dt.Map, dt.string, dt.json),\n typecode.SMALLINT: dt.Int16,\n typecode.SMALLMONEY: dt.Int32,\n typecode.TEXT: dt.String,\n typecode.TIME: dt.Time,\n typecode.TIMETZ: dt.Time,\n typecode.TINYBLOB: dt.Binary,\n typecode.TINYINT: dt.Int8,\n typecode.TINYTEXT: dt.String,\n typecode.UBIGINT: dt.UInt64,\n typecode.UINT: dt.UInt32,\n typecode.USMALLINT: dt.UInt16,\n typecode.UTINYINT: dt.UInt8,\n typecode.UUID: dt.UUID,\n typecode.VARBINARY: dt.Binary,\n typecode.VARCHAR: dt.String,\n typecode.VARIANT: dt.JSON,\n typecode.UNIQUEIDENTIFIER: dt.UUID,\n typecode.SET: partial(dt.Array, dt.string),\n #############################\n # Unsupported sqlglot types #\n #############################\n # BIT = auto() # mysql\n # BIGSERIAL = auto()\n # DATETIME64 = auto() # clickhouse\n # ENUM = auto()\n # INT4RANGE = auto()\n # INT4MULTIRANGE = auto()\n # INT8RANGE = auto()\n # INT8MULTIRANGE = auto()\n # NUMRANGE = auto()\n # NUMMULTIRANGE = auto()\n # TSRANGE = auto()\n # TSMULTIRANGE = auto()\n # TSTZRANGE = auto()\n # TSTZMULTIRANGE = auto()\n # DATERANGE = auto()\n # DATEMULTIRANGE = auto()\n # HLLSKETCH = auto()\n # IMAGE = auto()\n # IPPREFIX = auto()\n # ROWVERSION = auto()\n # SERIAL = auto()\n # SET = auto()\n # SMALLSERIAL = auto()\n # SUPER = auto()\n # TIMESTAMPLTZ = auto()\n # UNKNOWN = auto() # Sentinel value, useful for type annotation\n # UINT128 = auto()\n # UINT256 = auto()\n # USERDEFINED = \"USER-DEFINED\"\n # XML = auto()\n}\n\n_to_sqlglot_types = {\n dt.Null: typecode.NULL,\n dt.Boolean: typecode.BOOLEAN,\n dt.Int8: typecode.TINYINT,\n dt.Int16: typecode.SMALLINT,\n dt.Int32: typecode.INT,\n dt.Int64: typecode.BIGINT,\n dt.UInt8: typecode.UTINYINT,\n dt.UInt16: typecode.USMALLINT,\n dt.UInt32: typecode.UINT,\n dt.UInt64: typecode.UBIGINT,\n dt.Float16: typecode.FLOAT,\n dt.Float32: typecode.FLOAT,\n dt.Float64: typecode.DOUBLE,\n dt.String: typecode.VARCHAR,\n dt.Binary: typecode.VARBINARY,\n dt.JSON: typecode.JSON,\n dt.INET: typecode.INET,\n dt.UUID: typecode.UUID,\n dt.MACADDR: typecode.VARCHAR,\n dt.Date: 
typecode.DATE,\n dt.Time: typecode.TIME,\n}\n\n\nclass SqlglotType(TypeMapper):\n dialect: str | None = None\n \"\"\"The dialect this parser is for.\"\"\"\n\n default_nullable = True\n \"\"\"Default nullability when not specified.\"\"\"\n\n default_decimal_precision: int | None = None\n \"\"\"Default decimal precision when not specified.\"\"\"\n\n default_decimal_scale: int | None = None\n \"\"\"Default decimal scale when not specified.\"\"\"\n\n default_temporal_scale: int | None = None\n \"\"\"Default temporal scale when not specified.\"\"\"\n\n default_interval_precision: str | None = None\n \"\"\"Default interval precision when not specified.\"\"\"\n\n unknown_type_strings: dict[str, dt.DataType] = {}\n \"\"\"String to ibis datatype mapping to use when converting unknown types.\"\"\"\n\n @classmethod\n def to_ibis(cls, typ: sge.DataType, nullable: bool | None = None) -> dt.DataType:\n \"\"\"Convert a sqlglot type to an ibis type.\"\"\"\n typecode = typ.this\n\n # broken sqlglot thing\n if isinstance(typecode, sge.Interval):\n typ = sge.DataType(\n this=sge.DataType.Type.INTERVAL,\n expressions=[sge.IntervalSpan(this=typecode.unit)],\n )\n typecode = typ.this\n\n if method := getattr(cls, f\"_from_sqlglot_{typecode.name}\", None):\n dtype = method(*typ.expressions)\n else:\n dtype = _from_sqlglot_types[typecode](nullable=cls.default_nullable)\n\n if nullable is not None:\n return dtype.copy(nullable=nullable)\n else:\n return dtype\n\n @classmethod\n def from_ibis(cls, dtype: dt.DataType) -> sge.DataType:\n \"\"\"Convert a sqlglot type to an ibis type.\"\"\"\n\n if method := getattr(cls, f\"_from_ibis_{dtype.name}\", None):\n return method(dtype)\n else:\n return sge.DataType(this=_to_sqlglot_types[type(dtype)])\n\n @classmethod\n def from_string(cls, text: str, nullable: bool | None = None) -> dt.DataType:\n if dtype := cls.unknown_type_strings.get(text.lower()):\n return dtype\n\n sgtype = sg.parse_one(text, into=sge.DataType, read=cls.dialect)\n return cls.to_ibis(sgtype, nullable=nullable)\n\n @classmethod\n def to_string(cls, dtype: dt.DataType) -> str:\n return cls.from_ibis(dtype).sql(dialect=cls.dialect)\n\n @classmethod\n def _from_sqlglot_ARRAY(cls, value_type: sge.DataType) -> dt.Array:\n return dt.Array(cls.to_ibis(value_type), nullable=cls.default_nullable)\n\n @classmethod\n def _from_sqlglot_MAP(\n cls, key_type: sge.DataType, value_type: sge.DataType\n ) -> dt.Map:\n return dt.Map(\n cls.to_ibis(key_type),\n cls.to_ibis(value_type),\n nullable=cls.default_nullable,\n )\n\n @classmethod\n def _from_sqlglot_STRUCT(cls, *fields: sge.ColumnDef) -> dt.Struct:\n types = {}\n for i, field in enumerate(fields):\n if isinstance(field, sge.ColumnDef):\n types[field.name] = cls.to_ibis(field.args[\"kind\"])\n else:\n types[f\"f{i:d}\"] = cls.from_string(str(field))\n return dt.Struct(types, nullable=cls.default_nullable)\n\n @classmethod\n def _from_sqlglot_TIMESTAMP(cls, scale=None) -> dt.Timestamp:\n return dt.Timestamp(\n scale=cls.default_temporal_scale if scale is None else int(scale.this.this),\n nullable=cls.default_nullable,\n )\n\n @classmethod\n def _from_sqlglot_TIMESTAMPTZ(cls, scale=None) -> dt.Timestamp:\n return dt.Timestamp(\n timezone=\"UTC\",\n scale=cls.default_temporal_scale if scale is None else int(scale.this.this),\n nullable=cls.default_nullable,\n )\n\n @classmethod\n def _from_sqlglot_TIMESTAMPLTZ(cls, scale=None) -> dt.Timestamp:\n return dt.Timestamp(\n timezone=\"UTC\",\n scale=cls.default_temporal_scale if scale is None else int(scale.this.this),\n 
nullable=cls.default_nullable,\n )\n\n @classmethod\n def _from_sqlglot_INTERVAL(\n cls, precision_or_span: sge.IntervalSpan | None = None\n ) -> dt.Interval:\n nullable = cls.default_nullable\n if precision_or_span is None:\n precision_or_span = cls.default_interval_precision\n\n if isinstance(precision_or_span, str):\n return dt.Interval(precision_or_span, nullable=nullable)\n elif isinstance(precision_or_span, sge.IntervalSpan):\n if (expression := precision_or_span.expression) is not None:\n unit = expression.this\n else:\n unit = precision_or_span.this.this\n return dt.Interval(unit=unit, nullable=nullable)\n elif isinstance(precision_or_span, sge.Var):\n return dt.Interval(unit=precision_or_span.this, nullable=nullable)\n elif precision_or_span is None:\n raise com.IbisTypeError(\"Interval precision is None\")\n else:\n raise com.IbisTypeError(precision_or_span)\n\n @classmethod\n def _from_sqlglot_DECIMAL(\n cls,\n precision: sge.DataTypeParam | None = None,\n scale: sge.DataTypeParam | None = None,\n ) -> dt.Decimal:\n if precision is None:\n precision = cls.default_decimal_precision\n else:\n precision = int(precision.this.this)\n\n if scale is None:\n scale = cls.default_decimal_scale\n else:\n scale = int(scale.this.this)\n\n return dt.Decimal(precision, scale, nullable=cls.default_nullable)\n\n @classmethod\n def _from_sqlglot_GEOMETRY(\n cls, arg: sge.DataTypeParam | None = None\n ) -> sge.DataType:\n if arg is not None:\n return getattr(dt, str(arg))(nullable=cls.default_nullable)\n return dt.GeoSpatial(geotype=\"geometry\", nullable=cls.default_nullable)\n\n @classmethod\n def _from_sqlglot_GEOGRAPHY(cls) -> sge.DataType:\n return dt.GeoSpatial(geotype=\"geography\", nullable=cls.default_nullable)\n\n @classmethod\n def _from_ibis_Interval(cls, dtype: dt.Interval) -> sge.DataType:\n assert dtype.unit is not None, \"interval unit cannot be None\"\n return sge.DataType(\n this=typecode.INTERVAL,\n expressions=[sge.Var(this=dtype.unit.name)],\n )\n\n @classmethod\n def _from_ibis_Array(cls, dtype: dt.Array) -> sge.DataType:\n value_type = cls.from_ibis(dtype.value_type)\n return sge.DataType(this=typecode.ARRAY, expressions=[value_type], nested=True)\n\n @classmethod\n def _from_ibis_Map(cls, dtype: dt.Map) -> sge.DataType:\n key_type = cls.from_ibis(dtype.key_type)\n value_type = cls.from_ibis(dtype.value_type)\n return sge.DataType(\n this=typecode.MAP, expressions=[key_type, value_type], nested=True\n )\n\n @classmethod\n def _from_ibis_Struct(cls, dtype: dt.Struct) -> sge.DataType:\n fields = [\n sge.ColumnDef(this=str(name), kind=cls.from_ibis(field))\n for name, field in dtype.items()\n ]\n return sge.DataType(this=typecode.STRUCT, expressions=fields, nested=True)\n\n @classmethod\n def _from_ibis_Decimal(cls, dtype: dt.Decimal) -> sge.DataType:\n if (precision := dtype.precision) is None:\n precision = cls.default_decimal_precision\n\n if (scale := dtype.scale) is None:\n scale = cls.default_decimal_scale\n\n expressions = []\n\n if precision is not None:\n expressions.append(sge.DataTypeParam(this=sge.Literal.number(precision)))\n\n if scale is not None:\n if precision is None:\n raise com.IbisTypeError(\n \"Decimal scale cannot be specified without precision\"\n )\n expressions.append(sge.DataTypeParam(this=sge.Literal.number(scale)))\n\n return sge.DataType(this=typecode.DECIMAL, expressions=expressions or None)\n\n @classmethod\n def _from_ibis_Timestamp(cls, dtype: dt.Timestamp) -> sge.DataType:\n code = typecode.TIMESTAMP if dtype.timezone is None else 
typecode.TIMESTAMPTZ\n if dtype.scale is not None:\n scale = sge.DataTypeParam(this=sge.Literal.number(dtype.scale))\n return sge.DataType(this=code, expressions=[scale])\n else:\n return sge.DataType(this=code)\n\n @classmethod\n def _from_ibis_GeoSpatial(cls, dtype: dt.GeoSpatial):\n if (geotype := dtype.geotype) is not None:\n return sge.DataType(this=getattr(typecode, geotype.upper()))\n return sge.DataType(this=typecode.GEOMETRY)\n\n _from_ibis_Point = (\n _from_ibis_LineString\n ) = (\n _from_ibis_Polygon\n ) = (\n _from_ibis_MultiLineString\n ) = _from_ibis_MultiPoint = _from_ibis_MultiPolygon = _from_ibis_GeoSpatial\n\n\nclass PostgresType(SqlglotType):\n dialect = \"postgres\"\n default_interval_precision = \"s\"\n default_temporal_scale = 6\n\n unknown_type_strings = FrozenDict(\n {\n \"vector\": dt.unknown,\n \"tsvector\": dt.unknown,\n \"line\": dt.linestring,\n \"line[]\": dt.Array(dt.linestring),\n \"polygon\": dt.polygon,\n \"polygon[]\": dt.Array(dt.polygon),\n \"point\": dt.point,\n \"point[]\": dt.Array(dt.point),\n \"macaddr\": dt.macaddr,\n \"macaddr[]\": dt.Array(dt.macaddr),\n \"macaddr8\": dt.macaddr,\n \"macaddr8[]\": dt.Array(dt.macaddr),\n }\n )\n\n @classmethod\n def _from_ibis_Map(cls, dtype: dt.Map) -> sge.DataType:\n if not dtype.key_type.is_string():\n raise com.IbisTypeError(\"Postgres only supports string keys in maps\")\n if not dtype.value_type.is_string():\n raise com.IbisTypeError(\"Postgres only supports string values in maps\")\n return sge.DataType(this=typecode.HSTORE)\n\n\nclass RisingWaveType(PostgresType):\n dialect = \"risingwave\"\n\n @classmethod\n def _from_ibis_Timestamp(cls, dtype: dt.Timestamp) -> sge.DataType:\n if dtype.timezone is not None:\n return sge.DataType(this=typecode.TIMESTAMPTZ)\n return sge.DataType(this=typecode.TIMESTAMP)\n\n @classmethod\n def _from_ibis_Decimal(cls, dtype: dt.Decimal) -> sge.DataType:\n return sge.DataType(this=typecode.DECIMAL)\n\n @classmethod\n def _from_ibis_UUID(cls, dtype: dt.UUID) -> sge.DataType:\n return sge.DataType(this=typecode.VARCHAR)\n\n\nclass DataFusionType(PostgresType):\n unknown_type_strings = {\n \"utf8\": dt.string,\n \"float64\": dt.float64,\n }\n\n\nclass MySQLType(SqlglotType):\n dialect = \"mysql\"\n # these are mysql's defaults, see\n # https://dev.mysql.com/doc/refman/8.0/en/fixed-point-types.html\n default_decimal_precision = 10\n default_decimal_scale = 0\n\n unknown_type_strings = FrozenDict(\n {\n \"year(4)\": dt.int8,\n \"inet6\": dt.inet,\n }\n )\n\n @classmethod\n def _from_sqlglot_BIT(cls, nbits: sge.DataTypeParam) -> dt.Integer:\n nbits = int(nbits.this.this)\n if nbits > 32:\n return dt.Int64(nullable=cls.default_nullable)\n elif nbits > 16:\n return dt.Int32(nullable=cls.default_nullable)\n elif nbits > 8:\n return dt.Int16(nullable=cls.default_nullable)\n else:\n return dt.Int8(nullable=cls.default_nullable)\n\n @classmethod\n def _from_sqlglot_DATETIME(cls) -> dt.Timestamp:\n return dt.Timestamp(nullable=cls.default_nullable)\n\n @classmethod\n def _from_sqlglot_TIMESTAMP(cls) -> dt.Timestamp:\n return dt.Timestamp(timezone=\"UTC\", nullable=cls.default_nullable)\n\n @classmethod\n def _from_ibis_String(cls, dtype: dt.String) -> sge.DataType:\n return sge.DataType(this=typecode.TEXT)\n\n\nclass DuckDBType(SqlglotType):\n dialect = \"duckdb\"\n default_decimal_precision = 18\n default_decimal_scale = 3\n default_interval_precision = \"us\"\n\n unknown_type_strings = FrozenDict({\"wkb_blob\": dt.binary})\n\n @classmethod\n def _from_sqlglot_TIMESTAMP(cls) -> 
dt.Timestamp:\n return dt.Timestamp(scale=6, nullable=cls.default_nullable)\n\n @classmethod\n def _from_sqlglot_TIMESTAMPTZ(cls) -> dt.Timestamp:\n return dt.Timestamp(scale=6, timezone=\"UTC\", nullable=cls.default_nullable)\n\n @classmethod\n def _from_sqlglot_TIMESTAMP_S(cls) -> dt.Timestamp:\n return dt.Timestamp(scale=0, nullable=cls.default_nullable)\n\n @classmethod\n def _from_sqlglot_TIMESTAMP_MS(cls) -> dt.Timestamp:\n return dt.Timestamp(scale=3, nullable=cls.default_nullable)\n\n @classmethod\n def _from_sqlglot_TIMESTAMP_NS(cls) -> dt.Timestamp:\n return dt.Timestamp(scale=9, nullable=cls.default_nullable)\n\n @classmethod\n def _from_ibis_GeoSpatial(cls, dtype: dt.GeoSpatial):\n assert (\n dtype.geotype == \"geometry\"\n ), \"DuckDB only supports geometry types; geography types are not supported\"\n return sge.DataType(this=typecode.GEOMETRY)\n\n\nclass TrinoType(SqlglotType):\n dialect = \"trino\"\n default_decimal_precision = 18\n default_decimal_scale = 3\n default_temporal_scale = 3\n\n unknown_type_strings = FrozenDict(\n {\n \"interval year to month\": dt.Interval(\"M\"),\n \"interval day to second\": dt.Interval(\"ms\"),\n }\n )\n\n @classmethod\n def _from_ibis_Interval(cls, dtype: dt.Interval) -> sge.DataType:\n assert dtype.unit is not None, \"interval unit cannot be None\"\n if (short := dtype.unit.short) in (\"Y\", \"Q\", \"M\"):\n return sge.DataType(\n this=typecode.INTERVAL,\n expressions=[\n sge.IntervalSpan(\n this=sge.Var(this=\"YEAR\"), expression=sge.Var(this=\"MONTH\")\n )\n ],\n )\n elif short in (\"D\", \"h\", \"m\", \"s\", \"ms\", \"us\", \"ns\"):\n return sge.DataType(\n this=typecode.INTERVAL,\n expressions=[\n sge.IntervalSpan(\n this=sge.Var(this=\"DAY\"), expression=sge.Var(this=\"SECOND\")\n )\n ],\n )\n else:\n raise NotImplementedError(\n f\"Trino does not support {dtype.unit.name} intervals\"\n )\n\n @classmethod\n def _from_sqlglot_UBIGINT(cls):\n return dt.Decimal(precision=19, scale=0, nullable=cls.default_nullable)\n\n @classmethod\n def _from_ibis_UInt64(cls, dtype):\n return sge.DataType(\n this=typecode.DECIMAL,\n expressions=[\n sge.DataTypeParam(this=sge.convert(19)),\n sge.DataTypeParam(this=sge.convert(0)),\n ],\n )\n\n @classmethod\n def _from_sqlglot_UINT(cls):\n return dt.Int64(nullable=cls.default_nullable)\n\n @classmethod\n def _from_ibis_UInt32(cls, dtype):\n return sge.DataType(this=typecode.BIGINT)\n\n @classmethod\n def _from_sqlglot_USMALLINT(cls):\n return dt.Int32(nullable=cls.default_nullable)\n\n @classmethod\n def _from_ibis_UInt16(cls, dtype):\n return sge.DataType(this=typecode.INT)\n\n @classmethod\n def _from_sqlglot_UTINYINT(cls):\n return dt.Int16(nullable=cls.default_nullable)\n\n @classmethod\n def _from_ibis_UInt8(cls, dtype):\n return sge.DataType(this=typecode.SMALLINT)\n\n\nclass DruidType(SqlglotType):\n # druid doesn't have a sophisticated type system and hive is close enough\n dialect = \"hive\"\n unknown_type_strings = FrozenDict({\"complex<json>\": dt.json})\n\n\nclass OracleType(SqlglotType):\n dialect = \"oracle\"\n\n default_decimal_precision = 38\n default_decimal_scale = 9\n\n default_temporal_scale = 9\n\n unknown_type_strings = FrozenDict({\"raw\": dt.binary})\n\n @classmethod\n def _from_sqlglot_FLOAT(cls) -> dt.Float64:\n return dt.Float64(nullable=cls.default_nullable)\n\n @classmethod\n def _from_sqlglot_DECIMAL(cls, precision=None, scale=None) -> dt.Decimal:\n if scale is None or int(scale.this.this) == 0:\n return dt.Int64(nullable=cls.default_nullable)\n else:\n return 
super()._from_sqlglot_DECIMAL(precision, scale)\n\n @classmethod\n def _from_ibis_String(cls, dtype: dt.String) -> sge.DataType:\n nullable = \" NOT NULL\" if not dtype.nullable else \"\"\n return \"VARCHAR2(4000)\" + nullable\n\n\nclass SnowflakeType(SqlglotType):\n dialect = \"snowflake\"\n\n default_decimal_precision = 38\n default_decimal_scale = 9\n\n default_temporal_scale = 9\n\n @classmethod\n def _from_sqlglot_FLOAT(cls) -> dt.Float64:\n return dt.Float64(nullable=cls.default_nullable)\n\n @classmethod\n def _from_sqlglot_DECIMAL(cls, precision=None, scale=None) -> dt.Decimal:\n if scale is None or int(scale.this.this) == 0:\n return dt.Int64(nullable=cls.default_nullable)\n else:\n return super()._from_sqlglot_DECIMAL(precision, scale)\n\n @classmethod\n def _from_sqlglot_ARRAY(cls, value_type=None) -> dt.Array:\n assert value_type is None\n return dt.Array(dt.json, nullable=cls.default_nullable)\n\n @classmethod\n def _from_ibis_JSON(cls, dtype: dt.JSON) -> sge.DataType:\n return sge.DataType(this=sge.DataType.Type.VARIANT)\n\n @classmethod\n def _from_ibis_Array(cls, dtype: dt.Array) -> sge.DataType:\n return sge.DataType(this=sge.DataType.Type.ARRAY, nested=True)\n\n @classmethod\n def _from_ibis_Map(cls, dtype: dt.Map) -> sge.DataType:\n return sge.DataType(this=sge.DataType.Type.OBJECT, nested=True)\n\n @classmethod\n def _from_ibis_Struct(cls, dtype: dt.Struct) -> sge.DataType:\n return sge.DataType(this=sge.DataType.Type.OBJECT, nested=True)\n\n\nclass SQLiteType(SqlglotType):\n dialect = \"sqlite\"\n\n @classmethod\n def _from_sqlglot_INT(cls) -> dt.Int64:\n return dt.Int64(nullable=cls.default_nullable)\n\n @classmethod\n def _from_sqlglot_FLOAT(cls) -> dt.Float64:\n return dt.Float64(nullable=cls.default_nullable)\n\n @classmethod\n def _from_ibis_Array(cls, dtype: dt.Array) -> NoReturn:\n raise com.UnsupportedBackendType(\"Array types aren't supported in SQLite\")\n\n @classmethod\n def _from_ibis_Map(cls, dtype: dt.Map) -> NoReturn:\n raise com.UnsupportedBackendType(\"Map types aren't supported in SQLite\")\n\n @classmethod\n def _from_ibis_Struct(cls, dtype: dt.Struct) -> sge.DataType:\n raise com.UnsupportedBackendType(\"Struct types aren't supported in SQLite\")\n\n\nclass ImpalaType(SqlglotType):\n dialect = \"impala\"\n\n default_decimal_precision = 9\n default_decimal_scale = 0\n\n @classmethod\n def _from_ibis_Array(cls, dtype: dt.Array) -> NoReturn:\n raise com.UnsupportedBackendType(\"Array types aren't supported in Impala\")\n\n @classmethod\n def _from_ibis_Map(cls, dtype: dt.Map) -> NoReturn:\n raise com.UnsupportedBackendType(\"Map types aren't supported in Impala\")\n\n @classmethod\n def _from_ibis_Struct(cls, dtype: dt.Struct) -> sge.DataType:\n raise com.UnsupportedBackendType(\"Struct types aren't supported in Impala\")\n\n\nclass PySparkType(SqlglotType):\n dialect = \"spark\"\n\n default_decimal_precision = 38\n default_decimal_scale = 18\n\n\nclass BigQueryType(SqlglotType):\n dialect = \"bigquery\"\n\n default_decimal_precision = 38\n default_decimal_scale = 9\n\n @classmethod\n def _from_sqlglot_NUMERIC(cls) -> dt.Decimal:\n return dt.Decimal(\n cls.default_decimal_precision,\n cls.default_decimal_scale,\n nullable=cls.default_nullable,\n )\n\n @classmethod\n def _from_sqlglot_BIGNUMERIC(cls) -> dt.Decimal:\n return dt.Decimal(76, 38, nullable=cls.default_nullable)\n\n @classmethod\n def _from_sqlglot_DATETIME(cls) -> dt.Timestamp:\n return dt.Timestamp(timezone=None, nullable=cls.default_nullable)\n\n @classmethod\n def 
_from_sqlglot_TIMESTAMP(cls) -> dt.Timestamp:\n return dt.Timestamp(timezone=\"UTC\", nullable=cls.default_nullable)\n\n @classmethod\n def _from_sqlglot_GEOGRAPHY(cls) -> dt.GeoSpatial:\n return dt.GeoSpatial(\n geotype=\"geography\", srid=4326, nullable=cls.default_nullable\n )\n\n @classmethod\n def _from_sqlglot_TINYINT(cls) -> dt.Int64:\n return dt.Int64(nullable=cls.default_nullable)\n\n _from_sqlglot_UINT = (\n _from_sqlglot_USMALLINT\n ) = (\n _from_sqlglot_UTINYINT\n ) = _from_sqlglot_INT = _from_sqlglot_SMALLINT = _from_sqlglot_TINYINT\n\n @classmethod\n def _from_sqlglot_UBIGINT(cls) -> NoReturn:\n raise com.UnsupportedBackendType(\n \"Unsigned BIGINT isn't representable in BigQuery INT64\"\n )\n\n @classmethod\n def _from_sqlglot_FLOAT(cls) -> dt.Float64:\n return dt.Float64(nullable=cls.default_nullable)\n\n @classmethod\n def _from_sqlglot_MAP(cls) -> NoReturn:\n raise com.UnsupportedBackendType(\"Maps are not supported in BigQuery\")\n\n @classmethod\n def _from_ibis_Map(cls, dtype: dt.Map) -> NoReturn:\n raise com.UnsupportedBackendType(\"Maps are not supported in BigQuery\")\n\n @classmethod\n def _from_ibis_Timestamp(cls, dtype: dt.Timestamp) -> sge.DataType:\n if dtype.timezone is None:\n return sge.DataType(this=sge.DataType.Type.DATETIME)\n elif dtype.timezone == \"UTC\":\n return sge.DataType(this=sge.DataType.Type.TIMESTAMPTZ)\n else:\n raise com.UnsupportedBackendType(\n \"BigQuery does not support timestamps with timezones other than 'UTC'\"\n )\n\n @classmethod\n def _from_ibis_Decimal(cls, dtype: dt.Decimal) -> sge.DataType:\n precision = dtype.precision\n scale = dtype.scale\n if (precision, scale) == (76, 38):\n return sge.DataType(this=sge.DataType.Type.BIGDECIMAL)\n elif (precision, scale) in ((38, 9), (None, None)):\n return sge.DataType(this=sge.DataType.Type.DECIMAL)\n else:\n raise com.UnsupportedBackendType(\n \"BigQuery only supports decimal types with precision of 38 and \"\n f\"scale of 9 (NUMERIC) or precision of 76 and scale of 38 (BIGNUMERIC). \"\n f\"Current precision: {dtype.precision}. 
Current scale: {dtype.scale}\"\n )\n\n @classmethod\n def _from_ibis_UInt64(cls, dtype: dt.UInt64) -> NoReturn:\n raise com.UnsupportedBackendType(\n f\"Conversion from {dtype} to BigQuery integer type (Int64) is lossy\"\n )\n\n @classmethod\n def _from_ibis_UInt32(cls, dtype: dt.UInt32) -> sge.DataType:\n return sge.DataType(this=sge.DataType.Type.BIGINT)\n\n _from_ibis_UInt8 = _from_ibis_UInt16 = _from_ibis_UInt32\n\n @classmethod\n def _from_ibis_GeoSpatial(cls, dtype: dt.GeoSpatial) -> sge.DataType:\n if (dtype.geotype, dtype.srid) == (\"geography\", 4326):\n return sge.DataType(this=sge.DataType.Type.GEOGRAPHY)\n else:\n raise com.UnsupportedBackendType(\n \"BigQuery geography uses points on WGS84 reference ellipsoid.\"\n f\"Current geotype: {dtype.geotype}, Current srid: {dtype.srid}\"\n )\n\n\nclass BigQueryUDFType(BigQueryType):\n @classmethod\n def _from_ibis_Int64(cls, dtype: dt.Int64) -> NoReturn:\n raise com.UnsupportedBackendType(\n \"int64 is not a supported input or output type in BigQuery UDFs; use float64 instead\"\n )\n\n\nclass ExasolType(SqlglotType):\n dialect = \"exasol\"\n\n default_temporal_scale = 3\n\n default_decimal_precision = 18\n default_decimal_scale = 0\n\n @classmethod\n def _from_ibis_String(cls, dtype: dt.String) -> sge.DataType:\n return sge.DataType(\n this=sge.DataType.Type.VARCHAR,\n expressions=[sge.DataTypeParam(this=sge.convert(2_000_000))],\n )\n\n @classmethod\n def _from_sqlglot_DECIMAL(\n cls,\n precision: sge.DataTypeParam | None = None,\n scale: sge.DataTypeParam | None = None,\n ) -> dt.Decimal:\n if precision is None:\n precision = cls.default_decimal_precision\n else:\n precision = int(precision.this.this)\n\n if scale is None:\n scale = cls.default_decimal_scale\n else:\n scale = int(scale.this.this)\n\n if not scale:\n if 0 < precision <= 3:\n return dt.Int8(nullable=cls.default_nullable)\n elif 3 < precision <= 9:\n return dt.Int16(nullable=cls.default_nullable)\n elif 9 < precision <= 18:\n return dt.Int32(nullable=cls.default_nullable)\n elif 18 < precision <= 36:\n return dt.Int64(nullable=cls.default_nullable)\n else:\n raise com.UnsupportedBackendType(\n \"Decimal precision is too large; Exasol supports precision up to 36.\"\n )\n return dt.Decimal(precision, scale, nullable=cls.default_nullable)\n\n @classmethod\n def _from_ibis_Array(cls, dtype: dt.Array) -> NoReturn:\n raise com.UnsupportedBackendType(\"Arrays not supported in Exasol\")\n\n @classmethod\n def _from_ibis_Map(cls, dtype: dt.Map) -> NoReturn:\n raise com.UnsupportedBackendType(\"Maps not supported in Exasol\")\n\n @classmethod\n def _from_ibis_Struct(cls, dtype: dt.Struct) -> NoReturn:\n raise com.UnsupportedBackendType(\"Structs not supported in Exasol\")\n\n @classmethod\n def _from_ibis_Timestamp(cls, dtype: dt.Timestamp) -> sge.DataType:\n code = typecode.TIMESTAMP if dtype.timezone is None else typecode.TIMESTAMPTZ\n return sge.DataType(this=code)\n\n @classmethod\n def _from_sqlglot_ARRAY(cls, value_type: sge.DataType) -> NoReturn:\n raise com.UnsupportedBackendType(\"Arrays not supported in Exasol\")\n\n @classmethod\n def _from_sqlglot_MAP(cls, key: sge.DataType, value: sge.DataType) -> NoReturn:\n raise com.UnsupportedBackendType(\"Maps not supported in Exasol\")\n\n @classmethod\n def _from_sqlglot_STRUCT(cls, *cols: sge.ColumnDef) -> NoReturn:\n raise com.UnsupportedBackendType(\"Structs not supported in Exasol\")\n\n\nclass MSSQLType(SqlglotType):\n dialect = \"mssql\"\n\n unknown_type_strings = FrozenDict({\"hierarchyid\": dt.string})\n\n 
@classmethod\n def _from_sqlglot_BIT(cls):\n return dt.Boolean(nullable=cls.default_nullable)\n\n @classmethod\n def _from_sqlglot_IMAGE(cls):\n return dt.Binary(nullable=cls.default_nullable)\n\n @classmethod\n def _from_sqlglot_DATETIME(cls, n=None):\n return dt.Timestamp(\n scale=n if n is None else int(n.this.this), nullable=cls.default_nullable\n )\n\n @classmethod\n def _from_sqlglot_TIMESTAMP(cls):\n return dt.Binary(nullable=False)\n\n @classmethod\n def _from_ibis_String(cls, dtype: dt.String) -> sge.DataType:\n return sge.DataType(\n this=typecode.VARCHAR,\n expressions=[sge.DataTypeParam(this=sge.Var(this=\"max\"))],\n )\n\n @classmethod\n def _from_ibis_Array(cls, dtype: dt.String) -> sge.DataType:\n raise com.UnsupportedBackendType(\"SQL Server does not support arrays\")\n\n @classmethod\n def _from_ibis_Map(cls, dtype: dt.String) -> sge.DataType:\n raise com.UnsupportedBackendType(\"SQL Server does not support \")\n\n @classmethod\n def _from_ibis_Struct(cls, dtype: dt.String) -> sge.DataType:\n raise com.UnsupportedBackendType(\"SQL Server does not support structs\")\n\n @classmethod\n def _from_sqlglot_ARRAY(cls) -> sge.DataType:\n raise com.UnsupportedBackendType(\"SQL Server does not support arrays\")\n\n @classmethod\n def _from_sqlglot_MAP(cls) -> sge.DataType:\n raise com.UnsupportedBackendType(\"SQL Server does not support map\")\n\n @classmethod\n def _from_sqlglot_STRUCT(cls) -> sge.DataType:\n raise com.UnsupportedBackendType(\"SQL Server does not support structs\")\n\n\nclass ClickHouseType(SqlglotType):\n dialect = \"clickhouse\"\n default_decimal_precision = None\n default_decimal_scale = None\n default_nullable = False\n\n unknown_type_strings = FrozenDict(\n {\n \"ipv4\": dt.INET(nullable=default_nullable),\n \"ipv6\": dt.INET(nullable=default_nullable),\n \"object('json')\": dt.JSON(nullable=default_nullable),\n \"array(null)\": dt.Array(dt.null, nullable=default_nullable),\n \"array(nothing)\": dt.Array(dt.null, nullable=default_nullable),\n }\n )\n\n @classmethod\n def from_ibis(cls, dtype: dt.DataType) -> sge.DataType:\n \"\"\"Convert a sqlglot type to an ibis type.\"\"\"\n typ = super().from_ibis(dtype)\n if dtype.nullable and not dtype.is_map():\n # map cannot be nullable in clickhouse\n return sge.DataType(this=typecode.NULLABLE, expressions=[typ])\n else:\n return typ\n\n @classmethod\n def _from_sqlglot_NULLABLE(cls, inner_type: sge.DataType) -> dt.DataType:\n return cls.to_ibis(inner_type, nullable=True)\n\n @classmethod\n def _from_sqlglot_DATETIME(\n cls, timezone: sge.DataTypeParam | None = None\n ) -> dt.Timestamp:\n return dt.Timestamp(\n scale=0,\n timezone=None if timezone is None else timezone.this.this,\n nullable=cls.default_nullable,\n )\n\n @classmethod\n def _from_sqlglot_DATETIME64(\n cls,\n scale: sge.DataTypeSize | None = None,\n timezone: sge.Literal | None = None,\n ) -> dt.Timestamp:\n return dt.Timestamp(\n timezone=None if timezone is None else timezone.this.this,\n scale=int(scale.this.this),\n nullable=cls.default_nullable,\n )\n\n @classmethod\n def _from_sqlglot_LOWCARDINALITY(cls, inner_type: sge.DataType) -> dt.DataType:\n return cls.to_ibis(inner_type)\n\n @classmethod\n def _from_sqlglot_NESTED(cls, *fields: sge.DataType) -> dt.Struct:\n fields = {\n field.name: dt.Array(\n cls.to_ibis(field.args[\"kind\"]), nullable=cls.default_nullable\n )\n for field in fields\n }\n return dt.Struct(fields, nullable=cls.default_nullable)\n\n @classmethod\n def _from_ibis_Timestamp(cls, dtype: dt.Timestamp) -> sge.DataType:\n if 
dtype.timezone is None:\n timezone = None\n else:\n timezone = sge.DataTypeParam(this=sge.Literal.string(dtype.timezone))\n\n if dtype.scale is None:\n return sge.DataType(this=typecode.DATETIME, expressions=[timezone])\n else:\n scale = sge.DataTypeParam(this=sge.Literal.number(dtype.scale))\n return sge.DataType(this=typecode.DATETIME64, expressions=[scale, timezone])\n\n @classmethod\n def _from_ibis_Map(cls, dtype: dt.Map) -> sge.DataType:\n # key cannot be nullable in clickhouse\n key_type = cls.from_ibis(dtype.key_type.copy(nullable=False))\n value_type = cls.from_ibis(dtype.value_type)\n return sge.DataType(this=typecode.MAP, expressions=[key_type, value_type])\n\n\nclass FlinkType(SqlglotType):\n dialect = \"flink\"\n default_decimal_precision = 38\n default_decimal_scale = 18\n\n @classmethod\n def _from_ibis_Binary(cls, dtype: dt.Binary) -> sge.DataType:\n return sge.DataType(this=sge.DataType.Type.VARBINARY)\n", "path": "ibis/backends/base/sqlglot/datatypes.py" } ]
diff --git a/ibis/backends/base/sqlglot/datatypes.py b/ibis/backends/base/sqlglot/datatypes.py index 9cd03adbd622..fb104c11746f 100644 --- a/ibis/backends/base/sqlglot/datatypes.py +++ b/ibis/backends/base/sqlglot/datatypes.py @@ -894,6 +894,8 @@ def _from_sqlglot_STRUCT(cls, *cols: sge.ColumnDef) -> NoReturn: class MSSQLType(SqlglotType): dialect = "mssql" + unknown_type_strings = FrozenDict({"hierarchyid": dt.string}) + @classmethod def _from_sqlglot_BIT(cls): return dt.Boolean(nullable=cls.default_nullable) diff --git a/ibis/backends/mssql/tests/test_client.py b/ibis/backends/mssql/tests/test_client.py index 102b5c2da721..c085eb126616 100644 --- a/ibis/backends/mssql/tests/test_client.py +++ b/ibis/backends/mssql/tests/test_client.py @@ -52,6 +52,7 @@ ("DATETIMEOFFSET(5)", dt.timestamp(scale=5, timezone="UTC")), ("GEOMETRY", dt.geometry), ("GEOGRAPHY", dt.geography), + ("HIERARCHYID", dt.string), ]
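The diff above registers `hierarchyid` as a known-but-unparseable type string on `MSSQLType` via `unknown_type_strings`. A minimal sketch of the intended effect follows (hedged: it assumes the `SqlglotType` subclasses expose a `from_string` classmethod that consults `unknown_type_strings` before falling back to sqlglot parsing; the exact entry point may differ between ibis versions):

```python
# Hedged sketch: `from_string` is assumed to be the conversion entry point
# that checks `unknown_type_strings` before asking sqlglot to parse the text.
import ibis.expr.datatypes as dt
from ibis.backends.base.sqlglot.datatypes import MSSQLType

# "hierarchyid" is not a type sqlglot can parse, so the lookup table added in
# the diff maps it straight to an ibis string type.
assert MSSQLType.from_string("hierarchyid") == dt.string
```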
googleapis__google-api-python-client-497
Documentation not correct for "import" function for variants Line 142 of google-api-python-client/googleapiclient/discovery.py Reads: _The name with a '\_' prefixed if the name is a reserved word._ Should read: _The name with a '\_' appended if the name is a reserved word._ Also, the [suggested code in the docs](https://cloud.google.com/genomics/reference/rest/v1/variants/import) for python is not correct about how to call this method. The example line reading: `request = service.variants().import(body=import_variants_request_body) response = request.execute()` Should be: `request = service.variants().import_(body=import_variants_request_body) response = request.execute()`
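For reference, a minimal sketch of the corrected call pattern described in the issue (hedged: it assumes access to the Genomics v1 API with application-default credentials, and the request body fields are elided as in the original docs example):

```python
# Minimal sketch: calling a discovery method whose name collides with a
# Python keyword. fix_method_name appends an underscore, so the
# variants.import method is exposed as import_().
from googleapiclient import discovery

service = discovery.build('genomics', 'v1')

import_variants_request_body = {
    # ... request fields for variants.import go here (elided) ...
}

request = service.variants().import_(body=import_variants_request_body)
response = request.execute()
```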
[ { "content": "# Copyright 2014 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Client for discovery based APIs.\n\nA client library for Google's discovery based APIs.\n\"\"\"\nfrom __future__ import absolute_import\nimport six\nfrom six.moves import zip\n\n__author__ = '[email protected] (Joe Gregorio)'\n__all__ = [\n 'build',\n 'build_from_document',\n 'fix_method_name',\n 'key2param',\n ]\n\nfrom six import BytesIO\nfrom six.moves import http_client\nfrom six.moves.urllib.parse import urlencode, urlparse, urljoin, \\\n urlunparse, parse_qsl\n\n# Standard library imports\nimport copy\ntry:\n from email.generator import BytesGenerator\nexcept ImportError:\n from email.generator import Generator as BytesGenerator\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.nonmultipart import MIMENonMultipart\nimport json\nimport keyword\nimport logging\nimport mimetypes\nimport os\nimport re\n\n# Third-party imports\nimport httplib2\nimport uritemplate\n\n# Local imports\nfrom googleapiclient import _auth\nfrom googleapiclient import mimeparse\nfrom googleapiclient.errors import HttpError\nfrom googleapiclient.errors import InvalidJsonError\nfrom googleapiclient.errors import MediaUploadSizeError\nfrom googleapiclient.errors import UnacceptableMimeTypeError\nfrom googleapiclient.errors import UnknownApiNameOrVersion\nfrom googleapiclient.errors import UnknownFileType\nfrom googleapiclient.http import build_http\nfrom googleapiclient.http import BatchHttpRequest\nfrom googleapiclient.http import HttpMock\nfrom googleapiclient.http import HttpMockSequence\nfrom googleapiclient.http import HttpRequest\nfrom googleapiclient.http import MediaFileUpload\nfrom googleapiclient.http import MediaUpload\nfrom googleapiclient.model import JsonModel\nfrom googleapiclient.model import MediaModel\nfrom googleapiclient.model import RawModel\nfrom googleapiclient.schema import Schemas\n\n# Oauth2client < 3 has the positional helper in 'util', >= 3 has it\n# in '_helpers'.\ntry:\n from oauth2client.util import _add_query_parameter\n from oauth2client.util import positional\nexcept ImportError:\n from oauth2client._helpers import _add_query_parameter\n from oauth2client._helpers import positional\n\n\n# The client library requires a version of httplib2 that supports RETRIES.\nhttplib2.RETRIES = 1\n\nlogger = logging.getLogger(__name__)\n\nURITEMPLATE = re.compile('{[^}]*}')\nVARNAME = re.compile('[a-zA-Z0-9_-]+')\nDISCOVERY_URI = ('https://www.googleapis.com/discovery/v1/apis/'\n '{api}/{apiVersion}/rest')\nV1_DISCOVERY_URI = DISCOVERY_URI\nV2_DISCOVERY_URI = ('https://{api}.googleapis.com/$discovery/rest?'\n 'version={apiVersion}')\nDEFAULT_METHOD_DOC = 'A description of how to use this function'\nHTTP_PAYLOAD_METHODS = frozenset(['PUT', 'POST', 'PATCH'])\n\n_MEDIA_SIZE_BIT_SHIFTS = {'KB': 10, 'MB': 20, 'GB': 30, 'TB': 40}\nBODY_PARAMETER_DEFAULT_VALUE = {\n 'description': 'The request body.',\n 'type': 'object',\n 'required': 
True,\n}\nMEDIA_BODY_PARAMETER_DEFAULT_VALUE = {\n 'description': ('The filename of the media request body, or an instance '\n 'of a MediaUpload object.'),\n 'type': 'string',\n 'required': False,\n}\nMEDIA_MIME_TYPE_PARAMETER_DEFAULT_VALUE = {\n 'description': ('The MIME type of the media request body, or an instance '\n 'of a MediaUpload object.'),\n 'type': 'string',\n 'required': False,\n}\n_PAGE_TOKEN_NAMES = ('pageToken', 'nextPageToken')\n\n# Parameters accepted by the stack, but not visible via discovery.\n# TODO(dhermes): Remove 'userip' in 'v2'.\nSTACK_QUERY_PARAMETERS = frozenset(['trace', 'pp', 'userip', 'strict'])\nSTACK_QUERY_PARAMETER_DEFAULT_VALUE = {'type': 'string', 'location': 'query'}\n\n# Library-specific reserved words beyond Python keywords.\nRESERVED_WORDS = frozenset(['body'])\n\n# patch _write_lines to avoid munging '\\r' into '\\n'\n# ( https://bugs.python.org/issue18886 https://bugs.python.org/issue19003 )\nclass _BytesGenerator(BytesGenerator):\n _write_lines = BytesGenerator.write\n\ndef fix_method_name(name):\n \"\"\"Fix method names to avoid reserved word conflicts.\n\n Args:\n name: string, method name.\n\n Returns:\n The name with a '_' prefixed if the name is a reserved word.\n \"\"\"\n if keyword.iskeyword(name) or name in RESERVED_WORDS:\n return name + '_'\n else:\n return name\n\n\ndef key2param(key):\n \"\"\"Converts key names into parameter names.\n\n For example, converting \"max-results\" -> \"max_results\"\n\n Args:\n key: string, the method key name.\n\n Returns:\n A safe method name based on the key name.\n \"\"\"\n result = []\n key = list(key)\n if not key[0].isalpha():\n result.append('x')\n for c in key:\n if c.isalnum():\n result.append(c)\n else:\n result.append('_')\n\n return ''.join(result)\n\n\n@positional(2)\ndef build(serviceName,\n version,\n http=None,\n discoveryServiceUrl=DISCOVERY_URI,\n developerKey=None,\n model=None,\n requestBuilder=HttpRequest,\n credentials=None,\n cache_discovery=True,\n cache=None):\n \"\"\"Construct a Resource for interacting with an API.\n\n Construct a Resource object for interacting with an API. The serviceName and\n version are the names from the Discovery service.\n\n Args:\n serviceName: string, name of the service.\n version: string, the version of the service.\n http: httplib2.Http, An instance of httplib2.Http or something that acts\n like it that HTTP requests will be made through.\n discoveryServiceUrl: string, a URI Template that points to the location of\n the discovery service. 
It should have two parameters {api} and\n {apiVersion} that when filled in produce an absolute URI to the discovery\n document for that service.\n developerKey: string, key obtained from\n https://code.google.com/apis/console.\n model: googleapiclient.Model, converts to and from the wire format.\n requestBuilder: googleapiclient.http.HttpRequest, encapsulator for an HTTP\n request.\n credentials: oauth2client.Credentials or\n google.auth.credentials.Credentials, credentials to be used for\n authentication.\n cache_discovery: Boolean, whether or not to cache the discovery doc.\n cache: googleapiclient.discovery_cache.base.CacheBase, an optional\n cache object for the discovery documents.\n\n Returns:\n A Resource object with methods for interacting with the service.\n \"\"\"\n params = {\n 'api': serviceName,\n 'apiVersion': version\n }\n\n if http is None:\n discovery_http = build_http()\n else:\n discovery_http = http\n\n for discovery_url in (discoveryServiceUrl, V2_DISCOVERY_URI,):\n requested_url = uritemplate.expand(discovery_url, params)\n\n try:\n content = _retrieve_discovery_doc(\n requested_url, discovery_http, cache_discovery, cache)\n return build_from_document(content, base=discovery_url, http=http,\n developerKey=developerKey, model=model, requestBuilder=requestBuilder,\n credentials=credentials)\n except HttpError as e:\n if e.resp.status == http_client.NOT_FOUND:\n continue\n else:\n raise e\n\n raise UnknownApiNameOrVersion(\n \"name: %s version: %s\" % (serviceName, version))\n\n\ndef _retrieve_discovery_doc(url, http, cache_discovery, cache=None):\n \"\"\"Retrieves the discovery_doc from cache or the internet.\n\n Args:\n url: string, the URL of the discovery document.\n http: httplib2.Http, An instance of httplib2.Http or something that acts\n like it through which HTTP requests will be made.\n cache_discovery: Boolean, whether or not to cache the discovery doc.\n cache: googleapiclient.discovery_cache.base.Cache, an optional cache\n object for the discovery documents.\n\n Returns:\n A unicode string representation of the discovery document.\n \"\"\"\n if cache_discovery:\n from . import discovery_cache\n from .discovery_cache import base\n if cache is None:\n cache = discovery_cache.autodetect()\n if cache:\n content = cache.get(url)\n if content:\n return content\n\n actual_url = url\n # REMOTE_ADDR is defined by the CGI spec [RFC3875] as the environment\n # variable that contains the network address of the client sending the\n # request. 
If it exists then add that to the request for the discovery\n # document to avoid exceeding the quota on discovery requests.\n if 'REMOTE_ADDR' in os.environ:\n actual_url = _add_query_parameter(url, 'userIp', os.environ['REMOTE_ADDR'])\n logger.info('URL being requested: GET %s', actual_url)\n\n resp, content = http.request(actual_url)\n\n if resp.status >= 400:\n raise HttpError(resp, content, uri=actual_url)\n\n try:\n content = content.decode('utf-8')\n except AttributeError:\n pass\n\n try:\n service = json.loads(content)\n except ValueError as e:\n logger.error('Failed to parse as JSON: ' + content)\n raise InvalidJsonError()\n if cache_discovery and cache:\n cache.set(url, content)\n return content\n\n\n@positional(1)\ndef build_from_document(\n service,\n base=None,\n future=None,\n http=None,\n developerKey=None,\n model=None,\n requestBuilder=HttpRequest,\n credentials=None):\n \"\"\"Create a Resource for interacting with an API.\n\n Same as `build()`, but constructs the Resource object from a discovery\n document that is it given, as opposed to retrieving one over HTTP.\n\n Args:\n service: string or object, the JSON discovery document describing the API.\n The value passed in may either be the JSON string or the deserialized\n JSON.\n base: string, base URI for all HTTP requests, usually the discovery URI.\n This parameter is no longer used as rootUrl and servicePath are included\n within the discovery document. (deprecated)\n future: string, discovery document with future capabilities (deprecated).\n http: httplib2.Http, An instance of httplib2.Http or something that acts\n like it that HTTP requests will be made through.\n developerKey: string, Key for controlling API usage, generated\n from the API Console.\n model: Model class instance that serializes and de-serializes requests and\n responses.\n requestBuilder: Takes an http request and packages it up to be executed.\n credentials: oauth2client.Credentials or\n google.auth.credentials.Credentials, credentials to be used for\n authentication.\n\n Returns:\n A Resource object with methods for interacting with the service.\n \"\"\"\n\n if http is not None and credentials is not None:\n raise ValueError('Arguments http and credentials are mutually exclusive.')\n\n if isinstance(service, six.string_types):\n service = json.loads(service)\n\n if 'rootUrl' not in service and (isinstance(http, (HttpMock,\n HttpMockSequence))):\n logger.error(\"You are using HttpMock or HttpMockSequence without\" +\n \"having the service discovery doc in cache. Try calling \" +\n \"build() without mocking once first to populate the \" +\n \"cache.\")\n raise InvalidJsonError()\n\n base = urljoin(service['rootUrl'], service['servicePath'])\n schema = Schemas(service)\n\n # If the http client is not specified, then we must construct an http client\n # to make requests. 
If the service has scopes, then we also need to setup\n # authentication.\n if http is None:\n # Does the service require scopes?\n scopes = list(\n service.get('auth', {}).get('oauth2', {}).get('scopes', {}).keys())\n\n # If so, then the we need to setup authentication if no developerKey is\n # specified.\n if scopes and not developerKey:\n # If the user didn't pass in credentials, attempt to acquire application\n # default credentials.\n if credentials is None:\n credentials = _auth.default_credentials()\n\n # The credentials need to be scoped.\n credentials = _auth.with_scopes(credentials, scopes)\n\n # Create an authorized http instance\n http = _auth.authorized_http(credentials)\n\n # If the service doesn't require scopes then there is no need for\n # authentication.\n else:\n http = build_http()\n\n if model is None:\n features = service.get('features', [])\n model = JsonModel('dataWrapper' in features)\n\n return Resource(http=http, baseUrl=base, model=model,\n developerKey=developerKey, requestBuilder=requestBuilder,\n resourceDesc=service, rootDesc=service, schema=schema)\n\n\ndef _cast(value, schema_type):\n \"\"\"Convert value to a string based on JSON Schema type.\n\n See http://tools.ietf.org/html/draft-zyp-json-schema-03 for more details on\n JSON Schema.\n\n Args:\n value: any, the value to convert\n schema_type: string, the type that value should be interpreted as\n\n Returns:\n A string representation of 'value' based on the schema_type.\n \"\"\"\n if schema_type == 'string':\n if type(value) == type('') or type(value) == type(u''):\n return value\n else:\n return str(value)\n elif schema_type == 'integer':\n return str(int(value))\n elif schema_type == 'number':\n return str(float(value))\n elif schema_type == 'boolean':\n return str(bool(value)).lower()\n else:\n if type(value) == type('') or type(value) == type(u''):\n return value\n else:\n return str(value)\n\n\ndef _media_size_to_long(maxSize):\n \"\"\"Convert a string media size, such as 10GB or 3TB into an integer.\n\n Args:\n maxSize: string, size as a string, such as 2MB or 7GB.\n\n Returns:\n The size as an integer value.\n \"\"\"\n if len(maxSize) < 2:\n return 0\n units = maxSize[-2:].upper()\n bit_shift = _MEDIA_SIZE_BIT_SHIFTS.get(units)\n if bit_shift is not None:\n return int(maxSize[:-2]) << bit_shift\n else:\n return int(maxSize)\n\n\ndef _media_path_url_from_info(root_desc, path_url):\n \"\"\"Creates an absolute media path URL.\n\n Constructed using the API root URI and service path from the discovery\n document and the relative path for the API method.\n\n Args:\n root_desc: Dictionary; the entire original deserialized discovery document.\n path_url: String; the relative URL for the API method. Relative to the API\n root, which is specified in the discovery document.\n\n Returns:\n String; the absolute URI for media upload for the API method.\n \"\"\"\n return '%(root)supload/%(service_path)s%(path)s' % {\n 'root': root_desc['rootUrl'],\n 'service_path': root_desc['servicePath'],\n 'path': path_url,\n }\n\n\ndef _fix_up_parameters(method_desc, root_desc, http_method):\n \"\"\"Updates parameters of an API method with values specific to this library.\n\n Specifically, adds whatever global parameters are specified by the API to the\n parameters for the individual method. 
Also adds parameters which don't\n appear in the discovery document, but are available to all discovery based\n APIs (these are listed in STACK_QUERY_PARAMETERS).\n\n SIDE EFFECTS: This updates the parameters dictionary object in the method\n description.\n\n Args:\n method_desc: Dictionary with metadata describing an API method. Value comes\n from the dictionary of methods stored in the 'methods' key in the\n deserialized discovery document.\n root_desc: Dictionary; the entire original deserialized discovery document.\n http_method: String; the HTTP method used to call the API method described\n in method_desc.\n\n Returns:\n The updated Dictionary stored in the 'parameters' key of the method\n description dictionary.\n \"\"\"\n parameters = method_desc.setdefault('parameters', {})\n\n # Add in the parameters common to all methods.\n for name, description in six.iteritems(root_desc.get('parameters', {})):\n parameters[name] = description\n\n # Add in undocumented query parameters.\n for name in STACK_QUERY_PARAMETERS:\n parameters[name] = STACK_QUERY_PARAMETER_DEFAULT_VALUE.copy()\n\n # Add 'body' (our own reserved word) to parameters if the method supports\n # a request payload.\n if http_method in HTTP_PAYLOAD_METHODS and 'request' in method_desc:\n body = BODY_PARAMETER_DEFAULT_VALUE.copy()\n body.update(method_desc['request'])\n parameters['body'] = body\n\n return parameters\n\n\ndef _fix_up_media_upload(method_desc, root_desc, path_url, parameters):\n \"\"\"Adds 'media_body' and 'media_mime_type' parameters if supported by method.\n\n SIDE EFFECTS: If the method supports media upload and has a required body,\n sets body to be optional (required=False) instead. Also, if there is a\n 'mediaUpload' in the method description, adds 'media_upload' key to\n parameters.\n\n Args:\n method_desc: Dictionary with metadata describing an API method. Value comes\n from the dictionary of methods stored in the 'methods' key in the\n deserialized discovery document.\n root_desc: Dictionary; the entire original deserialized discovery document.\n path_url: String; the relative URL for the API method. Relative to the API\n root, which is specified in the discovery document.\n parameters: A dictionary describing method parameters for method described\n in method_desc.\n\n Returns:\n Triple (accept, max_size, media_path_url) where:\n - accept is a list of strings representing what content types are\n accepted for media upload. Defaults to empty list if not in the\n discovery document.\n - max_size is a long representing the max size in bytes allowed for a\n media upload. Defaults to 0L if not in the discovery document.\n - media_path_url is a String; the absolute URI for media upload for the\n API method. Constructed using the API root URI and service path from\n the discovery document and the relative path for the API method. 
If\n media upload is not supported, this is None.\n \"\"\"\n media_upload = method_desc.get('mediaUpload', {})\n accept = media_upload.get('accept', [])\n max_size = _media_size_to_long(media_upload.get('maxSize', ''))\n media_path_url = None\n\n if media_upload:\n media_path_url = _media_path_url_from_info(root_desc, path_url)\n parameters['media_body'] = MEDIA_BODY_PARAMETER_DEFAULT_VALUE.copy()\n parameters['media_mime_type'] = MEDIA_MIME_TYPE_PARAMETER_DEFAULT_VALUE.copy()\n if 'body' in parameters:\n parameters['body']['required'] = False\n\n return accept, max_size, media_path_url\n\n\ndef _fix_up_method_description(method_desc, root_desc):\n \"\"\"Updates a method description in a discovery document.\n\n SIDE EFFECTS: Changes the parameters dictionary in the method description with\n extra parameters which are used locally.\n\n Args:\n method_desc: Dictionary with metadata describing an API method. Value comes\n from the dictionary of methods stored in the 'methods' key in the\n deserialized discovery document.\n root_desc: Dictionary; the entire original deserialized discovery document.\n\n Returns:\n Tuple (path_url, http_method, method_id, accept, max_size, media_path_url)\n where:\n - path_url is a String; the relative URL for the API method. Relative to\n the API root, which is specified in the discovery document.\n - http_method is a String; the HTTP method used to call the API method\n described in the method description.\n - method_id is a String; the name of the RPC method associated with the\n API method, and is in the method description in the 'id' key.\n - accept is a list of strings representing what content types are\n accepted for media upload. Defaults to empty list if not in the\n discovery document.\n - max_size is a long representing the max size in bytes allowed for a\n media upload. Defaults to 0L if not in the discovery document.\n - media_path_url is a String; the absolute URI for media upload for the\n API method. Constructed using the API root URI and service path from\n the discovery document and the relative path for the API method. If\n media upload is not supported, this is None.\n \"\"\"\n path_url = method_desc['path']\n http_method = method_desc['httpMethod']\n method_id = method_desc['id']\n\n parameters = _fix_up_parameters(method_desc, root_desc, http_method)\n # Order is important. `_fix_up_media_upload` needs `method_desc` to have a\n # 'parameters' key and needs to know if there is a 'body' parameter because it\n # also sets a 'media_body' parameter.\n accept, max_size, media_path_url = _fix_up_media_upload(\n method_desc, root_desc, path_url, parameters)\n\n return path_url, http_method, method_id, accept, max_size, media_path_url\n\n\ndef _urljoin(base, url):\n \"\"\"Custom urljoin replacement supporting : before / in url.\"\"\"\n # In general, it's unsafe to simply join base and url. However, for\n # the case of discovery documents, we know:\n # * base will never contain params, query, or fragment\n # * url will never contain a scheme or net_loc.\n # In general, this means we can safely join on /; we just need to\n # ensure we end up with precisely one / joining base and url. 
The\n # exception here is the case of media uploads, where url will be an\n # absolute url.\n if url.startswith('http://') or url.startswith('https://'):\n return urljoin(base, url)\n new_base = base if base.endswith('/') else base + '/'\n new_url = url[1:] if url.startswith('/') else url\n return new_base + new_url\n\n\n# TODO(dhermes): Convert this class to ResourceMethod and make it callable\nclass ResourceMethodParameters(object):\n \"\"\"Represents the parameters associated with a method.\n\n Attributes:\n argmap: Map from method parameter name (string) to query parameter name\n (string).\n required_params: List of required parameters (represented by parameter\n name as string).\n repeated_params: List of repeated parameters (represented by parameter\n name as string).\n pattern_params: Map from method parameter name (string) to regular\n expression (as a string). If the pattern is set for a parameter, the\n value for that parameter must match the regular expression.\n query_params: List of parameters (represented by parameter name as string)\n that will be used in the query string.\n path_params: Set of parameters (represented by parameter name as string)\n that will be used in the base URL path.\n param_types: Map from method parameter name (string) to parameter type. Type\n can be any valid JSON schema type; valid values are 'any', 'array',\n 'boolean', 'integer', 'number', 'object', or 'string'. Reference:\n http://tools.ietf.org/html/draft-zyp-json-schema-03#section-5.1\n enum_params: Map from method parameter name (string) to list of strings,\n where each list of strings is the list of acceptable enum values.\n \"\"\"\n\n def __init__(self, method_desc):\n \"\"\"Constructor for ResourceMethodParameters.\n\n Sets default values and defers to set_parameters to populate.\n\n Args:\n method_desc: Dictionary with metadata describing an API method. Value\n comes from the dictionary of methods stored in the 'methods' key in\n the deserialized discovery document.\n \"\"\"\n self.argmap = {}\n self.required_params = []\n self.repeated_params = []\n self.pattern_params = {}\n self.query_params = []\n # TODO(dhermes): Change path_params to a list if the extra URITEMPLATE\n # parsing is gotten rid of.\n self.path_params = set()\n self.param_types = {}\n self.enum_params = {}\n\n self.set_parameters(method_desc)\n\n def set_parameters(self, method_desc):\n \"\"\"Populates maps and lists based on method description.\n\n Iterates through each parameter for the method and parses the values from\n the parameter dictionary.\n\n Args:\n method_desc: Dictionary with metadata describing an API method. Value\n comes from the dictionary of methods stored in the 'methods' key in\n the deserialized discovery document.\n \"\"\"\n for arg, desc in six.iteritems(method_desc.get('parameters', {})):\n param = key2param(arg)\n self.argmap[param] = arg\n\n if desc.get('pattern'):\n self.pattern_params[param] = desc['pattern']\n if desc.get('enum'):\n self.enum_params[param] = desc['enum']\n if desc.get('required'):\n self.required_params.append(param)\n if desc.get('repeated'):\n self.repeated_params.append(param)\n if desc.get('location') == 'query':\n self.query_params.append(param)\n if desc.get('location') == 'path':\n self.path_params.add(param)\n self.param_types[param] = desc.get('type', 'string')\n\n # TODO(dhermes): Determine if this is still necessary. 
Discovery based APIs\n # should have all path parameters already marked with\n # 'location: path'.\n for match in URITEMPLATE.finditer(method_desc['path']):\n for namematch in VARNAME.finditer(match.group(0)):\n name = key2param(namematch.group(0))\n self.path_params.add(name)\n if name in self.query_params:\n self.query_params.remove(name)\n\n\ndef createMethod(methodName, methodDesc, rootDesc, schema):\n \"\"\"Creates a method for attaching to a Resource.\n\n Args:\n methodName: string, name of the method to use.\n methodDesc: object, fragment of deserialized discovery document that\n describes the method.\n rootDesc: object, the entire deserialized discovery document.\n schema: object, mapping of schema names to schema descriptions.\n \"\"\"\n methodName = fix_method_name(methodName)\n (pathUrl, httpMethod, methodId, accept,\n maxSize, mediaPathUrl) = _fix_up_method_description(methodDesc, rootDesc)\n\n parameters = ResourceMethodParameters(methodDesc)\n\n def method(self, **kwargs):\n # Don't bother with doc string, it will be over-written by createMethod.\n\n for name in six.iterkeys(kwargs):\n if name not in parameters.argmap:\n raise TypeError('Got an unexpected keyword argument \"%s\"' % name)\n\n # Remove args that have a value of None.\n keys = list(kwargs.keys())\n for name in keys:\n if kwargs[name] is None:\n del kwargs[name]\n\n for name in parameters.required_params:\n if name not in kwargs:\n # temporary workaround for non-paging methods incorrectly requiring\n # page token parameter (cf. drive.changes.watch vs. drive.changes.list)\n if name not in _PAGE_TOKEN_NAMES or _findPageTokenName(\n _methodProperties(methodDesc, schema, 'response')):\n raise TypeError('Missing required parameter \"%s\"' % name)\n\n for name, regex in six.iteritems(parameters.pattern_params):\n if name in kwargs:\n if isinstance(kwargs[name], six.string_types):\n pvalues = [kwargs[name]]\n else:\n pvalues = kwargs[name]\n for pvalue in pvalues:\n if re.match(regex, pvalue) is None:\n raise TypeError(\n 'Parameter \"%s\" value \"%s\" does not match the pattern \"%s\"' %\n (name, pvalue, regex))\n\n for name, enums in six.iteritems(parameters.enum_params):\n if name in kwargs:\n # We need to handle the case of a repeated enum\n # name differently, since we want to handle both\n # arg='value' and arg=['value1', 'value2']\n if (name in parameters.repeated_params and\n not isinstance(kwargs[name], six.string_types)):\n values = kwargs[name]\n else:\n values = [kwargs[name]]\n for value in values:\n if value not in enums:\n raise TypeError(\n 'Parameter \"%s\" value \"%s\" is not an allowed value in \"%s\"' %\n (name, value, str(enums)))\n\n actual_query_params = {}\n actual_path_params = {}\n for key, value in six.iteritems(kwargs):\n to_type = parameters.param_types.get(key, 'string')\n # For repeated parameters we cast each member of the list.\n if key in parameters.repeated_params and type(value) == type([]):\n cast_value = [_cast(x, to_type) for x in value]\n else:\n cast_value = _cast(value, to_type)\n if key in parameters.query_params:\n actual_query_params[parameters.argmap[key]] = cast_value\n if key in parameters.path_params:\n actual_path_params[parameters.argmap[key]] = cast_value\n body_value = kwargs.get('body', None)\n media_filename = kwargs.get('media_body', None)\n media_mime_type = kwargs.get('media_mime_type', None)\n\n if self._developerKey:\n actual_query_params['key'] = self._developerKey\n\n model = self._model\n if methodName.endswith('_media'):\n model = MediaModel()\n elif 
'response' not in methodDesc:\n model = RawModel()\n\n headers = {}\n headers, params, query, body = model.request(headers,\n actual_path_params, actual_query_params, body_value)\n\n expanded_url = uritemplate.expand(pathUrl, params)\n url = _urljoin(self._baseUrl, expanded_url + query)\n\n resumable = None\n multipart_boundary = ''\n\n if media_filename:\n # Ensure we end up with a valid MediaUpload object.\n if isinstance(media_filename, six.string_types):\n if media_mime_type is None:\n logger.warning(\n 'media_mime_type argument not specified: trying to auto-detect for %s',\n media_filename)\n media_mime_type, _ = mimetypes.guess_type(media_filename)\n if media_mime_type is None:\n raise UnknownFileType(media_filename)\n if not mimeparse.best_match([media_mime_type], ','.join(accept)):\n raise UnacceptableMimeTypeError(media_mime_type)\n media_upload = MediaFileUpload(media_filename,\n mimetype=media_mime_type)\n elif isinstance(media_filename, MediaUpload):\n media_upload = media_filename\n else:\n raise TypeError('media_filename must be str or MediaUpload.')\n\n # Check the maxSize\n if media_upload.size() is not None and media_upload.size() > maxSize > 0:\n raise MediaUploadSizeError(\"Media larger than: %s\" % maxSize)\n\n # Use the media path uri for media uploads\n expanded_url = uritemplate.expand(mediaPathUrl, params)\n url = _urljoin(self._baseUrl, expanded_url + query)\n if media_upload.resumable():\n url = _add_query_parameter(url, 'uploadType', 'resumable')\n\n if media_upload.resumable():\n # This is all we need to do for resumable, if the body exists it gets\n # sent in the first request, otherwise an empty body is sent.\n resumable = media_upload\n else:\n # A non-resumable upload\n if body is None:\n # This is a simple media upload\n headers['content-type'] = media_upload.mimetype()\n body = media_upload.getbytes(0, media_upload.size())\n url = _add_query_parameter(url, 'uploadType', 'media')\n else:\n # This is a multipart/related upload.\n msgRoot = MIMEMultipart('related')\n # msgRoot should not write out it's own headers\n setattr(msgRoot, '_write_headers', lambda self: None)\n\n # attach the body as one part\n msg = MIMENonMultipart(*headers['content-type'].split('/'))\n msg.set_payload(body)\n msgRoot.attach(msg)\n\n # attach the media as the second part\n msg = MIMENonMultipart(*media_upload.mimetype().split('/'))\n msg['Content-Transfer-Encoding'] = 'binary'\n\n payload = media_upload.getbytes(0, media_upload.size())\n msg.set_payload(payload)\n msgRoot.attach(msg)\n # encode the body: note that we can't use `as_string`, because\n # it plays games with `From ` lines.\n fp = BytesIO()\n g = _BytesGenerator(fp, mangle_from_=False)\n g.flatten(msgRoot, unixfrom=False)\n body = fp.getvalue()\n\n multipart_boundary = msgRoot.get_boundary()\n headers['content-type'] = ('multipart/related; '\n 'boundary=\"%s\"') % multipart_boundary\n url = _add_query_parameter(url, 'uploadType', 'multipart')\n\n logger.info('URL being requested: %s %s' % (httpMethod,url))\n return self._requestBuilder(self._http,\n model.response,\n url,\n method=httpMethod,\n body=body,\n headers=headers,\n methodId=methodId,\n resumable=resumable)\n\n docs = [methodDesc.get('description', DEFAULT_METHOD_DOC), '\\n\\n']\n if len(parameters.argmap) > 0:\n docs.append('Args:\\n')\n\n # Skip undocumented params and params common to all methods.\n skip_parameters = list(rootDesc.get('parameters', {}).keys())\n skip_parameters.extend(STACK_QUERY_PARAMETERS)\n\n all_args = list(parameters.argmap.keys())\n 
args_ordered = [key2param(s) for s in methodDesc.get('parameterOrder', [])]\n\n # Move body to the front of the line.\n if 'body' in all_args:\n args_ordered.append('body')\n\n for name in all_args:\n if name not in args_ordered:\n args_ordered.append(name)\n\n for arg in args_ordered:\n if arg in skip_parameters:\n continue\n\n repeated = ''\n if arg in parameters.repeated_params:\n repeated = ' (repeated)'\n required = ''\n if arg in parameters.required_params:\n required = ' (required)'\n paramdesc = methodDesc['parameters'][parameters.argmap[arg]]\n paramdoc = paramdesc.get('description', 'A parameter')\n if '$ref' in paramdesc:\n docs.append(\n (' %s: object, %s%s%s\\n The object takes the'\n ' form of:\\n\\n%s\\n\\n') % (arg, paramdoc, required, repeated,\n schema.prettyPrintByName(paramdesc['$ref'])))\n else:\n paramtype = paramdesc.get('type', 'string')\n docs.append(' %s: %s, %s%s%s\\n' % (arg, paramtype, paramdoc, required,\n repeated))\n enum = paramdesc.get('enum', [])\n enumDesc = paramdesc.get('enumDescriptions', [])\n if enum and enumDesc:\n docs.append(' Allowed values\\n')\n for (name, desc) in zip(enum, enumDesc):\n docs.append(' %s - %s\\n' % (name, desc))\n if 'response' in methodDesc:\n if methodName.endswith('_media'):\n docs.append('\\nReturns:\\n The media object as a string.\\n\\n ')\n else:\n docs.append('\\nReturns:\\n An object of the form:\\n\\n ')\n docs.append(schema.prettyPrintSchema(methodDesc['response']))\n\n setattr(method, '__doc__', ''.join(docs))\n return (methodName, method)\n\n\ndef createNextMethod(methodName,\n pageTokenName='pageToken',\n nextPageTokenName='nextPageToken',\n isPageTokenParameter=True):\n \"\"\"Creates any _next methods for attaching to a Resource.\n\n The _next methods allow for easy iteration through list() responses.\n\n Args:\n methodName: string, name of the method to use.\n pageTokenName: string, name of request page token field.\n nextPageTokenName: string, name of response page token field.\n isPageTokenParameter: Boolean, True if request page token is a query\n parameter, False if request page token is a field of the request body.\n \"\"\"\n methodName = fix_method_name(methodName)\n\n def methodNext(self, previous_request, previous_response):\n \"\"\"Retrieves the next page of results.\n\nArgs:\n previous_request: The request for the previous page. (required)\n previous_response: The response from the request for the previous page. (required)\n\nReturns:\n A request object that you can call 'execute()' on to request the next\n page. 
Returns None if there are no more items in the collection.\n \"\"\"\n # Retrieve nextPageToken from previous_response\n # Use as pageToken in previous_request to create new request.\n\n nextPageToken = previous_response.get(nextPageTokenName, None)\n if not nextPageToken:\n return None\n\n request = copy.copy(previous_request)\n\n if isPageTokenParameter:\n # Replace pageToken value in URI\n request.uri = _add_query_parameter(\n request.uri, pageTokenName, nextPageToken)\n logger.info('Next page request URL: %s %s' % (methodName, request.uri))\n else:\n # Replace pageToken value in request body\n model = self._model\n body = model.deserialize(request.body)\n body[pageTokenName] = nextPageToken\n request.body = model.serialize(body)\n logger.info('Next page request body: %s %s' % (methodName, body))\n\n return request\n\n return (methodName, methodNext)\n\n\nclass Resource(object):\n \"\"\"A class for interacting with a resource.\"\"\"\n\n def __init__(self, http, baseUrl, model, requestBuilder, developerKey,\n resourceDesc, rootDesc, schema):\n \"\"\"Build a Resource from the API description.\n\n Args:\n http: httplib2.Http, Object to make http requests with.\n baseUrl: string, base URL for the API. All requests are relative to this\n URI.\n model: googleapiclient.Model, converts to and from the wire format.\n requestBuilder: class or callable that instantiates an\n googleapiclient.HttpRequest object.\n developerKey: string, key obtained from\n https://code.google.com/apis/console\n resourceDesc: object, section of deserialized discovery document that\n describes a resource. Note that the top level discovery document\n is considered a resource.\n rootDesc: object, the entire deserialized discovery document.\n schema: object, mapping of schema names to schema descriptions.\n \"\"\"\n self._dynamic_attrs = []\n\n self._http = http\n self._baseUrl = baseUrl\n self._model = model\n self._developerKey = developerKey\n self._requestBuilder = requestBuilder\n self._resourceDesc = resourceDesc\n self._rootDesc = rootDesc\n self._schema = schema\n\n self._set_service_methods()\n\n def _set_dynamic_attr(self, attr_name, value):\n \"\"\"Sets an instance attribute and tracks it in a list of dynamic attributes.\n\n Args:\n attr_name: string; The name of the attribute to be set\n value: The value being set on the object and tracked in the dynamic cache.\n \"\"\"\n self._dynamic_attrs.append(attr_name)\n self.__dict__[attr_name] = value\n\n def __getstate__(self):\n \"\"\"Trim the state down to something that can be pickled.\n\n Uses the fact that the instance variable _dynamic_attrs holds attrs that\n will be wiped and restored on pickle serialization.\n \"\"\"\n state_dict = copy.copy(self.__dict__)\n for dynamic_attr in self._dynamic_attrs:\n del state_dict[dynamic_attr]\n del state_dict['_dynamic_attrs']\n return state_dict\n\n def __setstate__(self, state):\n \"\"\"Reconstitute the state of the object from being pickled.\n\n Uses the fact that the instance variable _dynamic_attrs holds attrs that\n will be wiped and restored on pickle serialization.\n \"\"\"\n self.__dict__.update(state)\n self._dynamic_attrs = []\n self._set_service_methods()\n\n def _set_service_methods(self):\n self._add_basic_methods(self._resourceDesc, self._rootDesc, self._schema)\n self._add_nested_resources(self._resourceDesc, self._rootDesc, self._schema)\n self._add_next_methods(self._resourceDesc, self._schema)\n\n def _add_basic_methods(self, resourceDesc, rootDesc, schema):\n # If this is the root Resource, add a 
new_batch_http_request() method.\n if resourceDesc == rootDesc:\n batch_uri = '%s%s' % (\n rootDesc['rootUrl'], rootDesc.get('batchPath', 'batch'))\n def new_batch_http_request(callback=None):\n \"\"\"Create a BatchHttpRequest object based on the discovery document.\n\n Args:\n callback: callable, A callback to be called for each response, of the\n form callback(id, response, exception). The first parameter is the\n request id, and the second is the deserialized response object. The\n third is an apiclient.errors.HttpError exception object if an HTTP\n error occurred while processing the request, or None if no error\n occurred.\n\n Returns:\n A BatchHttpRequest object based on the discovery document.\n \"\"\"\n return BatchHttpRequest(callback=callback, batch_uri=batch_uri)\n self._set_dynamic_attr('new_batch_http_request', new_batch_http_request)\n\n # Add basic methods to Resource\n if 'methods' in resourceDesc:\n for methodName, methodDesc in six.iteritems(resourceDesc['methods']):\n fixedMethodName, method = createMethod(\n methodName, methodDesc, rootDesc, schema)\n self._set_dynamic_attr(fixedMethodName,\n method.__get__(self, self.__class__))\n # Add in _media methods. The functionality of the attached method will\n # change when it sees that the method name ends in _media.\n if methodDesc.get('supportsMediaDownload', False):\n fixedMethodName, method = createMethod(\n methodName + '_media', methodDesc, rootDesc, schema)\n self._set_dynamic_attr(fixedMethodName,\n method.__get__(self, self.__class__))\n\n def _add_nested_resources(self, resourceDesc, rootDesc, schema):\n # Add in nested resources\n if 'resources' in resourceDesc:\n\n def createResourceMethod(methodName, methodDesc):\n \"\"\"Create a method on the Resource to access a nested Resource.\n\n Args:\n methodName: string, name of the method to use.\n methodDesc: object, fragment of deserialized discovery document that\n describes the method.\n \"\"\"\n methodName = fix_method_name(methodName)\n\n def methodResource(self):\n return Resource(http=self._http, baseUrl=self._baseUrl,\n model=self._model, developerKey=self._developerKey,\n requestBuilder=self._requestBuilder,\n resourceDesc=methodDesc, rootDesc=rootDesc,\n schema=schema)\n\n setattr(methodResource, '__doc__', 'A collection resource.')\n setattr(methodResource, '__is_resource__', True)\n\n return (methodName, methodResource)\n\n for methodName, methodDesc in six.iteritems(resourceDesc['resources']):\n fixedMethodName, method = createResourceMethod(methodName, methodDesc)\n self._set_dynamic_attr(fixedMethodName,\n method.__get__(self, self.__class__))\n\n def _add_next_methods(self, resourceDesc, schema):\n # Add _next() methods if and only if one of the names 'pageToken' or\n # 'nextPageToken' occurs among the fields of both the method's response\n # type either the method's request (query parameters) or request body.\n if 'methods' not in resourceDesc:\n return\n for methodName, methodDesc in six.iteritems(resourceDesc['methods']):\n nextPageTokenName = _findPageTokenName(\n _methodProperties(methodDesc, schema, 'response'))\n if not nextPageTokenName:\n continue\n isPageTokenParameter = True\n pageTokenName = _findPageTokenName(methodDesc.get('parameters', {}))\n if not pageTokenName:\n isPageTokenParameter = False\n pageTokenName = _findPageTokenName(\n _methodProperties(methodDesc, schema, 'request'))\n if not pageTokenName:\n continue\n fixedMethodName, method = createNextMethod(\n methodName + '_next', pageTokenName, nextPageTokenName,\n 
isPageTokenParameter)\n self._set_dynamic_attr(fixedMethodName,\n method.__get__(self, self.__class__))\n\n\ndef _findPageTokenName(fields):\n \"\"\"Search field names for one like a page token.\n\n Args:\n fields: container of string, names of fields.\n\n Returns:\n First name that is either 'pageToken' or 'nextPageToken' if one exists,\n otherwise None.\n \"\"\"\n return next((tokenName for tokenName in _PAGE_TOKEN_NAMES\n if tokenName in fields), None)\n\ndef _methodProperties(methodDesc, schema, name):\n \"\"\"Get properties of a field in a method description.\n\n Args:\n methodDesc: object, fragment of deserialized discovery document that\n describes the method.\n schema: object, mapping of schema names to schema descriptions.\n name: string, name of top-level field in method description.\n\n Returns:\n Object representing fragment of deserialized discovery document\n corresponding to 'properties' field of object corresponding to named field\n in method description, if it exists, otherwise empty dict.\n \"\"\"\n desc = methodDesc.get(name, {})\n if '$ref' in desc:\n desc = schema.get(desc['$ref'], {})\n return desc.get('properties', {})\n", "path": "googleapiclient/discovery.py" } ]
[ { "content": "# Copyright 2014 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Client for discovery based APIs.\n\nA client library for Google's discovery based APIs.\n\"\"\"\nfrom __future__ import absolute_import\nimport six\nfrom six.moves import zip\n\n__author__ = '[email protected] (Joe Gregorio)'\n__all__ = [\n 'build',\n 'build_from_document',\n 'fix_method_name',\n 'key2param',\n ]\n\nfrom six import BytesIO\nfrom six.moves import http_client\nfrom six.moves.urllib.parse import urlencode, urlparse, urljoin, \\\n urlunparse, parse_qsl\n\n# Standard library imports\nimport copy\ntry:\n from email.generator import BytesGenerator\nexcept ImportError:\n from email.generator import Generator as BytesGenerator\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.nonmultipart import MIMENonMultipart\nimport json\nimport keyword\nimport logging\nimport mimetypes\nimport os\nimport re\n\n# Third-party imports\nimport httplib2\nimport uritemplate\n\n# Local imports\nfrom googleapiclient import _auth\nfrom googleapiclient import mimeparse\nfrom googleapiclient.errors import HttpError\nfrom googleapiclient.errors import InvalidJsonError\nfrom googleapiclient.errors import MediaUploadSizeError\nfrom googleapiclient.errors import UnacceptableMimeTypeError\nfrom googleapiclient.errors import UnknownApiNameOrVersion\nfrom googleapiclient.errors import UnknownFileType\nfrom googleapiclient.http import build_http\nfrom googleapiclient.http import BatchHttpRequest\nfrom googleapiclient.http import HttpMock\nfrom googleapiclient.http import HttpMockSequence\nfrom googleapiclient.http import HttpRequest\nfrom googleapiclient.http import MediaFileUpload\nfrom googleapiclient.http import MediaUpload\nfrom googleapiclient.model import JsonModel\nfrom googleapiclient.model import MediaModel\nfrom googleapiclient.model import RawModel\nfrom googleapiclient.schema import Schemas\n\n# Oauth2client < 3 has the positional helper in 'util', >= 3 has it\n# in '_helpers'.\ntry:\n from oauth2client.util import _add_query_parameter\n from oauth2client.util import positional\nexcept ImportError:\n from oauth2client._helpers import _add_query_parameter\n from oauth2client._helpers import positional\n\n\n# The client library requires a version of httplib2 that supports RETRIES.\nhttplib2.RETRIES = 1\n\nlogger = logging.getLogger(__name__)\n\nURITEMPLATE = re.compile('{[^}]*}')\nVARNAME = re.compile('[a-zA-Z0-9_-]+')\nDISCOVERY_URI = ('https://www.googleapis.com/discovery/v1/apis/'\n '{api}/{apiVersion}/rest')\nV1_DISCOVERY_URI = DISCOVERY_URI\nV2_DISCOVERY_URI = ('https://{api}.googleapis.com/$discovery/rest?'\n 'version={apiVersion}')\nDEFAULT_METHOD_DOC = 'A description of how to use this function'\nHTTP_PAYLOAD_METHODS = frozenset(['PUT', 'POST', 'PATCH'])\n\n_MEDIA_SIZE_BIT_SHIFTS = {'KB': 10, 'MB': 20, 'GB': 30, 'TB': 40}\nBODY_PARAMETER_DEFAULT_VALUE = {\n 'description': 'The request body.',\n 'type': 'object',\n 'required': 
True,\n}\nMEDIA_BODY_PARAMETER_DEFAULT_VALUE = {\n 'description': ('The filename of the media request body, or an instance '\n 'of a MediaUpload object.'),\n 'type': 'string',\n 'required': False,\n}\nMEDIA_MIME_TYPE_PARAMETER_DEFAULT_VALUE = {\n 'description': ('The MIME type of the media request body, or an instance '\n 'of a MediaUpload object.'),\n 'type': 'string',\n 'required': False,\n}\n_PAGE_TOKEN_NAMES = ('pageToken', 'nextPageToken')\n\n# Parameters accepted by the stack, but not visible via discovery.\n# TODO(dhermes): Remove 'userip' in 'v2'.\nSTACK_QUERY_PARAMETERS = frozenset(['trace', 'pp', 'userip', 'strict'])\nSTACK_QUERY_PARAMETER_DEFAULT_VALUE = {'type': 'string', 'location': 'query'}\n\n# Library-specific reserved words beyond Python keywords.\nRESERVED_WORDS = frozenset(['body'])\n\n# patch _write_lines to avoid munging '\\r' into '\\n'\n# ( https://bugs.python.org/issue18886 https://bugs.python.org/issue19003 )\nclass _BytesGenerator(BytesGenerator):\n _write_lines = BytesGenerator.write\n\ndef fix_method_name(name):\n \"\"\"Fix method names to avoid reserved word conflicts.\n\n Args:\n name: string, method name.\n\n Returns:\n The name with an '_' appended if the name is a reserved word.\n \"\"\"\n if keyword.iskeyword(name) or name in RESERVED_WORDS:\n return name + '_'\n else:\n return name\n\n\ndef key2param(key):\n \"\"\"Converts key names into parameter names.\n\n For example, converting \"max-results\" -> \"max_results\"\n\n Args:\n key: string, the method key name.\n\n Returns:\n A safe method name based on the key name.\n \"\"\"\n result = []\n key = list(key)\n if not key[0].isalpha():\n result.append('x')\n for c in key:\n if c.isalnum():\n result.append(c)\n else:\n result.append('_')\n\n return ''.join(result)\n\n\n@positional(2)\ndef build(serviceName,\n version,\n http=None,\n discoveryServiceUrl=DISCOVERY_URI,\n developerKey=None,\n model=None,\n requestBuilder=HttpRequest,\n credentials=None,\n cache_discovery=True,\n cache=None):\n \"\"\"Construct a Resource for interacting with an API.\n\n Construct a Resource object for interacting with an API. The serviceName and\n version are the names from the Discovery service.\n\n Args:\n serviceName: string, name of the service.\n version: string, the version of the service.\n http: httplib2.Http, An instance of httplib2.Http or something that acts\n like it that HTTP requests will be made through.\n discoveryServiceUrl: string, a URI Template that points to the location of\n the discovery service. 
It should have two parameters {api} and\n {apiVersion} that when filled in produce an absolute URI to the discovery\n document for that service.\n developerKey: string, key obtained from\n https://code.google.com/apis/console.\n model: googleapiclient.Model, converts to and from the wire format.\n requestBuilder: googleapiclient.http.HttpRequest, encapsulator for an HTTP\n request.\n credentials: oauth2client.Credentials or\n google.auth.credentials.Credentials, credentials to be used for\n authentication.\n cache_discovery: Boolean, whether or not to cache the discovery doc.\n cache: googleapiclient.discovery_cache.base.CacheBase, an optional\n cache object for the discovery documents.\n\n Returns:\n A Resource object with methods for interacting with the service.\n \"\"\"\n params = {\n 'api': serviceName,\n 'apiVersion': version\n }\n\n if http is None:\n discovery_http = build_http()\n else:\n discovery_http = http\n\n for discovery_url in (discoveryServiceUrl, V2_DISCOVERY_URI,):\n requested_url = uritemplate.expand(discovery_url, params)\n\n try:\n content = _retrieve_discovery_doc(\n requested_url, discovery_http, cache_discovery, cache)\n return build_from_document(content, base=discovery_url, http=http,\n developerKey=developerKey, model=model, requestBuilder=requestBuilder,\n credentials=credentials)\n except HttpError as e:\n if e.resp.status == http_client.NOT_FOUND:\n continue\n else:\n raise e\n\n raise UnknownApiNameOrVersion(\n \"name: %s version: %s\" % (serviceName, version))\n\n\ndef _retrieve_discovery_doc(url, http, cache_discovery, cache=None):\n \"\"\"Retrieves the discovery_doc from cache or the internet.\n\n Args:\n url: string, the URL of the discovery document.\n http: httplib2.Http, An instance of httplib2.Http or something that acts\n like it through which HTTP requests will be made.\n cache_discovery: Boolean, whether or not to cache the discovery doc.\n cache: googleapiclient.discovery_cache.base.Cache, an optional cache\n object for the discovery documents.\n\n Returns:\n A unicode string representation of the discovery document.\n \"\"\"\n if cache_discovery:\n from . import discovery_cache\n from .discovery_cache import base\n if cache is None:\n cache = discovery_cache.autodetect()\n if cache:\n content = cache.get(url)\n if content:\n return content\n\n actual_url = url\n # REMOTE_ADDR is defined by the CGI spec [RFC3875] as the environment\n # variable that contains the network address of the client sending the\n # request. 
If it exists then add that to the request for the discovery\n # document to avoid exceeding the quota on discovery requests.\n if 'REMOTE_ADDR' in os.environ:\n actual_url = _add_query_parameter(url, 'userIp', os.environ['REMOTE_ADDR'])\n logger.info('URL being requested: GET %s', actual_url)\n\n resp, content = http.request(actual_url)\n\n if resp.status >= 400:\n raise HttpError(resp, content, uri=actual_url)\n\n try:\n content = content.decode('utf-8')\n except AttributeError:\n pass\n\n try:\n service = json.loads(content)\n except ValueError as e:\n logger.error('Failed to parse as JSON: ' + content)\n raise InvalidJsonError()\n if cache_discovery and cache:\n cache.set(url, content)\n return content\n\n\n@positional(1)\ndef build_from_document(\n service,\n base=None,\n future=None,\n http=None,\n developerKey=None,\n model=None,\n requestBuilder=HttpRequest,\n credentials=None):\n \"\"\"Create a Resource for interacting with an API.\n\n Same as `build()`, but constructs the Resource object from a discovery\n document that is it given, as opposed to retrieving one over HTTP.\n\n Args:\n service: string or object, the JSON discovery document describing the API.\n The value passed in may either be the JSON string or the deserialized\n JSON.\n base: string, base URI for all HTTP requests, usually the discovery URI.\n This parameter is no longer used as rootUrl and servicePath are included\n within the discovery document. (deprecated)\n future: string, discovery document with future capabilities (deprecated).\n http: httplib2.Http, An instance of httplib2.Http or something that acts\n like it that HTTP requests will be made through.\n developerKey: string, Key for controlling API usage, generated\n from the API Console.\n model: Model class instance that serializes and de-serializes requests and\n responses.\n requestBuilder: Takes an http request and packages it up to be executed.\n credentials: oauth2client.Credentials or\n google.auth.credentials.Credentials, credentials to be used for\n authentication.\n\n Returns:\n A Resource object with methods for interacting with the service.\n \"\"\"\n\n if http is not None and credentials is not None:\n raise ValueError('Arguments http and credentials are mutually exclusive.')\n\n if isinstance(service, six.string_types):\n service = json.loads(service)\n\n if 'rootUrl' not in service and (isinstance(http, (HttpMock,\n HttpMockSequence))):\n logger.error(\"You are using HttpMock or HttpMockSequence without\" +\n \"having the service discovery doc in cache. Try calling \" +\n \"build() without mocking once first to populate the \" +\n \"cache.\")\n raise InvalidJsonError()\n\n base = urljoin(service['rootUrl'], service['servicePath'])\n schema = Schemas(service)\n\n # If the http client is not specified, then we must construct an http client\n # to make requests. 
If the service has scopes, then we also need to setup\n # authentication.\n if http is None:\n # Does the service require scopes?\n scopes = list(\n service.get('auth', {}).get('oauth2', {}).get('scopes', {}).keys())\n\n # If so, then the we need to setup authentication if no developerKey is\n # specified.\n if scopes and not developerKey:\n # If the user didn't pass in credentials, attempt to acquire application\n # default credentials.\n if credentials is None:\n credentials = _auth.default_credentials()\n\n # The credentials need to be scoped.\n credentials = _auth.with_scopes(credentials, scopes)\n\n # Create an authorized http instance\n http = _auth.authorized_http(credentials)\n\n # If the service doesn't require scopes then there is no need for\n # authentication.\n else:\n http = build_http()\n\n if model is None:\n features = service.get('features', [])\n model = JsonModel('dataWrapper' in features)\n\n return Resource(http=http, baseUrl=base, model=model,\n developerKey=developerKey, requestBuilder=requestBuilder,\n resourceDesc=service, rootDesc=service, schema=schema)\n\n\ndef _cast(value, schema_type):\n \"\"\"Convert value to a string based on JSON Schema type.\n\n See http://tools.ietf.org/html/draft-zyp-json-schema-03 for more details on\n JSON Schema.\n\n Args:\n value: any, the value to convert\n schema_type: string, the type that value should be interpreted as\n\n Returns:\n A string representation of 'value' based on the schema_type.\n \"\"\"\n if schema_type == 'string':\n if type(value) == type('') or type(value) == type(u''):\n return value\n else:\n return str(value)\n elif schema_type == 'integer':\n return str(int(value))\n elif schema_type == 'number':\n return str(float(value))\n elif schema_type == 'boolean':\n return str(bool(value)).lower()\n else:\n if type(value) == type('') or type(value) == type(u''):\n return value\n else:\n return str(value)\n\n\ndef _media_size_to_long(maxSize):\n \"\"\"Convert a string media size, such as 10GB or 3TB into an integer.\n\n Args:\n maxSize: string, size as a string, such as 2MB or 7GB.\n\n Returns:\n The size as an integer value.\n \"\"\"\n if len(maxSize) < 2:\n return 0\n units = maxSize[-2:].upper()\n bit_shift = _MEDIA_SIZE_BIT_SHIFTS.get(units)\n if bit_shift is not None:\n return int(maxSize[:-2]) << bit_shift\n else:\n return int(maxSize)\n\n\ndef _media_path_url_from_info(root_desc, path_url):\n \"\"\"Creates an absolute media path URL.\n\n Constructed using the API root URI and service path from the discovery\n document and the relative path for the API method.\n\n Args:\n root_desc: Dictionary; the entire original deserialized discovery document.\n path_url: String; the relative URL for the API method. Relative to the API\n root, which is specified in the discovery document.\n\n Returns:\n String; the absolute URI for media upload for the API method.\n \"\"\"\n return '%(root)supload/%(service_path)s%(path)s' % {\n 'root': root_desc['rootUrl'],\n 'service_path': root_desc['servicePath'],\n 'path': path_url,\n }\n\n\ndef _fix_up_parameters(method_desc, root_desc, http_method):\n \"\"\"Updates parameters of an API method with values specific to this library.\n\n Specifically, adds whatever global parameters are specified by the API to the\n parameters for the individual method. 
Also adds parameters which don't\n appear in the discovery document, but are available to all discovery based\n APIs (these are listed in STACK_QUERY_PARAMETERS).\n\n SIDE EFFECTS: This updates the parameters dictionary object in the method\n description.\n\n Args:\n method_desc: Dictionary with metadata describing an API method. Value comes\n from the dictionary of methods stored in the 'methods' key in the\n deserialized discovery document.\n root_desc: Dictionary; the entire original deserialized discovery document.\n http_method: String; the HTTP method used to call the API method described\n in method_desc.\n\n Returns:\n The updated Dictionary stored in the 'parameters' key of the method\n description dictionary.\n \"\"\"\n parameters = method_desc.setdefault('parameters', {})\n\n # Add in the parameters common to all methods.\n for name, description in six.iteritems(root_desc.get('parameters', {})):\n parameters[name] = description\n\n # Add in undocumented query parameters.\n for name in STACK_QUERY_PARAMETERS:\n parameters[name] = STACK_QUERY_PARAMETER_DEFAULT_VALUE.copy()\n\n # Add 'body' (our own reserved word) to parameters if the method supports\n # a request payload.\n if http_method in HTTP_PAYLOAD_METHODS and 'request' in method_desc:\n body = BODY_PARAMETER_DEFAULT_VALUE.copy()\n body.update(method_desc['request'])\n parameters['body'] = body\n\n return parameters\n\n\ndef _fix_up_media_upload(method_desc, root_desc, path_url, parameters):\n \"\"\"Adds 'media_body' and 'media_mime_type' parameters if supported by method.\n\n SIDE EFFECTS: If the method supports media upload and has a required body,\n sets body to be optional (required=False) instead. Also, if there is a\n 'mediaUpload' in the method description, adds 'media_upload' key to\n parameters.\n\n Args:\n method_desc: Dictionary with metadata describing an API method. Value comes\n from the dictionary of methods stored in the 'methods' key in the\n deserialized discovery document.\n root_desc: Dictionary; the entire original deserialized discovery document.\n path_url: String; the relative URL for the API method. Relative to the API\n root, which is specified in the discovery document.\n parameters: A dictionary describing method parameters for method described\n in method_desc.\n\n Returns:\n Triple (accept, max_size, media_path_url) where:\n - accept is a list of strings representing what content types are\n accepted for media upload. Defaults to empty list if not in the\n discovery document.\n - max_size is a long representing the max size in bytes allowed for a\n media upload. Defaults to 0L if not in the discovery document.\n - media_path_url is a String; the absolute URI for media upload for the\n API method. Constructed using the API root URI and service path from\n the discovery document and the relative path for the API method. 
If\n media upload is not supported, this is None.\n \"\"\"\n media_upload = method_desc.get('mediaUpload', {})\n accept = media_upload.get('accept', [])\n max_size = _media_size_to_long(media_upload.get('maxSize', ''))\n media_path_url = None\n\n if media_upload:\n media_path_url = _media_path_url_from_info(root_desc, path_url)\n parameters['media_body'] = MEDIA_BODY_PARAMETER_DEFAULT_VALUE.copy()\n parameters['media_mime_type'] = MEDIA_MIME_TYPE_PARAMETER_DEFAULT_VALUE.copy()\n if 'body' in parameters:\n parameters['body']['required'] = False\n\n return accept, max_size, media_path_url\n\n\ndef _fix_up_method_description(method_desc, root_desc):\n \"\"\"Updates a method description in a discovery document.\n\n SIDE EFFECTS: Changes the parameters dictionary in the method description with\n extra parameters which are used locally.\n\n Args:\n method_desc: Dictionary with metadata describing an API method. Value comes\n from the dictionary of methods stored in the 'methods' key in the\n deserialized discovery document.\n root_desc: Dictionary; the entire original deserialized discovery document.\n\n Returns:\n Tuple (path_url, http_method, method_id, accept, max_size, media_path_url)\n where:\n - path_url is a String; the relative URL for the API method. Relative to\n the API root, which is specified in the discovery document.\n - http_method is a String; the HTTP method used to call the API method\n described in the method description.\n - method_id is a String; the name of the RPC method associated with the\n API method, and is in the method description in the 'id' key.\n - accept is a list of strings representing what content types are\n accepted for media upload. Defaults to empty list if not in the\n discovery document.\n - max_size is a long representing the max size in bytes allowed for a\n media upload. Defaults to 0L if not in the discovery document.\n - media_path_url is a String; the absolute URI for media upload for the\n API method. Constructed using the API root URI and service path from\n the discovery document and the relative path for the API method. If\n media upload is not supported, this is None.\n \"\"\"\n path_url = method_desc['path']\n http_method = method_desc['httpMethod']\n method_id = method_desc['id']\n\n parameters = _fix_up_parameters(method_desc, root_desc, http_method)\n # Order is important. `_fix_up_media_upload` needs `method_desc` to have a\n # 'parameters' key and needs to know if there is a 'body' parameter because it\n # also sets a 'media_body' parameter.\n accept, max_size, media_path_url = _fix_up_media_upload(\n method_desc, root_desc, path_url, parameters)\n\n return path_url, http_method, method_id, accept, max_size, media_path_url\n\n\ndef _urljoin(base, url):\n \"\"\"Custom urljoin replacement supporting : before / in url.\"\"\"\n # In general, it's unsafe to simply join base and url. However, for\n # the case of discovery documents, we know:\n # * base will never contain params, query, or fragment\n # * url will never contain a scheme or net_loc.\n # In general, this means we can safely join on /; we just need to\n # ensure we end up with precisely one / joining base and url. 
The\n # exception here is the case of media uploads, where url will be an\n # absolute url.\n if url.startswith('http://') or url.startswith('https://'):\n return urljoin(base, url)\n new_base = base if base.endswith('/') else base + '/'\n new_url = url[1:] if url.startswith('/') else url\n return new_base + new_url\n\n\n# TODO(dhermes): Convert this class to ResourceMethod and make it callable\nclass ResourceMethodParameters(object):\n \"\"\"Represents the parameters associated with a method.\n\n Attributes:\n argmap: Map from method parameter name (string) to query parameter name\n (string).\n required_params: List of required parameters (represented by parameter\n name as string).\n repeated_params: List of repeated parameters (represented by parameter\n name as string).\n pattern_params: Map from method parameter name (string) to regular\n expression (as a string). If the pattern is set for a parameter, the\n value for that parameter must match the regular expression.\n query_params: List of parameters (represented by parameter name as string)\n that will be used in the query string.\n path_params: Set of parameters (represented by parameter name as string)\n that will be used in the base URL path.\n param_types: Map from method parameter name (string) to parameter type. Type\n can be any valid JSON schema type; valid values are 'any', 'array',\n 'boolean', 'integer', 'number', 'object', or 'string'. Reference:\n http://tools.ietf.org/html/draft-zyp-json-schema-03#section-5.1\n enum_params: Map from method parameter name (string) to list of strings,\n where each list of strings is the list of acceptable enum values.\n \"\"\"\n\n def __init__(self, method_desc):\n \"\"\"Constructor for ResourceMethodParameters.\n\n Sets default values and defers to set_parameters to populate.\n\n Args:\n method_desc: Dictionary with metadata describing an API method. Value\n comes from the dictionary of methods stored in the 'methods' key in\n the deserialized discovery document.\n \"\"\"\n self.argmap = {}\n self.required_params = []\n self.repeated_params = []\n self.pattern_params = {}\n self.query_params = []\n # TODO(dhermes): Change path_params to a list if the extra URITEMPLATE\n # parsing is gotten rid of.\n self.path_params = set()\n self.param_types = {}\n self.enum_params = {}\n\n self.set_parameters(method_desc)\n\n def set_parameters(self, method_desc):\n \"\"\"Populates maps and lists based on method description.\n\n Iterates through each parameter for the method and parses the values from\n the parameter dictionary.\n\n Args:\n method_desc: Dictionary with metadata describing an API method. Value\n comes from the dictionary of methods stored in the 'methods' key in\n the deserialized discovery document.\n \"\"\"\n for arg, desc in six.iteritems(method_desc.get('parameters', {})):\n param = key2param(arg)\n self.argmap[param] = arg\n\n if desc.get('pattern'):\n self.pattern_params[param] = desc['pattern']\n if desc.get('enum'):\n self.enum_params[param] = desc['enum']\n if desc.get('required'):\n self.required_params.append(param)\n if desc.get('repeated'):\n self.repeated_params.append(param)\n if desc.get('location') == 'query':\n self.query_params.append(param)\n if desc.get('location') == 'path':\n self.path_params.add(param)\n self.param_types[param] = desc.get('type', 'string')\n\n # TODO(dhermes): Determine if this is still necessary. 
Discovery based APIs\n # should have all path parameters already marked with\n # 'location: path'.\n for match in URITEMPLATE.finditer(method_desc['path']):\n for namematch in VARNAME.finditer(match.group(0)):\n name = key2param(namematch.group(0))\n self.path_params.add(name)\n if name in self.query_params:\n self.query_params.remove(name)\n\n\ndef createMethod(methodName, methodDesc, rootDesc, schema):\n \"\"\"Creates a method for attaching to a Resource.\n\n Args:\n methodName: string, name of the method to use.\n methodDesc: object, fragment of deserialized discovery document that\n describes the method.\n rootDesc: object, the entire deserialized discovery document.\n schema: object, mapping of schema names to schema descriptions.\n \"\"\"\n methodName = fix_method_name(methodName)\n (pathUrl, httpMethod, methodId, accept,\n maxSize, mediaPathUrl) = _fix_up_method_description(methodDesc, rootDesc)\n\n parameters = ResourceMethodParameters(methodDesc)\n\n def method(self, **kwargs):\n # Don't bother with doc string, it will be over-written by createMethod.\n\n for name in six.iterkeys(kwargs):\n if name not in parameters.argmap:\n raise TypeError('Got an unexpected keyword argument \"%s\"' % name)\n\n # Remove args that have a value of None.\n keys = list(kwargs.keys())\n for name in keys:\n if kwargs[name] is None:\n del kwargs[name]\n\n for name in parameters.required_params:\n if name not in kwargs:\n # temporary workaround for non-paging methods incorrectly requiring\n # page token parameter (cf. drive.changes.watch vs. drive.changes.list)\n if name not in _PAGE_TOKEN_NAMES or _findPageTokenName(\n _methodProperties(methodDesc, schema, 'response')):\n raise TypeError('Missing required parameter \"%s\"' % name)\n\n for name, regex in six.iteritems(parameters.pattern_params):\n if name in kwargs:\n if isinstance(kwargs[name], six.string_types):\n pvalues = [kwargs[name]]\n else:\n pvalues = kwargs[name]\n for pvalue in pvalues:\n if re.match(regex, pvalue) is None:\n raise TypeError(\n 'Parameter \"%s\" value \"%s\" does not match the pattern \"%s\"' %\n (name, pvalue, regex))\n\n for name, enums in six.iteritems(parameters.enum_params):\n if name in kwargs:\n # We need to handle the case of a repeated enum\n # name differently, since we want to handle both\n # arg='value' and arg=['value1', 'value2']\n if (name in parameters.repeated_params and\n not isinstance(kwargs[name], six.string_types)):\n values = kwargs[name]\n else:\n values = [kwargs[name]]\n for value in values:\n if value not in enums:\n raise TypeError(\n 'Parameter \"%s\" value \"%s\" is not an allowed value in \"%s\"' %\n (name, value, str(enums)))\n\n actual_query_params = {}\n actual_path_params = {}\n for key, value in six.iteritems(kwargs):\n to_type = parameters.param_types.get(key, 'string')\n # For repeated parameters we cast each member of the list.\n if key in parameters.repeated_params and type(value) == type([]):\n cast_value = [_cast(x, to_type) for x in value]\n else:\n cast_value = _cast(value, to_type)\n if key in parameters.query_params:\n actual_query_params[parameters.argmap[key]] = cast_value\n if key in parameters.path_params:\n actual_path_params[parameters.argmap[key]] = cast_value\n body_value = kwargs.get('body', None)\n media_filename = kwargs.get('media_body', None)\n media_mime_type = kwargs.get('media_mime_type', None)\n\n if self._developerKey:\n actual_query_params['key'] = self._developerKey\n\n model = self._model\n if methodName.endswith('_media'):\n model = MediaModel()\n elif 
'response' not in methodDesc:\n model = RawModel()\n\n headers = {}\n headers, params, query, body = model.request(headers,\n actual_path_params, actual_query_params, body_value)\n\n expanded_url = uritemplate.expand(pathUrl, params)\n url = _urljoin(self._baseUrl, expanded_url + query)\n\n resumable = None\n multipart_boundary = ''\n\n if media_filename:\n # Ensure we end up with a valid MediaUpload object.\n if isinstance(media_filename, six.string_types):\n if media_mime_type is None:\n logger.warning(\n 'media_mime_type argument not specified: trying to auto-detect for %s',\n media_filename)\n media_mime_type, _ = mimetypes.guess_type(media_filename)\n if media_mime_type is None:\n raise UnknownFileType(media_filename)\n if not mimeparse.best_match([media_mime_type], ','.join(accept)):\n raise UnacceptableMimeTypeError(media_mime_type)\n media_upload = MediaFileUpload(media_filename,\n mimetype=media_mime_type)\n elif isinstance(media_filename, MediaUpload):\n media_upload = media_filename\n else:\n raise TypeError('media_filename must be str or MediaUpload.')\n\n # Check the maxSize\n if media_upload.size() is not None and media_upload.size() > maxSize > 0:\n raise MediaUploadSizeError(\"Media larger than: %s\" % maxSize)\n\n # Use the media path uri for media uploads\n expanded_url = uritemplate.expand(mediaPathUrl, params)\n url = _urljoin(self._baseUrl, expanded_url + query)\n if media_upload.resumable():\n url = _add_query_parameter(url, 'uploadType', 'resumable')\n\n if media_upload.resumable():\n # This is all we need to do for resumable, if the body exists it gets\n # sent in the first request, otherwise an empty body is sent.\n resumable = media_upload\n else:\n # A non-resumable upload\n if body is None:\n # This is a simple media upload\n headers['content-type'] = media_upload.mimetype()\n body = media_upload.getbytes(0, media_upload.size())\n url = _add_query_parameter(url, 'uploadType', 'media')\n else:\n # This is a multipart/related upload.\n msgRoot = MIMEMultipart('related')\n # msgRoot should not write out it's own headers\n setattr(msgRoot, '_write_headers', lambda self: None)\n\n # attach the body as one part\n msg = MIMENonMultipart(*headers['content-type'].split('/'))\n msg.set_payload(body)\n msgRoot.attach(msg)\n\n # attach the media as the second part\n msg = MIMENonMultipart(*media_upload.mimetype().split('/'))\n msg['Content-Transfer-Encoding'] = 'binary'\n\n payload = media_upload.getbytes(0, media_upload.size())\n msg.set_payload(payload)\n msgRoot.attach(msg)\n # encode the body: note that we can't use `as_string`, because\n # it plays games with `From ` lines.\n fp = BytesIO()\n g = _BytesGenerator(fp, mangle_from_=False)\n g.flatten(msgRoot, unixfrom=False)\n body = fp.getvalue()\n\n multipart_boundary = msgRoot.get_boundary()\n headers['content-type'] = ('multipart/related; '\n 'boundary=\"%s\"') % multipart_boundary\n url = _add_query_parameter(url, 'uploadType', 'multipart')\n\n logger.info('URL being requested: %s %s' % (httpMethod,url))\n return self._requestBuilder(self._http,\n model.response,\n url,\n method=httpMethod,\n body=body,\n headers=headers,\n methodId=methodId,\n resumable=resumable)\n\n docs = [methodDesc.get('description', DEFAULT_METHOD_DOC), '\\n\\n']\n if len(parameters.argmap) > 0:\n docs.append('Args:\\n')\n\n # Skip undocumented params and params common to all methods.\n skip_parameters = list(rootDesc.get('parameters', {}).keys())\n skip_parameters.extend(STACK_QUERY_PARAMETERS)\n\n all_args = list(parameters.argmap.keys())\n 
args_ordered = [key2param(s) for s in methodDesc.get('parameterOrder', [])]\n\n # Move body to the front of the line.\n if 'body' in all_args:\n args_ordered.append('body')\n\n for name in all_args:\n if name not in args_ordered:\n args_ordered.append(name)\n\n for arg in args_ordered:\n if arg in skip_parameters:\n continue\n\n repeated = ''\n if arg in parameters.repeated_params:\n repeated = ' (repeated)'\n required = ''\n if arg in parameters.required_params:\n required = ' (required)'\n paramdesc = methodDesc['parameters'][parameters.argmap[arg]]\n paramdoc = paramdesc.get('description', 'A parameter')\n if '$ref' in paramdesc:\n docs.append(\n (' %s: object, %s%s%s\\n The object takes the'\n ' form of:\\n\\n%s\\n\\n') % (arg, paramdoc, required, repeated,\n schema.prettyPrintByName(paramdesc['$ref'])))\n else:\n paramtype = paramdesc.get('type', 'string')\n docs.append(' %s: %s, %s%s%s\\n' % (arg, paramtype, paramdoc, required,\n repeated))\n enum = paramdesc.get('enum', [])\n enumDesc = paramdesc.get('enumDescriptions', [])\n if enum and enumDesc:\n docs.append(' Allowed values\\n')\n for (name, desc) in zip(enum, enumDesc):\n docs.append(' %s - %s\\n' % (name, desc))\n if 'response' in methodDesc:\n if methodName.endswith('_media'):\n docs.append('\\nReturns:\\n The media object as a string.\\n\\n ')\n else:\n docs.append('\\nReturns:\\n An object of the form:\\n\\n ')\n docs.append(schema.prettyPrintSchema(methodDesc['response']))\n\n setattr(method, '__doc__', ''.join(docs))\n return (methodName, method)\n\n\ndef createNextMethod(methodName,\n pageTokenName='pageToken',\n nextPageTokenName='nextPageToken',\n isPageTokenParameter=True):\n \"\"\"Creates any _next methods for attaching to a Resource.\n\n The _next methods allow for easy iteration through list() responses.\n\n Args:\n methodName: string, name of the method to use.\n pageTokenName: string, name of request page token field.\n nextPageTokenName: string, name of response page token field.\n isPageTokenParameter: Boolean, True if request page token is a query\n parameter, False if request page token is a field of the request body.\n \"\"\"\n methodName = fix_method_name(methodName)\n\n def methodNext(self, previous_request, previous_response):\n \"\"\"Retrieves the next page of results.\n\nArgs:\n previous_request: The request for the previous page. (required)\n previous_response: The response from the request for the previous page. (required)\n\nReturns:\n A request object that you can call 'execute()' on to request the next\n page. 
Returns None if there are no more items in the collection.\n \"\"\"\n # Retrieve nextPageToken from previous_response\n # Use as pageToken in previous_request to create new request.\n\n nextPageToken = previous_response.get(nextPageTokenName, None)\n if not nextPageToken:\n return None\n\n request = copy.copy(previous_request)\n\n if isPageTokenParameter:\n # Replace pageToken value in URI\n request.uri = _add_query_parameter(\n request.uri, pageTokenName, nextPageToken)\n logger.info('Next page request URL: %s %s' % (methodName, request.uri))\n else:\n # Replace pageToken value in request body\n model = self._model\n body = model.deserialize(request.body)\n body[pageTokenName] = nextPageToken\n request.body = model.serialize(body)\n logger.info('Next page request body: %s %s' % (methodName, body))\n\n return request\n\n return (methodName, methodNext)\n\n\nclass Resource(object):\n \"\"\"A class for interacting with a resource.\"\"\"\n\n def __init__(self, http, baseUrl, model, requestBuilder, developerKey,\n resourceDesc, rootDesc, schema):\n \"\"\"Build a Resource from the API description.\n\n Args:\n http: httplib2.Http, Object to make http requests with.\n baseUrl: string, base URL for the API. All requests are relative to this\n URI.\n model: googleapiclient.Model, converts to and from the wire format.\n requestBuilder: class or callable that instantiates an\n googleapiclient.HttpRequest object.\n developerKey: string, key obtained from\n https://code.google.com/apis/console\n resourceDesc: object, section of deserialized discovery document that\n describes a resource. Note that the top level discovery document\n is considered a resource.\n rootDesc: object, the entire deserialized discovery document.\n schema: object, mapping of schema names to schema descriptions.\n \"\"\"\n self._dynamic_attrs = []\n\n self._http = http\n self._baseUrl = baseUrl\n self._model = model\n self._developerKey = developerKey\n self._requestBuilder = requestBuilder\n self._resourceDesc = resourceDesc\n self._rootDesc = rootDesc\n self._schema = schema\n\n self._set_service_methods()\n\n def _set_dynamic_attr(self, attr_name, value):\n \"\"\"Sets an instance attribute and tracks it in a list of dynamic attributes.\n\n Args:\n attr_name: string; The name of the attribute to be set\n value: The value being set on the object and tracked in the dynamic cache.\n \"\"\"\n self._dynamic_attrs.append(attr_name)\n self.__dict__[attr_name] = value\n\n def __getstate__(self):\n \"\"\"Trim the state down to something that can be pickled.\n\n Uses the fact that the instance variable _dynamic_attrs holds attrs that\n will be wiped and restored on pickle serialization.\n \"\"\"\n state_dict = copy.copy(self.__dict__)\n for dynamic_attr in self._dynamic_attrs:\n del state_dict[dynamic_attr]\n del state_dict['_dynamic_attrs']\n return state_dict\n\n def __setstate__(self, state):\n \"\"\"Reconstitute the state of the object from being pickled.\n\n Uses the fact that the instance variable _dynamic_attrs holds attrs that\n will be wiped and restored on pickle serialization.\n \"\"\"\n self.__dict__.update(state)\n self._dynamic_attrs = []\n self._set_service_methods()\n\n def _set_service_methods(self):\n self._add_basic_methods(self._resourceDesc, self._rootDesc, self._schema)\n self._add_nested_resources(self._resourceDesc, self._rootDesc, self._schema)\n self._add_next_methods(self._resourceDesc, self._schema)\n\n def _add_basic_methods(self, resourceDesc, rootDesc, schema):\n # If this is the root Resource, add a 
new_batch_http_request() method.\n if resourceDesc == rootDesc:\n batch_uri = '%s%s' % (\n rootDesc['rootUrl'], rootDesc.get('batchPath', 'batch'))\n def new_batch_http_request(callback=None):\n \"\"\"Create a BatchHttpRequest object based on the discovery document.\n\n Args:\n callback: callable, A callback to be called for each response, of the\n form callback(id, response, exception). The first parameter is the\n request id, and the second is the deserialized response object. The\n third is an apiclient.errors.HttpError exception object if an HTTP\n error occurred while processing the request, or None if no error\n occurred.\n\n Returns:\n A BatchHttpRequest object based on the discovery document.\n \"\"\"\n return BatchHttpRequest(callback=callback, batch_uri=batch_uri)\n self._set_dynamic_attr('new_batch_http_request', new_batch_http_request)\n\n # Add basic methods to Resource\n if 'methods' in resourceDesc:\n for methodName, methodDesc in six.iteritems(resourceDesc['methods']):\n fixedMethodName, method = createMethod(\n methodName, methodDesc, rootDesc, schema)\n self._set_dynamic_attr(fixedMethodName,\n method.__get__(self, self.__class__))\n # Add in _media methods. The functionality of the attached method will\n # change when it sees that the method name ends in _media.\n if methodDesc.get('supportsMediaDownload', False):\n fixedMethodName, method = createMethod(\n methodName + '_media', methodDesc, rootDesc, schema)\n self._set_dynamic_attr(fixedMethodName,\n method.__get__(self, self.__class__))\n\n def _add_nested_resources(self, resourceDesc, rootDesc, schema):\n # Add in nested resources\n if 'resources' in resourceDesc:\n\n def createResourceMethod(methodName, methodDesc):\n \"\"\"Create a method on the Resource to access a nested Resource.\n\n Args:\n methodName: string, name of the method to use.\n methodDesc: object, fragment of deserialized discovery document that\n describes the method.\n \"\"\"\n methodName = fix_method_name(methodName)\n\n def methodResource(self):\n return Resource(http=self._http, baseUrl=self._baseUrl,\n model=self._model, developerKey=self._developerKey,\n requestBuilder=self._requestBuilder,\n resourceDesc=methodDesc, rootDesc=rootDesc,\n schema=schema)\n\n setattr(methodResource, '__doc__', 'A collection resource.')\n setattr(methodResource, '__is_resource__', True)\n\n return (methodName, methodResource)\n\n for methodName, methodDesc in six.iteritems(resourceDesc['resources']):\n fixedMethodName, method = createResourceMethod(methodName, methodDesc)\n self._set_dynamic_attr(fixedMethodName,\n method.__get__(self, self.__class__))\n\n def _add_next_methods(self, resourceDesc, schema):\n # Add _next() methods if and only if one of the names 'pageToken' or\n # 'nextPageToken' occurs among the fields of both the method's response\n # type either the method's request (query parameters) or request body.\n if 'methods' not in resourceDesc:\n return\n for methodName, methodDesc in six.iteritems(resourceDesc['methods']):\n nextPageTokenName = _findPageTokenName(\n _methodProperties(methodDesc, schema, 'response'))\n if not nextPageTokenName:\n continue\n isPageTokenParameter = True\n pageTokenName = _findPageTokenName(methodDesc.get('parameters', {}))\n if not pageTokenName:\n isPageTokenParameter = False\n pageTokenName = _findPageTokenName(\n _methodProperties(methodDesc, schema, 'request'))\n if not pageTokenName:\n continue\n fixedMethodName, method = createNextMethod(\n methodName + '_next', pageTokenName, nextPageTokenName,\n 
isPageTokenParameter)\n self._set_dynamic_attr(fixedMethodName,\n method.__get__(self, self.__class__))\n\n\ndef _findPageTokenName(fields):\n \"\"\"Search field names for one like a page token.\n\n Args:\n fields: container of string, names of fields.\n\n Returns:\n First name that is either 'pageToken' or 'nextPageToken' if one exists,\n otherwise None.\n \"\"\"\n return next((tokenName for tokenName in _PAGE_TOKEN_NAMES\n if tokenName in fields), None)\n\ndef _methodProperties(methodDesc, schema, name):\n \"\"\"Get properties of a field in a method description.\n\n Args:\n methodDesc: object, fragment of deserialized discovery document that\n describes the method.\n schema: object, mapping of schema names to schema descriptions.\n name: string, name of top-level field in method description.\n\n Returns:\n Object representing fragment of deserialized discovery document\n corresponding to 'properties' field of object corresponding to named field\n in method description, if it exists, otherwise empty dict.\n \"\"\"\n desc = methodDesc.get(name, {})\n if '$ref' in desc:\n desc = schema.get(desc['$ref'], {})\n return desc.get('properties', {})\n", "path": "googleapiclient/discovery.py" } ]
diff --git a/googleapiclient/discovery.py b/googleapiclient/discovery.py index 5c8624902da..e8b35273f90 100644 --- a/googleapiclient/discovery.py +++ b/googleapiclient/discovery.py @@ -138,7 +138,7 @@ def fix_method_name(name): name: string, method name. Returns: - The name with a '_' prefixed if the name is a reserved word. + The name with an '_' appended if the name is a reserved word. """ if keyword.iskeyword(name) or name in RESERVED_WORDS: return name + '_'
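The patch above only corrects the `fix_method_name` docstring, but the behaviour it documents is easy to see in isolation. The sketch below is a self-contained approximation rather than the library itself: the real `RESERVED_WORDS` set is defined elsewhere in `discovery.py`, so the one-entry stand-in here is an assumption for illustration only.

```python
import keyword

# Hypothetical stand-in for googleapiclient's RESERVED_WORDS; the real set
# lives elsewhere in discovery.py and is larger than this.
RESERVED_WORDS = frozenset(["body"])


def fix_method_name(name):
    # Reserved words get an underscore appended (not prefixed, as the old
    # docstring claimed) so they remain usable as attribute names on the
    # generated Resource objects.
    if keyword.iskeyword(name) or name in RESERVED_WORDS:
        return name + "_"
    return name


print(fix_method_name("import"))  # -> "import_"
print(fix_method_name("list"))    # -> "list"
```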
facebookresearch__hydra-1593
[Bug] Config composition error with latest version of OmegaConf # 🐛 Bug ## Description When using OmegaConf at commit 2dd15f9 (first commit where this problem occurs), there are multiple Hydra tests failures, for instance: ``` pytest "tests/test_basic_launcher.py::TestBasicLauncher::test_sweep_1_job[basic-overrides0]" (...) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = __INVALID__, value = None def validate_and_convert(self, value: Any) -> Any: """ Validates input and converts to canonical form :param value: input value :return: converted value ("100" may be converted to 100 for example) """ if value is None: if self._is_optional(): return None > raise ValidationError("Non optional field cannot be assigned None") E hydra.errors.ConfigCompositionException ../omegaconf/omegaconf/nodes.py:55: ConfigCompositionException ``` ## Checklist - [X] I checked on the latest version of Hydra - [X] I created a minimal repro (See [this](https://stackoverflow.com/help/minimal-reproducible-example) for tips). ## To reproduce Use master branch of Hydra with OmegaConf's commit 2dd15f9 ## Additional context This might actually be an OmegaConf bug (I'm not sure).
[ { "content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nfrom dataclasses import dataclass, field\nfrom typing import Any, Dict, List, Optional\n\nfrom omegaconf import MISSING\n\nfrom hydra.core.config_store import ConfigStore\n\n\n@dataclass\nclass HelpConf:\n app_name: str = MISSING\n header: str = MISSING\n footer: str = MISSING\n template: str = MISSING\n\n\n@dataclass\nclass HydraHelpConf:\n hydra_help: str = MISSING\n template: str = MISSING\n\n\n@dataclass\nclass RunDir:\n dir: str = MISSING\n\n\n@dataclass\nclass SweepDir:\n dir: str = MISSING\n subdir: str = MISSING\n\n\n@dataclass\nclass OverridesConf:\n # Overrides for the hydra configuration\n hydra: List[str] = field(default_factory=lambda: [])\n # Overrides for the task configuration\n task: List[str] = field(default_factory=lambda: [])\n\n\n# job runtime information will be populated here\n@dataclass\nclass JobConf:\n # Job name, populated automatically unless specified by the user (in config or cli)\n name: str = MISSING\n\n # Populated automatically by Hydra.\n # Concatenation of job overrides that can be used as a part\n # of the directory name.\n # This can be configured via hydra.job.config.override_dirname\n override_dirname: str = MISSING\n\n # Job ID in underlying scheduling system\n id: str = MISSING\n\n # Job number if job is a part of a sweep\n num: int = MISSING\n\n # The config name used by the job\n config_name: Optional[str] = MISSING\n\n # Environment variables to set remotely\n env_set: Dict[str, str] = field(default_factory=dict)\n # Environment variables to copy from the launching machine\n env_copy: List[str] = field(default_factory=list)\n\n # Job config\n @dataclass\n class JobConfig:\n @dataclass\n # configuration for the ${hydra.job.override_dirname} runtime variable\n class OverrideDirname:\n kv_sep: str = \"=\"\n item_sep: str = \",\"\n exclude_keys: List[str] = field(default_factory=list)\n\n override_dirname: OverrideDirname = OverrideDirname()\n\n config: JobConfig = JobConfig()\n\n\n@dataclass\nclass ConfigSourceInfo:\n path: str\n schema: str\n provider: str\n\n\n@dataclass\nclass RuntimeConf:\n version: str = MISSING\n cwd: str = MISSING\n config_sources: List[ConfigSourceInfo] = MISSING\n\n # Composition choices dictionary\n choices: Dict[str, str] = field(default_factory=lambda: {})\n\n\n@dataclass\nclass HydraConf:\n defaults: List[Any] = field(\n default_factory=lambda: [\n {\"output\": \"default\"},\n {\"launcher\": \"basic\"},\n {\"sweeper\": \"basic\"},\n {\"help\": \"default\"},\n {\"hydra_help\": \"default\"},\n {\"hydra_logging\": \"default\"},\n {\"job_logging\": \"default\"},\n {\"callbacks\": None},\n # env specific overrides\n {\"env\": \"default\"},\n ]\n )\n\n # Elements to append to the config search path.\n # Note: This can only be configured in the primary config.\n searchpath: List[str] = field(default_factory=list)\n\n # Normal run output configuration\n run: RunDir = RunDir()\n # Multi-run output configuration\n sweep: SweepDir = SweepDir()\n # Logging configuration for Hydra\n hydra_logging: Any = MISSING\n # Logging configuration for the job\n job_logging: Any = MISSING\n\n # Sweeper configuration\n sweeper: Any = MISSING\n # Launcher configuration\n launcher: Any = MISSING\n # Callbacks configuration\n callbacks: Dict[str, Any] = field(default_factory=dict)\n\n # Program Help template\n help: HelpConf = HelpConf()\n # Hydra's Help template\n hydra_help: HydraHelpConf = HydraHelpConf()\n\n # Output directory for produced configuration 
files and overrides.\n # E.g., hydra.yaml, overrides.yaml will go here. Useful for debugging\n # and extra context when looking at past runs.\n # Setting to None will prevent the creation of the output subdir.\n output_subdir: Optional[str] = \".hydra\"\n\n # Those lists will contain runtime overrides\n overrides: OverridesConf = OverridesConf()\n\n job: JobConf = JobConf()\n\n # populated at runtime\n runtime: RuntimeConf = RuntimeConf()\n\n # Can be a boolean, string or a list of strings\n # If a boolean, setting to true will set the log level for the root logger to debug\n # If a string, it's interpreted as a the list [string]\n # If a list, each element is interpreted as a logger to have logging level set to debug.\n # Typical command lines to manipulate hydra.verbose:\n # hydra.verbose=true\n # hydra.verbose=[hydra,__main__]\n # TODO: good use case for Union support in OmegaConf\n verbose: Any = False\n\n\ncs = ConfigStore.instance()\n\ncs.store(\n group=\"hydra\",\n name=\"config\",\n node=HydraConf(),\n provider=\"hydra\",\n)\n", "path": "hydra/conf/__init__.py" } ]
[ { "content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nfrom dataclasses import dataclass, field\nfrom typing import Any, Dict, List, Optional\n\nfrom omegaconf import MISSING\n\nfrom hydra.core.config_store import ConfigStore\n\n\n@dataclass\nclass HelpConf:\n app_name: str = MISSING\n header: str = MISSING\n footer: str = MISSING\n template: str = MISSING\n\n\n@dataclass\nclass HydraHelpConf:\n hydra_help: str = MISSING\n template: str = MISSING\n\n\n@dataclass\nclass RunDir:\n dir: str = MISSING\n\n\n@dataclass\nclass SweepDir:\n dir: str = MISSING\n subdir: str = MISSING\n\n\n@dataclass\nclass OverridesConf:\n # Overrides for the hydra configuration\n hydra: List[str] = field(default_factory=lambda: [])\n # Overrides for the task configuration\n task: List[str] = field(default_factory=lambda: [])\n\n\n# job runtime information will be populated here\n@dataclass\nclass JobConf:\n # Job name, populated automatically unless specified by the user (in config or cli)\n name: str = MISSING\n\n # Populated automatically by Hydra.\n # Concatenation of job overrides that can be used as a part\n # of the directory name.\n # This can be configured via hydra.job.config.override_dirname\n override_dirname: str = MISSING\n\n # Job ID in underlying scheduling system\n id: str = MISSING\n\n # Job number if job is a part of a sweep\n num: int = MISSING\n\n # The config name used by the job\n config_name: Optional[str] = MISSING\n\n # Environment variables to set remotely\n env_set: Dict[str, str] = field(default_factory=dict)\n # Environment variables to copy from the launching machine\n env_copy: List[str] = field(default_factory=list)\n\n # Job config\n @dataclass\n class JobConfig:\n @dataclass\n # configuration for the ${hydra.job.override_dirname} runtime variable\n class OverrideDirname:\n kv_sep: str = \"=\"\n item_sep: str = \",\"\n exclude_keys: List[str] = field(default_factory=list)\n\n override_dirname: OverrideDirname = OverrideDirname()\n\n config: JobConfig = JobConfig()\n\n\n@dataclass\nclass ConfigSourceInfo:\n path: str\n schema: str\n provider: str\n\n\n@dataclass\nclass RuntimeConf:\n version: str = MISSING\n cwd: str = MISSING\n config_sources: List[ConfigSourceInfo] = MISSING\n\n # Composition choices dictionary\n # Ideally, the value type would be Union[str, List[str], None]\n choices: Dict[str, Any] = field(default_factory=lambda: {})\n\n\n@dataclass\nclass HydraConf:\n defaults: List[Any] = field(\n default_factory=lambda: [\n {\"output\": \"default\"},\n {\"launcher\": \"basic\"},\n {\"sweeper\": \"basic\"},\n {\"help\": \"default\"},\n {\"hydra_help\": \"default\"},\n {\"hydra_logging\": \"default\"},\n {\"job_logging\": \"default\"},\n {\"callbacks\": None},\n # env specific overrides\n {\"env\": \"default\"},\n ]\n )\n\n # Elements to append to the config search path.\n # Note: This can only be configured in the primary config.\n searchpath: List[str] = field(default_factory=list)\n\n # Normal run output configuration\n run: RunDir = RunDir()\n # Multi-run output configuration\n sweep: SweepDir = SweepDir()\n # Logging configuration for Hydra\n hydra_logging: Any = MISSING\n # Logging configuration for the job\n job_logging: Any = MISSING\n\n # Sweeper configuration\n sweeper: Any = MISSING\n # Launcher configuration\n launcher: Any = MISSING\n # Callbacks configuration\n callbacks: Dict[str, Any] = field(default_factory=dict)\n\n # Program Help template\n help: HelpConf = HelpConf()\n # Hydra's Help template\n hydra_help: HydraHelpConf = 
HydraHelpConf()\n\n # Output directory for produced configuration files and overrides.\n # E.g., hydra.yaml, overrides.yaml will go here. Useful for debugging\n # and extra context when looking at past runs.\n # Setting to None will prevent the creation of the output subdir.\n output_subdir: Optional[str] = \".hydra\"\n\n # Those lists will contain runtime overrides\n overrides: OverridesConf = OverridesConf()\n\n job: JobConf = JobConf()\n\n # populated at runtime\n runtime: RuntimeConf = RuntimeConf()\n\n # Can be a boolean, string or a list of strings\n # If a boolean, setting to true will set the log level for the root logger to debug\n # If a string, it's interpreted as a the list [string]\n # If a list, each element is interpreted as a logger to have logging level set to debug.\n # Typical command lines to manipulate hydra.verbose:\n # hydra.verbose=true\n # hydra.verbose=[hydra,__main__]\n # TODO: good use case for Union support in OmegaConf\n verbose: Any = False\n\n\ncs = ConfigStore.instance()\n\ncs.store(\n group=\"hydra\",\n name=\"config\",\n node=HydraConf(),\n provider=\"hydra\",\n)\n", "path": "hydra/conf/__init__.py" } ]
diff --git a/hydra/conf/__init__.py b/hydra/conf/__init__.py index bcb5648d2f5..c3eaaac462b 100644 --- a/hydra/conf/__init__.py +++ b/hydra/conf/__init__.py @@ -95,7 +95,8 @@ class RuntimeConf: config_sources: List[ConfigSourceInfo] = MISSING # Composition choices dictionary - choices: Dict[str, str] = field(default_factory=lambda: {}) + # Ideally, the value type would be Union[str, List[str], None] + choices: Dict[str, Any] = field(default_factory=lambda: {}) @dataclass
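The one-line diff above is easier to follow with a toy reproduction of the failure mode. The sketch below is an assumption-laden illustration, not Hydra's actual config handling: it uses made-up `StrictChoices`/`RelaxedChoices` dataclasses in place of `RuntimeConf`, and the strict case only raises with an OmegaConf build that enforces element types on assignment (the 2.1-era commit named in the issue).

```python
from dataclasses import dataclass, field
from typing import Any, Dict

from omegaconf import OmegaConf
from omegaconf.errors import ValidationError


@dataclass
class StrictChoices:
    # Mirrors the pre-patch annotation: Dict[str, str]
    choices: Dict[str, str] = field(default_factory=dict)


@dataclass
class RelaxedChoices:
    # Mirrors the post-patch annotation: Dict[str, Any]
    choices: Dict[str, Any] = field(default_factory=dict)


strict = OmegaConf.structured(StrictChoices)
try:
    # A None choice (e.g. the {"callbacks": None} default) trips validation
    # when the value type is a non-optional str.
    strict.choices["callbacks"] = None
except ValidationError as exc:
    print("rejected:", exc)

relaxed = OmegaConf.structured(RelaxedChoices)
relaxed.choices["callbacks"] = None  # accepted once the value type is Any
print("accepted:", relaxed.choices)
```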
ckan__ckan-3631
Problem adding datasets to a group on package_create, the user has 'Editor' capacity ### CKAN Version if known (or site URL) Found in 2.2.2 and later ### Please describe the expected behaviour I manage a customized CKAN for a client. The create dataset page is changed in a way that it is possible to add all metadata to a dataset on 'package_create'. It should also be possible to add the dataset directly to groups. The user has the capacity 'Editor' on the group. ### Please describe the actual behaviour The auth function 'package_create' always does the `check2 = _check_group_auth(context,data_dict)`, which is a different approach than in the 'package_update' auth function. That leads to using the call to `authz.has_user_permission_for_group_or_org(group.id, user, 'update')`. Later this leads to a comparison of the permission '**update**' with the permissions of the 'Editor' role ('editor', ['read', 'delete_dataset', 'create_dataset', 'update_dataset', 'manage_group']). `if 'admin' in perms or permission in perms: return True` In my opinion this can never be true and is thus a bug. Could you please check this? Regards, Daniel
[ { "content": "# encoding: utf-8\n\nimport ckan.logic as logic\nimport ckan.authz as authz\nimport ckan.logic.auth as logic_auth\n\nfrom ckan.common import _\n\[email protected]_allow_anonymous_access\ndef package_create(context, data_dict=None):\n user = context['user']\n\n if authz.auth_is_anon_user(context):\n check1 = all(authz.check_config_permission(p) for p in (\n 'anon_create_dataset',\n 'create_dataset_if_not_in_organization',\n 'create_unowned_dataset',\n ))\n else:\n check1 = all(authz.check_config_permission(p) for p in (\n 'create_dataset_if_not_in_organization',\n 'create_unowned_dataset',\n )) or authz.has_user_permission_for_some_org(\n user, 'create_dataset')\n\n if not check1:\n return {'success': False, 'msg': _('User %s not authorized to create packages') % user}\n\n check2 = _check_group_auth(context,data_dict)\n if not check2:\n return {'success': False, 'msg': _('User %s not authorized to edit these groups') % user}\n\n # If an organization is given are we able to add a dataset to it?\n data_dict = data_dict or {}\n org_id = data_dict.get('owner_org')\n if org_id and not authz.has_user_permission_for_group_or_org(\n org_id, user, 'create_dataset'):\n return {'success': False, 'msg': _('User %s not authorized to add dataset to this organization') % user}\n return {'success': True}\n\n\ndef file_upload(context, data_dict=None):\n user = context['user']\n if authz.auth_is_anon_user(context):\n return {'success': False, 'msg': _('User %s not authorized to create packages') % user}\n return {'success': True}\n\n\ndef resource_create(context, data_dict):\n model = context['model']\n user = context.get('user')\n\n package_id = data_dict.get('package_id')\n if not package_id and data_dict.get('id'):\n # This can happen when auth is deferred, eg from `resource_view_create`\n resource = logic_auth.get_resource_object(context, data_dict)\n package_id = resource.package_id\n\n if not package_id:\n raise logic.NotFound(\n _('No dataset id provided, cannot check auth.')\n )\n\n # check authentication against package\n pkg = model.Package.get(package_id)\n if not pkg:\n raise logic.NotFound(\n _('No package found for this resource, cannot check auth.')\n )\n\n pkg_dict = {'id': pkg.id}\n authorized = authz.is_authorized('package_update', context, pkg_dict).get('success')\n\n if not authorized:\n return {'success': False,\n 'msg': _('User %s not authorized to create resources on dataset %s') %\n (str(user), package_id)}\n else:\n return {'success': True}\n\n\ndef resource_view_create(context, data_dict):\n return authz.is_authorized('resource_create', context, {'id': data_dict['resource_id']})\n\n\ndef resource_create_default_resource_views(context, data_dict):\n return authz.is_authorized('resource_create', context, {'id': data_dict['resource']['id']})\n\n\ndef package_create_default_resource_views(context, data_dict):\n return authz.is_authorized('package_update', context,\n data_dict['package'])\n\n\ndef package_relationship_create(context, data_dict):\n user = context['user']\n\n id = data_dict['subject']\n id2 = data_dict['object']\n\n # If we can update each package we can see the relationships\n authorized1 = authz.is_authorized_boolean(\n 'package_update', context, {'id': id})\n authorized2 = authz.is_authorized_boolean(\n 'package_update', context, {'id': id2})\n\n if not authorized1 and authorized2:\n return {'success': False, 'msg': _('User %s not authorized to edit these packages') % user}\n else:\n return {'success': True}\n\ndef group_create(context, 
data_dict=None):\n user = context['user']\n user = authz.get_user_id_for_username(user, allow_none=True)\n\n if user and authz.check_config_permission('user_create_groups'):\n return {'success': True}\n return {'success': False,\n 'msg': _('User %s not authorized to create groups') % user}\n\n\ndef organization_create(context, data_dict=None):\n user = context['user']\n user = authz.get_user_id_for_username(user, allow_none=True)\n\n if user and authz.check_config_permission('user_create_organizations'):\n return {'success': True}\n return {'success': False,\n 'msg': _('User %s not authorized to create organizations') % user}\n\ndef rating_create(context, data_dict):\n # No authz check in the logic function\n return {'success': True}\n\n\[email protected]_allow_anonymous_access\ndef user_create(context, data_dict=None):\n using_api = 'api_version' in context\n create_user_via_api = authz.check_config_permission(\n 'create_user_via_api')\n create_user_via_web = authz.check_config_permission(\n 'create_user_via_web')\n\n if using_api and not create_user_via_api:\n return {'success': False, 'msg': _('User {user} not authorized to '\n 'create users via the API').format(user=context.get('user'))}\n if not using_api and not create_user_via_web:\n return {'success': False, 'msg': _('Not authorized to '\n 'create users')}\n return {'success': True}\n\ndef user_invite(context, data_dict):\n data_dict['id'] = data_dict['group_id']\n return group_member_create(context, data_dict)\n\ndef _check_group_auth(context, data_dict):\n '''Has this user got update permission for all of the given groups?\n If there is a package in the context then ignore that package's groups.\n (owner_org is checked elsewhere.)\n :returns: False if not allowed to update one (or more) of the given groups.\n True otherwise. i.e. True is the default. 
A blank data_dict\n mentions no groups, so it returns True.\n\n '''\n # FIXME This code is shared amoung other logic.auth files and should be\n # somewhere better\n if not data_dict:\n return True\n\n model = context['model']\n user = context['user']\n pkg = context.get(\"package\")\n\n api_version = context.get('api_version') or '1'\n\n group_blobs = data_dict.get('groups', [])\n groups = set()\n for group_blob in group_blobs:\n # group_blob might be a dict or a group_ref\n if isinstance(group_blob, dict):\n # use group id by default, but we can accept name as well\n id = group_blob.get('id') or group_blob.get('name')\n if not id:\n continue\n else:\n id = group_blob\n grp = model.Group.get(id)\n if grp is None:\n raise logic.NotFound(_('Group was not found.'))\n groups.add(grp)\n\n if pkg:\n pkg_groups = pkg.get_groups()\n\n groups = groups - set(pkg_groups)\n\n for group in groups:\n if not authz.has_user_permission_for_group_or_org(group.id, user, 'update'):\n return False\n\n return True\n\n## Modifications for rest api\n\ndef package_create_rest(context, data_dict):\n model = context['model']\n user = context['user']\n if not user:\n return {'success': False, 'msg': _('Valid API key needed to create a package')}\n\n return authz.is_authorized('package_create', context, data_dict)\n\ndef group_create_rest(context, data_dict):\n model = context['model']\n user = context['user']\n if not user:\n return {'success': False, 'msg': _('Valid API key needed to create a group')}\n\n return authz.is_authorized('group_create', context, data_dict)\n\ndef vocabulary_create(context, data_dict):\n # sysadmins only\n return {'success': False}\n\ndef activity_create(context, data_dict):\n # sysadmins only\n return {'success': False}\n\ndef tag_create(context, data_dict):\n # sysadmins only\n return {'success': False}\n\ndef _group_or_org_member_create(context, data_dict):\n user = context['user']\n group_id = data_dict['id']\n if not authz.has_user_permission_for_group_or_org(group_id, user, 'membership'):\n return {'success': False, 'msg': _('User %s not authorized to add members') % user}\n return {'success': True}\n\ndef organization_member_create(context, data_dict):\n return _group_or_org_member_create(context, data_dict)\n\ndef group_member_create(context, data_dict):\n return _group_or_org_member_create(context, data_dict)\n\ndef member_create(context, data_dict):\n group = logic_auth.get_group_object(context, data_dict)\n user = context['user']\n\n # User must be able to update the group to add a member to it\n permission = 'update'\n # However if the user is member of group then they can add/remove datasets\n if not group.is_organization and data_dict.get('object_type') == 'package':\n permission = 'manage_group'\n\n authorized = authz.has_user_permission_for_group_or_org(group.id,\n user,\n permission)\n if not authorized:\n return {'success': False,\n 'msg': _('User %s not authorized to edit group %s') %\n (str(user), group.id)}\n else:\n return {'success': True}\n", "path": "ckan/logic/auth/create.py" } ]
[ { "content": "# encoding: utf-8\n\nimport ckan.logic as logic\nimport ckan.authz as authz\nimport ckan.logic.auth as logic_auth\n\nfrom ckan.common import _\n\[email protected]_allow_anonymous_access\ndef package_create(context, data_dict=None):\n user = context['user']\n\n if authz.auth_is_anon_user(context):\n check1 = all(authz.check_config_permission(p) for p in (\n 'anon_create_dataset',\n 'create_dataset_if_not_in_organization',\n 'create_unowned_dataset',\n ))\n else:\n check1 = all(authz.check_config_permission(p) for p in (\n 'create_dataset_if_not_in_organization',\n 'create_unowned_dataset',\n )) or authz.has_user_permission_for_some_org(\n user, 'create_dataset')\n\n if not check1:\n return {'success': False, 'msg': _('User %s not authorized to create packages') % user}\n\n check2 = _check_group_auth(context,data_dict)\n if not check2:\n return {'success': False, 'msg': _('User %s not authorized to edit these groups') % user}\n\n # If an organization is given are we able to add a dataset to it?\n data_dict = data_dict or {}\n org_id = data_dict.get('owner_org')\n if org_id and not authz.has_user_permission_for_group_or_org(\n org_id, user, 'create_dataset'):\n return {'success': False, 'msg': _('User %s not authorized to add dataset to this organization') % user}\n return {'success': True}\n\n\ndef file_upload(context, data_dict=None):\n user = context['user']\n if authz.auth_is_anon_user(context):\n return {'success': False, 'msg': _('User %s not authorized to create packages') % user}\n return {'success': True}\n\n\ndef resource_create(context, data_dict):\n model = context['model']\n user = context.get('user')\n\n package_id = data_dict.get('package_id')\n if not package_id and data_dict.get('id'):\n # This can happen when auth is deferred, eg from `resource_view_create`\n resource = logic_auth.get_resource_object(context, data_dict)\n package_id = resource.package_id\n\n if not package_id:\n raise logic.NotFound(\n _('No dataset id provided, cannot check auth.')\n )\n\n # check authentication against package\n pkg = model.Package.get(package_id)\n if not pkg:\n raise logic.NotFound(\n _('No package found for this resource, cannot check auth.')\n )\n\n pkg_dict = {'id': pkg.id}\n authorized = authz.is_authorized('package_update', context, pkg_dict).get('success')\n\n if not authorized:\n return {'success': False,\n 'msg': _('User %s not authorized to create resources on dataset %s') %\n (str(user), package_id)}\n else:\n return {'success': True}\n\n\ndef resource_view_create(context, data_dict):\n return authz.is_authorized('resource_create', context, {'id': data_dict['resource_id']})\n\n\ndef resource_create_default_resource_views(context, data_dict):\n return authz.is_authorized('resource_create', context, {'id': data_dict['resource']['id']})\n\n\ndef package_create_default_resource_views(context, data_dict):\n return authz.is_authorized('package_update', context,\n data_dict['package'])\n\n\ndef package_relationship_create(context, data_dict):\n user = context['user']\n\n id = data_dict['subject']\n id2 = data_dict['object']\n\n # If we can update each package we can see the relationships\n authorized1 = authz.is_authorized_boolean(\n 'package_update', context, {'id': id})\n authorized2 = authz.is_authorized_boolean(\n 'package_update', context, {'id': id2})\n\n if not authorized1 and authorized2:\n return {'success': False, 'msg': _('User %s not authorized to edit these packages') % user}\n else:\n return {'success': True}\n\ndef group_create(context, 
data_dict=None):\n user = context['user']\n user = authz.get_user_id_for_username(user, allow_none=True)\n\n if user and authz.check_config_permission('user_create_groups'):\n return {'success': True}\n return {'success': False,\n 'msg': _('User %s not authorized to create groups') % user}\n\n\ndef organization_create(context, data_dict=None):\n user = context['user']\n user = authz.get_user_id_for_username(user, allow_none=True)\n\n if user and authz.check_config_permission('user_create_organizations'):\n return {'success': True}\n return {'success': False,\n 'msg': _('User %s not authorized to create organizations') % user}\n\ndef rating_create(context, data_dict):\n # No authz check in the logic function\n return {'success': True}\n\n\[email protected]_allow_anonymous_access\ndef user_create(context, data_dict=None):\n using_api = 'api_version' in context\n create_user_via_api = authz.check_config_permission(\n 'create_user_via_api')\n create_user_via_web = authz.check_config_permission(\n 'create_user_via_web')\n\n if using_api and not create_user_via_api:\n return {'success': False, 'msg': _('User {user} not authorized to '\n 'create users via the API').format(user=context.get('user'))}\n if not using_api and not create_user_via_web:\n return {'success': False, 'msg': _('Not authorized to '\n 'create users')}\n return {'success': True}\n\ndef user_invite(context, data_dict):\n data_dict['id'] = data_dict['group_id']\n return group_member_create(context, data_dict)\n\ndef _check_group_auth(context, data_dict):\n '''Has this user got update permission for all of the given groups?\n If there is a package in the context then ignore that package's groups.\n (owner_org is checked elsewhere.)\n :returns: False if not allowed to update one (or more) of the given groups.\n True otherwise. i.e. True is the default. 
A blank data_dict\n mentions no groups, so it returns True.\n\n '''\n # FIXME This code is shared amoung other logic.auth files and should be\n # somewhere better\n if not data_dict:\n return True\n\n model = context['model']\n user = context['user']\n pkg = context.get(\"package\")\n\n api_version = context.get('api_version') or '1'\n\n group_blobs = data_dict.get('groups', [])\n groups = set()\n for group_blob in group_blobs:\n # group_blob might be a dict or a group_ref\n if isinstance(group_blob, dict):\n # use group id by default, but we can accept name as well\n id = group_blob.get('id') or group_blob.get('name')\n if not id:\n continue\n else:\n id = group_blob\n grp = model.Group.get(id)\n if grp is None:\n raise logic.NotFound(_('Group was not found.'))\n groups.add(grp)\n\n if pkg:\n pkg_groups = pkg.get_groups()\n\n groups = groups - set(pkg_groups)\n\n for group in groups:\n if not authz.has_user_permission_for_group_or_org(group.id, user, 'manage_group'):\n return False\n\n return True\n\n## Modifications for rest api\n\ndef package_create_rest(context, data_dict):\n model = context['model']\n user = context['user']\n if not user:\n return {'success': False, 'msg': _('Valid API key needed to create a package')}\n\n return authz.is_authorized('package_create', context, data_dict)\n\ndef group_create_rest(context, data_dict):\n model = context['model']\n user = context['user']\n if not user:\n return {'success': False, 'msg': _('Valid API key needed to create a group')}\n\n return authz.is_authorized('group_create', context, data_dict)\n\ndef vocabulary_create(context, data_dict):\n # sysadmins only\n return {'success': False}\n\ndef activity_create(context, data_dict):\n # sysadmins only\n return {'success': False}\n\ndef tag_create(context, data_dict):\n # sysadmins only\n return {'success': False}\n\ndef _group_or_org_member_create(context, data_dict):\n user = context['user']\n group_id = data_dict['id']\n if not authz.has_user_permission_for_group_or_org(group_id, user, 'membership'):\n return {'success': False, 'msg': _('User %s not authorized to add members') % user}\n return {'success': True}\n\ndef organization_member_create(context, data_dict):\n return _group_or_org_member_create(context, data_dict)\n\ndef group_member_create(context, data_dict):\n return _group_or_org_member_create(context, data_dict)\n\ndef member_create(context, data_dict):\n group = logic_auth.get_group_object(context, data_dict)\n user = context['user']\n\n # User must be able to update the group to add a member to it\n permission = 'update'\n # However if the user is member of group then they can add/remove datasets\n if not group.is_organization and data_dict.get('object_type') == 'package':\n permission = 'manage_group'\n\n authorized = authz.has_user_permission_for_group_or_org(group.id,\n user,\n permission)\n if not authorized:\n return {'success': False,\n 'msg': _('User %s not authorized to edit group %s') %\n (str(user), group.id)}\n else:\n return {'success': True}\n", "path": "ckan/logic/auth/create.py" } ]
diff --git a/ckan/logic/auth/create.py b/ckan/logic/auth/create.py index 9d588c92696..8977d735327 100644 --- a/ckan/logic/auth/create.py +++ b/ckan/logic/auth/create.py @@ -195,7 +195,7 @@ def _check_group_auth(context, data_dict): groups = groups - set(pkg_groups) for group in groups: - if not authz.has_user_permission_for_group_or_org(group.id, user, 'update'): + if not authz.has_user_permission_for_group_or_org(group.id, user, 'manage_group'): return False return True
conan-io__conan-center-index-2128
[package] wt/4.3.1: duplicate symbols during linking, "multiple definition of `Wt::WServer::~WServer()" ### Package and Environment Details (include every applicable attribute) * Package Name/Version: **wt/4.3.1** * Operating System+version: **Linux Ubuntu 20.04** * Compiler+version: **GCC 9** * Docker image: **N/A** * Conan version: **conan 1.26.1** * Python version: **Python 3.8.2** ### Conan profile (output of `conan profile show default` or `conan profile show <profile>` if custom profile is in use) ``` Configuration for profile default: [settings] os=Linux os_build=Linux arch=x86_64 arch_build=x86_64 compiler=gcc compiler.version=7 compiler.libcxx=libstdc++11 build_type=Release [options] [build_requires] [env] ``` ### Steps to reproduce (Include if Applicable) Just trying to upgrade from a local install of 4.3.0 to the conan version 4.3.1. I don't think that wttest lib should be there. It would also be nice to have a component version of the library. ``` target_link_libraries(ppb.wt PRIVATE project_options - Wt::HTTP - Wt::DboSqlite3 - Wt::Dbo - Wt::Wt + CONAN_PKG::wt + # Wt::HTTP + # Wt::DboSqlite3 + # Wt::Dbo + # Wt::Wt CONAN_PKG::nlohmann_json stdc+ ``` ### Logs (Include/Attach if Applicable) <details><summary>Click to expand log</summary> ``` [1/1] Linking CXX executable bin/ppb.wt FAILED: bin/ppb.wt : && /usr/bin/c++ -O2 -g -DNDEBUG ppb/CMakeFiles/ppb.wt.dir/model/cart.cpp.o ppb/CMakeFiles/ppb.wt.dir/ppb_application.cpp.o ppb/CMakeFiles/ppb.wt.dir/db/db_types.cpp.o ppb/CMakeFiles/ppb.wt.dir/db/static_product_database.cpp.o ppb/CMakeFiles/ppb.wt.dir/model/product_session.cpp.o ppb/CMakeFiles/ppb.wt.dir/model/product_database.cpp.o ppb/CMakeFiles/ppb.wt.dir/widget/anchor.cpp.o ppb/CMakeFiles/ppb.wt.dir/widget/text_button_box.cpp.o ppb/CMakeFiles/ppb.wt.dir/view/ppb_view.cpp.o ppb/CMakeFiles/ppb.wt.dir/view/ppb_cart_view.cpp.o ppb/CMakeFiles/ppb.wt.dir/main.cpp.o -o bin/ppb.wt -lstdc++fs /home/ppetraki/.conan/data/wt/4.3.1/_/_/package/2982a0e1e85adfb154853ddea2afc96acdfb5a3d/lib/libwttest.a /home/ppetraki/.conan/data/wt/4.3.1/_/_/package/2982a0e1e85adfb154853ddea2afc96acdfb5a3d/lib/libwtdbopostgres.a /home/ppetraki/.conan/data/wt/4.3.1/_/_/package/2982a0e1e85adfb154853ddea2afc96acdfb5a3d/lib/libwtdbosqlite3.a /home/ppetraki/.conan/data/wt/4.3.1/_/_/package/2982a0e1e85adfb154853ddea2afc96acdfb5a3d/lib/libwtdbomysql.a /home/ppetraki/.conan/data/wt/4.3.1/_/_/package/2982a0e1e85adfb154853ddea2afc96acdfb5a3d/lib/libwtdbo.a /home/ppetraki/.conan/data/wt/4.3.1/_/_/package/2982a0e1e85adfb154853ddea2afc96acdfb5a3d/lib/libwthttp.a /home/ppetraki/.conan/data/wt/4.3.1/_/_/package/2982a0e1e85adfb154853ddea2afc96acdfb5a3d/lib/libwt.a /home/ppetraki/.conan/data/boost/1.73.0/_/_/package/7e3a27591b41ce17376bfed79a95acf351cb2212/lib/libboost_wave.a /home/ppetraki/.conan/data/boost/1.73.0/_/_/package/7e3a27591b41ce17376bfed79a95acf351cb2212/lib/libboost_container.a /home/ppetraki/.conan/data/boost/1.73.0/_/_/package/7e3a27591b41ce17376bfed79a95acf351cb2212/lib/libboost_contract.a /home/ppetraki/.conan/data/boost/1.73.0/_/_/package/7e3a27591b41ce17376bfed79a95acf351cb2212/lib/libboost_exception.a /home/ppetraki/.conan/data/boost/1.73.0/_/_/package/7e3a27591b41ce17376bfed79a95acf351cb2212/lib/libboost_graph.a /home/ppetraki/.conan/data/boost/1.73.0/_/_/package/7e3a27591b41ce17376bfed79a95acf351cb2212/lib/libboost_iostreams.a /home/ppetraki/.conan/data/boost/1.73.0/_/_/package/7e3a27591b41ce17376bfed79a95acf351cb2212/lib/libboost_locale.a 
/home/ppetraki/.conan/data/boost/1.73.0/_/_/package/7e3a27591b41ce17376bfed79a95acf351cb2212/lib/libboost_log.a /home/ppetraki/.conan/data/boost/1.73.0/_/_/package/7e3a27591b41ce17376bfed79a95acf351cb2212/lib/libboost_program_options.a /home/ppetraki/.conan/data/boost/1.73.0/_/_/package/7e3a27591b41ce17376bfed79a95acf351cb2212/lib/libboost_random.a /home/ppetraki/.conan/data/boost/1.73.0/_/_/package/7e3a27591b41ce17376bfed79a95acf351cb2212/lib/libboost_regex.a /home/ppetraki/.conan/data/boost/1.73.0/_/_/package/7e3a27591b41ce17376bfed79a95acf351cb2212/lib/libboost_serialization.a /home/ppetraki/.conan/data/boost/1.73.0/_/_/package/7e3a27591b41ce17376bfed79a95acf351cb2212/lib/libboost_wserialization.a /home/ppetraki/.conan/data/boost/1.73.0/_/_/package/7e3a27591b41ce17376bfed79a95acf351cb2212/lib/libboost_coroutine.a /home/ppetraki/.conan/data/boost/1.73.0/_/_/package/7e3a27591b41ce17376bfed79a95acf351cb2212/lib/libboost_fiber.a /home/ppetraki/.conan/data/boost/1.73.0/_/_/package/7e3a27591b41ce17376bfed79a95acf351cb2212/lib/libboost_context.a /home/ppetraki/.conan/data/boost/1.73.0/_/_/package/7e3a27591b41ce17376bfed79a95acf351cb2212/lib/libboost_timer.a /home/ppetraki/.conan/data/boost/1.73.0/_/_/package/7e3a27591b41ce17376bfed79a95acf351cb2212/lib/libboost_thread.a /home/ppetraki/.conan/data/boost/1.73.0/_/_/package/7e3a27591b41ce17376bfed79a95acf351cb2212/lib/libboost_chrono.a /home/ppetraki/.conan/data/boost/1.73.0/_/_/package/7e3a27591b41ce17376bfed79a95acf351cb2212/lib/libboost_date_time.a /home/ppetraki/.conan/data/boost/1.73.0/_/_/package/7e3a27591b41ce17376bfed79a95acf351cb2212/lib/libboost_atomic.a /home/ppetraki/.conan/data/boost/1.73.0/_/_/package/7e3a27591b41ce17376bfed79a95acf351cb2212/lib/libboost_filesystem.a /home/ppetraki/.conan/data/boost/1.73.0/_/_/package/7e3a27591b41ce17376bfed79a95acf351cb2212/lib/libboost_system.a /home/ppetraki/.conan/data/boost/1.73.0/_/_/package/7e3a27591b41ce17376bfed79a95acf351cb2212/lib/libboost_type_erasure.a /home/ppetraki/.conan/data/boost/1.73.0/_/_/package/7e3a27591b41ce17376bfed79a95acf351cb2212/lib/libboost_log_setup.a /home/ppetraki/.conan/data/boost/1.73.0/_/_/package/7e3a27591b41ce17376bfed79a95acf351cb2212/lib/libboost_math_c99.a /home/ppetraki/.conan/data/boost/1.73.0/_/_/package/7e3a27591b41ce17376bfed79a95acf351cb2212/lib/libboost_math_c99f.a /home/ppetraki/.conan/data/boost/1.73.0/_/_/package/7e3a27591b41ce17376bfed79a95acf351cb2212/lib/libboost_math_c99l.a /home/ppetraki/.conan/data/boost/1.73.0/_/_/package/7e3a27591b41ce17376bfed79a95acf351cb2212/lib/libboost_math_tr1.a /home/ppetraki/.conan/data/boost/1.73.0/_/_/package/7e3a27591b41ce17376bfed79a95acf351cb2212/lib/libboost_math_tr1f.a /home/ppetraki/.conan/data/boost/1.73.0/_/_/package/7e3a27591b41ce17376bfed79a95acf351cb2212/lib/libboost_math_tr1l.a /home/ppetraki/.conan/data/boost/1.73.0/_/_/package/7e3a27591b41ce17376bfed79a95acf351cb2212/lib/libboost_nowide.a /home/ppetraki/.conan/data/boost/1.73.0/_/_/package/7e3a27591b41ce17376bfed79a95acf351cb2212/lib/libboost_stacktrace_addr2line.a /home/ppetraki/.conan/data/boost/1.73.0/_/_/package/7e3a27591b41ce17376bfed79a95acf351cb2212/lib/libboost_stacktrace_backtrace.a /home/ppetraki/.conan/data/boost/1.73.0/_/_/package/7e3a27591b41ce17376bfed79a95acf351cb2212/lib/libboost_stacktrace_basic.a /home/ppetraki/.conan/data/boost/1.73.0/_/_/package/7e3a27591b41ce17376bfed79a95acf351cb2212/lib/libboost_stacktrace_noop.a 
/home/ppetraki/.conan/data/boost/1.73.0/_/_/package/7e3a27591b41ce17376bfed79a95acf351cb2212/lib/libboost_unit_test_framework.a -lrt /home/ppetraki/.conan/data/bzip2/1.0.8/_/_/package/ff2ff6bb41ed84283a6634bada252c37f13eb93c/lib/libbz2.a /home/ppetraki/.conan/data/sqlite3/3.31.1/_/_/package/3227f52e0374b0fd6694f607708d905c74b60866/lib/libsqlite3.a /home/ppetraki/.conan/data/libmysqlclient/8.0.17/_/_/package/28945b70ae74bb6ebb259ca1f8be9f4b4b86384a/lib/libmysqlclient.a /home/ppetraki/.conan/data/openssl/1.1.1g/_/_/package/6b7ff26bfd4c2cf2ccba522bfba2d2e7820e40da/lib/libssl.a /home/ppetraki/.conan/data/openssl/1.1.1g/_/_/package/6b7ff26bfd4c2cf2ccba522bfba2d2e7820e40da/lib/libcrypto.a -ldl -lstdc++ -lm /home/ppetraki/.conan/data/libpq/11.5/_/_/package/37e2cf0ad9b4cbcb2b2a3538a853ba3e45956a8e/lib/libpq.a /home/ppetraki/.conan/data/libpq/11.5/_/_/package/37e2cf0ad9b4cbcb2b2a3538a853ba3e45956a8e/lib/libpgcommon.a /home/ppetraki/.conan/data/zlib/1.2.11/_/_/package/6b7ff26bfd4c2cf2ccba522bfba2d2e7820e40da/lib/libz.a /home/ppetraki/.conan/data/libunwind/1.3.1/_/_/package/f72b33a5a0666d86cece5faa303b85f79aacec62/lib/libunwind.a /home/ppetraki/.conan/data/libunwind/1.3.1/_/_/package/f72b33a5a0666d86cece5faa303b85f79aacec62/lib/libunwind-coredump.a /home/ppetraki/.conan/data/libunwind/1.3.1/_/_/package/f72b33a5a0666d86cece5faa303b85f79aacec62/lib/libunwind-generic.a /home/ppetraki/.conan/data/libunwind/1.3.1/_/_/package/f72b33a5a0666d86cece5faa303b85f79aacec62/lib/libunwind-ptrace.a /home/ppetraki/.conan/data/libunwind/1.3.1/_/_/package/f72b33a5a0666d86cece5faa303b85f79aacec62/lib/libunwind-setjmp.a /home/ppetraki/.conan/data/libunwind/1.3.1/_/_/package/f72b33a5a0666d86cece5faa303b85f79aacec62/lib/libunwind-x86_64.a /home/ppetraki/.conan/data/xz_utils/5.2.4/_/_/package/6b7ff26bfd4c2cf2ccba522bfba2d2e7820e40da/lib/liblzma.a -lpthread && : /usr/bin/ld: /home/ppetraki/.conan/data/wt/4.3.1/_/_/package/2982a0e1e85adfb154853ddea2afc96acdfb5a3d/lib/libwthttp.a(WServer.C.o): in function `Wt::WServer::~WServer()': /home/ppetraki/.conan/data/wt/4.3.1/_/_/build/2982a0e1e85adfb154853ddea2afc96acdfb5a3d/source_subfolder/src/http/WServer.C:104: multiple definition of `Wt::WServer::~WServer()'; /home/ppetraki/.conan/data/wt/4.3.1/_/_/package/2982a0e1e85adfb154853ddea2afc96acdfb5a3d/lib/libwttest.a(WTestEnvironment.C.o):/home/ppetraki/.conan/data/wt/4.3.1/_/_/build/2982a0e1e85adfb154853ddea2afc96acdfb5a3d/source_subfolder/src/Wt/Test/WTestEnvironment.C:33: first defined here /usr/bin/ld: /home/ppetraki/.conan/data/wt/4.3.1/_/_/package/2982a0e1e85adfb154853ddea2afc96acdfb5a3d/lib/libwthttp.a(WServer.C.o): in function `Wt::WServer::~WServer()': /home/ppetraki/.conan/data/wt/4.3.1/_/_/build/2982a0e1e85adfb154853ddea2afc96acdfb5a3d/source_subfolder/src/http/WServer.C:104: multiple definition of `Wt::WServer::~WServer()'; /home/ppetraki/.conan/data/wt/4.3.1/_/_/package/2982a0e1e85adfb154853ddea2afc96acdfb5a3d/lib/libwttest.a(WTestEnvironment.C.o):/home/ppetraki/.conan/data/wt/4.3.1/_/_/build/2982a0e1e85adfb154853ddea2afc96acdfb5a3d/source_subfolder/src/Wt/Test/WTestEnvironment.C:33: first defined here /usr/bin/ld: /home/ppetraki/.conan/data/wt/4.3.1/_/_/package/2982a0e1e85adfb154853ddea2afc96acdfb5a3d/lib/libwthttp.a(WServer.C.o): in function `Wt::WServer::~WServer()': /home/ppetraki/.conan/data/wt/4.3.1/_/_/build/2982a0e1e85adfb154853ddea2afc96acdfb5a3d/source_subfolder/src/http/WServer.C:104: multiple definition of `Wt::WServer::~WServer()'; 
/home/ppetraki/.conan/data/wt/4.3.1/_/_/package/2982a0e1e85adfb154853ddea2afc96acdfb5a3d/lib/libwttest.a(WTestEnvironment.C.o):/home/ppetraki/.conan/data/wt/4.3.1/_/_/build/2982a0e1e85adfb154853ddea2afc96acdfb5a3d/source_subfolder/src/Wt/Test/WTestEnvironment.C:33: first defined here /usr/bin/ld: /home/ppetraki/.conan/data/wt/4.3.1/_/_/package/2982a0e1e85adfb154853ddea2afc96acdfb5a3d/lib/libwthttp.a(WServer.C.o): in function `Wt::WServer::WServer(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&)': /home/ppetraki/.conan/data/wt/4.3.1/_/_/build/2982a0e1e85adfb154853ddea2afc96acdfb5a3d/source_subfolder/src/http/WServer.C:79: multiple definition of `Wt::WServer::WServer(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&)'; /home/ppetraki/.conan/data/wt/4.3.1/_/_/package/2982a0e1e85adfb154853ddea2afc96acdfb5a3d/lib/libwttest.a(WTestEnvironment.C.o):/home/ppetraki/.conan/data/wt/4.3.1/_/_/build/2982a0e1e85adfb154853ddea2afc96acdfb5a3d/source_subfolder/src/Wt/Test/WTestEnvironment.C:21: first defined here /usr/bin/ld: /home/ppetraki/.conan/data/wt/4.3.1/_/_/package/2982a0e1e85adfb154853ddea2afc96acdfb5a3d/lib/libwthttp.a(WServer.C.o): in function `Wt::WServer::WServer(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&)': /home/ppetraki/.conan/data/wt/4.3.1/_/_/build/2982a0e1e85adfb154853ddea2afc96acdfb5a3d/source_subfolder/src/http/WServer.C:79: multiple definition of `Wt::WServer::WServer(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&)'; /home/ppetraki/.conan/data/wt/4.3.1/_/_/package/2982a0e1e85adfb154853ddea2afc96acdfb5a3d/lib/libwttest.a(WTestEnvironment.C.o):/home/ppetraki/.conan/data/wt/4.3.1/_/_/build/2982a0e1e85adfb154853ddea2afc96acdfb5a3d/source_subfolder/src/Wt/Test/WTestEnvironment.C:21: first defined here collect2: error: ld returned 1 exit status ninja: build stopped: subcommand failed. ``` </details>
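As a stopgap until the recipe's defaults change, it should be possible to keep `libwttest.a` off the link line entirely, since the recipe already declares a `with_test` option (see the conanfile below). A minimal consumer-side sketch, assuming Conan 1.x option syntax and a hypothetical consumer recipe:

```python
# Hypothetical consumer conanfile.py (Conan 1.x API assumed).
from conans import ConanFile


class ConsumerConan(ConanFile):
    settings = "os", "arch", "compiler", "build_type"
    requires = "wt/4.3.1"
    generators = "cmake"

    def configure(self):
        # "with_test" is declared by the wt recipe; turning it off skips
        # building and exposing libwttest, which is what duplicates the
        # WServer definitions already provided by libwthttp.
        self.options["wt"].with_test = False
```

The same thing can be expressed on the command line with `-o wt:with_test=False` when installing.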
[ { "content": "from conans import ConanFile, CMake, tools\nimport os\nimport shutil\n\n\nclass WtConan(ConanFile):\n name = \"wt\"\n description = \"Wt is a C++ library for developing web applications\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://github.com/emweb/wt\"\n topics = (\"conan\", \"wt\", \"web\", \"webapp\")\n license = \"GPL-2.0-only\"\n exports_sources = [\"CMakeLists.txt\"]\n generators = \"cmake\"\n\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n \"with_ssl\": [True, False],\n \"with_sqlite\": [True, False],\n \"with_postgres\": [True, False],\n \"with_mysql\": [True, False],\n \"with_mssql\": [True, False],\n \"with_test\": [True, False],\n \"with_dbo\": [True, False],\n \"with_opengl\": [True, False],\n \"with_unwind\": [True, False],\n \"no_std_locale\": [True, False],\n \"no_std_wstring\": [True, False],\n \"multi_threaded\": [True, False],\n \"connector_http\": [True, False],\n \"connector_isapi\": [True, False],\n \"connector_fcgi\": [True, False]\n }\n default_options = {\n 'shared': False,\n 'fPIC': True,\n 'with_ssl': True,\n 'with_sqlite': True,\n 'with_postgres': True,\n 'with_mysql': True,\n 'with_mssql': False,\n 'with_test': True,\n 'with_dbo': True,\n 'with_opengl': False,\n 'with_unwind': True,\n 'no_std_locale': False,\n 'no_std_wstring': False,\n 'multi_threaded': True,\n 'connector_http': True,\n 'connector_isapi': True,\n 'connector_fcgi': False\n }\n\n _source_subfolder = \"source_subfolder\"\n _build_subfolder = \"build_subfolder\"\n\n _cmake = None\n\n requires = ('zlib/1.2.11', 'boost/1.73.0')\n\n def requirements(self):\n if self.options.with_ssl:\n self.requires('openssl/1.1.1g')\n if self.options.with_sqlite:\n self.requires('sqlite3/3.31.1')\n if self.options.with_mysql:\n self.requires('libmysqlclient/8.0.17')\n if self.options.with_postgres:\n self.requires('libpq/11.5')\n if self.options.with_unwind:\n self.requires('libunwind/1.3.1')\n\n def config_options(self):\n if self.settings.os == 'Windows':\n del self.options.fPIC\n del self.options.connector_fcgi\n else:\n del self.options.connector_isapi\n if self.settings.os not in [\"Linux\", \"FreeBSD\"]:\n self.options.with_unwind = False\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n extracted_dir = self.name + \"-\" + self.version\n os.rename(extracted_dir, self._source_subfolder)\n\n def _configure_cmake(self):\n if self._cmake:\n return self._cmake\n self._cmake = CMake(self)\n self._cmake.definitions['CONFIGDIR'] = os.path.join(self.package_folder, 'bin')\n self._cmake.definitions['SHARED_LIBS'] = self.options.shared\n self._cmake.definitions['BUILD_EXAMPLES'] = False\n self._cmake.definitions['BUILD_TESTS'] = False\n self._cmake.definitions['ENABLE_SSL'] = self.options.with_ssl\n self._cmake.definitions['ENABLE_HARU'] = False\n self._cmake.definitions['ENABLE_PANGO'] = False\n self._cmake.definitions['ENABLE_SQLITE'] = self.options.with_sqlite\n self._cmake.definitions['ENABLE_POSTGRES'] = self.options.with_postgres\n self._cmake.definitions['ENABLE_FIREBIRD'] = False\n self._cmake.definitions['ENABLE_MYSQL'] = self.options.with_mysql\n self._cmake.definitions['ENABLE_MSSQLSERVER'] = self.options.with_mssql\n self._cmake.definitions['ENABLE_QT4'] = False\n self._cmake.definitions['ENABLE_QT5'] = False\n self._cmake.definitions['ENABLE_LIBWTTEST'] = self.options.with_test\n self._cmake.definitions['ENABLE_LIBWTDBO'] = 
self.options.with_dbo\n self._cmake.definitions['ENABLE_OPENGL'] = self.options.with_opengl\n self._cmake.definitions['ENABLE_UNWIND'] = self.options.with_unwind\n self._cmake.definitions['WT_NO_STD_LOCALE'] = self.options.no_std_locale\n self._cmake.definitions['WT_NO_STD_WSTRING'] = self.options.no_std_wstring\n self._cmake.definitions['MULTI_THREADED'] = self.options.multi_threaded\n self._cmake.definitions['USE_SYSTEM_SQLITE3'] = True\n self._cmake.definitions['DEBUG'] = self.settings.build_type == 'Debug'\n self._cmake.definitions['CONNECTOR_HTTP'] = self.options.connector_http\n self._cmake.definitions['BOOST_DYNAMIC'] = self.options['boost'].shared\n\n def _gather_libs(p):\n libs = self.deps_cpp_info[p].libs + self.deps_cpp_info[p].system_libs\n if not getattr(self.options[p],'shared', False):\n for dep in self.deps_cpp_info[p].public_deps:\n for l in _gather_libs(dep):\n if not l in libs:\n libs.append(l)\n return libs\n\n if self.options.with_ssl:\n self._cmake.definitions['OPENSSL_PREFIX'] = self.deps_cpp_info['openssl'].rootpath\n self._cmake.definitions['OPENSSL_LIBRARIES'] = ';'.join(_gather_libs('openssl'))\n self._cmake.definitions['OPENSSL_INCLUDE_DIR'] = ';'.join(self.deps_cpp_info['openssl'].include_paths)\n self._cmake.definitions['OPENSSL_FOUND'] = True\n if self.options.with_mysql:\n self._cmake.definitions['MYSQL_LIBRARIES'] = ';'.join(_gather_libs('libmysqlclient'))\n self._cmake.definitions['MYSQL_INCLUDE'] = ';'.join(self.deps_cpp_info['libmysqlclient'].include_paths)\n self._cmake.definitions['MYSQL_DEFINITIONS'] = ';'.join('-D%s' % d for d in self.deps_cpp_info['libmysqlclient'].defines)\n self._cmake.definitions['MYSQL_FOUND'] = True\n if self.options.with_postgres:\n self._cmake.definitions['POSTGRES_LIBRARIES'] = ';'.join(_gather_libs('libpq'))\n self._cmake.definitions['POSTGRES_INCLUDE'] = ';'.join(self.deps_cpp_info['libpq'].include_paths)\n self._cmake.definitions['POSTGRES_FOUND'] = True\n if self.settings.os == 'Windows':\n self._cmake.definitions['CONNECTOR_FCGI'] = False\n self._cmake.definitions['CONNECTOR_ISAPI'] = self.options.connector_isapi\n else:\n self._cmake.definitions['CONNECTOR_FCGI'] = self.options.connector_fcgi\n self._cmake.definitions['CONNECTOR_ISAPI'] = False\n self._cmake.configure(build_folder=self._build_subfolder)\n return self._cmake\n\n def build(self):\n tools.replace_in_file(os.path.join(self._source_subfolder, 'CMakeLists.txt'), 'find_package(OpenSSL)', '#find_package(OpenSSL)')\n tools.replace_in_file(os.path.join(self._source_subfolder, 'CMakeLists.txt'), 'INCLUDE(cmake/WtFindMysql.txt)', '#INCLUDE(cmake/WtFindMysql.txt)')\n tools.replace_in_file(os.path.join(self._source_subfolder, 'CMakeLists.txt'), 'INCLUDE(cmake/WtFindPostgresql.txt)', '#INCLUDE(cmake/WtFindPostgresql.txt)')\n cmake = self._configure_cmake()\n cmake.build()\n\n def package(self):\n self.copy(pattern=\"LICENSE\", dst=\"licenses\", src=self._source_subfolder)\n cmake = self._configure_cmake()\n cmake.install()\n shutil.move(os.path.join(self.package_folder, \"share\", \"Wt\"), os.path.join(self.package_folder, \"bin\")) \n tools.rmdir(os.path.join(self.package_folder, \"share\"))\n tools.rmdir(os.path.join(self.package_folder, \"var\"))\n tools.rmdir(os.path.join(self.package_folder, \"lib\", \"cmake\"))\n\n def package_info(self):\n self.cpp_info.libs = []\n if self.options.with_test:\n self.cpp_info.libs.append('wttest')\n if self.options.with_postgres:\n self.cpp_info.libs.append('wtdbopostgres')\n if self.options.with_sqlite:\n 
self.cpp_info.libs.append('wtdbosqlite3')\n if self.options.with_mysql:\n self.cpp_info.libs.append('wtdbomysql')\n if self.options.with_mssql:\n self.cpp_info.libs.append('wtdbomssqlserver')\n if self.options.with_dbo:\n self.cpp_info.libs.append('wtdbo')\n if self.options.connector_http:\n self.cpp_info.libs.append('wthttp')\n if self.settings.os == 'Windows':\n if self.options.connector_isapi:\n self.cpp_info.libs.append('wtisapi')\n else:\n if self.options.connector_fcgi:\n self.cpp_info.libs.append('wtfcgi')\n self.cpp_info.libs.append('wt')\n if self.settings.build_type == 'Debug':\n self.cpp_info.libs = ['%sd' % lib for lib in self.cpp_info.libs]\n if self.settings.os == 'Linux':\n self.cpp_info.system_libs.append('dl')\n elif self.settings.os == 'Windows':\n self.cpp_info.system_libs.extend(['ws2_32', 'mswsock', 'wsock32'])\n", "path": "recipes/wt/all/conanfile.py" } ]
[ { "content": "from conans import ConanFile, CMake, tools\nimport os\nimport shutil\n\n\nclass WtConan(ConanFile):\n name = \"wt\"\n description = \"Wt is a C++ library for developing web applications\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://github.com/emweb/wt\"\n topics = (\"conan\", \"wt\", \"web\", \"webapp\")\n license = \"GPL-2.0-only\"\n exports_sources = [\"CMakeLists.txt\"]\n generators = \"cmake\"\n\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n \"with_ssl\": [True, False],\n \"with_sqlite\": [True, False],\n \"with_postgres\": [True, False],\n \"with_mysql\": [True, False],\n \"with_mssql\": [True, False],\n \"with_test\": [True, False],\n \"with_dbo\": [True, False],\n \"with_opengl\": [True, False],\n \"with_unwind\": [True, False],\n \"no_std_locale\": [True, False],\n \"no_std_wstring\": [True, False],\n \"multi_threaded\": [True, False],\n \"connector_http\": [True, False],\n \"connector_isapi\": [True, False],\n \"connector_fcgi\": [True, False]\n }\n default_options = {\n 'shared': False,\n 'fPIC': True,\n 'with_ssl': True,\n 'with_sqlite': True,\n 'with_postgres': True,\n 'with_mysql': True,\n 'with_mssql': False,\n 'with_test': False,\n 'with_dbo': True,\n 'with_opengl': False,\n 'with_unwind': True,\n 'no_std_locale': False,\n 'no_std_wstring': False,\n 'multi_threaded': True,\n 'connector_http': True,\n 'connector_isapi': True,\n 'connector_fcgi': False\n }\n\n _source_subfolder = \"source_subfolder\"\n _build_subfolder = \"build_subfolder\"\n\n _cmake = None\n\n requires = ('zlib/1.2.11', 'boost/1.73.0')\n\n def requirements(self):\n if self.options.with_ssl:\n self.requires('openssl/1.1.1g')\n if self.options.with_sqlite:\n self.requires('sqlite3/3.31.1')\n if self.options.with_mysql:\n self.requires('libmysqlclient/8.0.17')\n if self.options.with_postgres:\n self.requires('libpq/11.5')\n if self.options.with_unwind:\n self.requires('libunwind/1.3.1')\n\n def config_options(self):\n if self.settings.os == 'Windows':\n del self.options.fPIC\n del self.options.connector_fcgi\n else:\n del self.options.connector_isapi\n if self.settings.os not in [\"Linux\", \"FreeBSD\"]:\n self.options.with_unwind = False\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n extracted_dir = self.name + \"-\" + self.version\n os.rename(extracted_dir, self._source_subfolder)\n\n def _configure_cmake(self):\n if self._cmake:\n return self._cmake\n self._cmake = CMake(self)\n self._cmake.definitions['CONFIGDIR'] = os.path.join(self.package_folder, 'bin')\n self._cmake.definitions['SHARED_LIBS'] = self.options.shared\n self._cmake.definitions['BUILD_EXAMPLES'] = False\n self._cmake.definitions['BUILD_TESTS'] = False\n self._cmake.definitions['ENABLE_SSL'] = self.options.with_ssl\n self._cmake.definitions['ENABLE_HARU'] = False\n self._cmake.definitions['ENABLE_PANGO'] = False\n self._cmake.definitions['ENABLE_SQLITE'] = self.options.with_sqlite\n self._cmake.definitions['ENABLE_POSTGRES'] = self.options.with_postgres\n self._cmake.definitions['ENABLE_FIREBIRD'] = False\n self._cmake.definitions['ENABLE_MYSQL'] = self.options.with_mysql\n self._cmake.definitions['ENABLE_MSSQLSERVER'] = self.options.with_mssql\n self._cmake.definitions['ENABLE_QT4'] = False\n self._cmake.definitions['ENABLE_QT5'] = False\n self._cmake.definitions['ENABLE_LIBWTTEST'] = self.options.with_test\n self._cmake.definitions['ENABLE_LIBWTDBO'] = 
self.options.with_dbo\n self._cmake.definitions['ENABLE_OPENGL'] = self.options.with_opengl\n self._cmake.definitions['ENABLE_UNWIND'] = self.options.with_unwind\n self._cmake.definitions['WT_NO_STD_LOCALE'] = self.options.no_std_locale\n self._cmake.definitions['WT_NO_STD_WSTRING'] = self.options.no_std_wstring\n self._cmake.definitions['MULTI_THREADED'] = self.options.multi_threaded\n self._cmake.definitions['USE_SYSTEM_SQLITE3'] = True\n self._cmake.definitions['DEBUG'] = self.settings.build_type == 'Debug'\n self._cmake.definitions['CONNECTOR_HTTP'] = self.options.connector_http\n self._cmake.definitions['BOOST_DYNAMIC'] = self.options['boost'].shared\n\n def _gather_libs(p):\n libs = self.deps_cpp_info[p].libs + self.deps_cpp_info[p].system_libs\n if not getattr(self.options[p],'shared', False):\n for dep in self.deps_cpp_info[p].public_deps:\n for l in _gather_libs(dep):\n if not l in libs:\n libs.append(l)\n return libs\n\n if self.options.with_ssl:\n self._cmake.definitions['OPENSSL_PREFIX'] = self.deps_cpp_info['openssl'].rootpath\n self._cmake.definitions['OPENSSL_LIBRARIES'] = ';'.join(_gather_libs('openssl'))\n self._cmake.definitions['OPENSSL_INCLUDE_DIR'] = ';'.join(self.deps_cpp_info['openssl'].include_paths)\n self._cmake.definitions['OPENSSL_FOUND'] = True\n if self.options.with_mysql:\n self._cmake.definitions['MYSQL_LIBRARIES'] = ';'.join(_gather_libs('libmysqlclient'))\n self._cmake.definitions['MYSQL_INCLUDE'] = ';'.join(self.deps_cpp_info['libmysqlclient'].include_paths)\n self._cmake.definitions['MYSQL_DEFINITIONS'] = ';'.join('-D%s' % d for d in self.deps_cpp_info['libmysqlclient'].defines)\n self._cmake.definitions['MYSQL_FOUND'] = True\n if self.options.with_postgres:\n self._cmake.definitions['POSTGRES_LIBRARIES'] = ';'.join(_gather_libs('libpq'))\n self._cmake.definitions['POSTGRES_INCLUDE'] = ';'.join(self.deps_cpp_info['libpq'].include_paths)\n self._cmake.definitions['POSTGRES_FOUND'] = True\n if self.settings.os == 'Windows':\n self._cmake.definitions['CONNECTOR_FCGI'] = False\n self._cmake.definitions['CONNECTOR_ISAPI'] = self.options.connector_isapi\n else:\n self._cmake.definitions['CONNECTOR_FCGI'] = self.options.connector_fcgi\n self._cmake.definitions['CONNECTOR_ISAPI'] = False\n self._cmake.configure(build_folder=self._build_subfolder)\n return self._cmake\n\n def build(self):\n tools.replace_in_file(os.path.join(self._source_subfolder, 'CMakeLists.txt'), 'find_package(OpenSSL)', '#find_package(OpenSSL)')\n tools.replace_in_file(os.path.join(self._source_subfolder, 'CMakeLists.txt'), 'INCLUDE(cmake/WtFindMysql.txt)', '#INCLUDE(cmake/WtFindMysql.txt)')\n tools.replace_in_file(os.path.join(self._source_subfolder, 'CMakeLists.txt'), 'INCLUDE(cmake/WtFindPostgresql.txt)', '#INCLUDE(cmake/WtFindPostgresql.txt)')\n cmake = self._configure_cmake()\n cmake.build()\n\n def package(self):\n self.copy(pattern=\"LICENSE\", dst=\"licenses\", src=self._source_subfolder)\n cmake = self._configure_cmake()\n cmake.install()\n shutil.move(os.path.join(self.package_folder, \"share\", \"Wt\"), os.path.join(self.package_folder, \"bin\")) \n tools.rmdir(os.path.join(self.package_folder, \"share\"))\n tools.rmdir(os.path.join(self.package_folder, \"var\"))\n tools.rmdir(os.path.join(self.package_folder, \"lib\", \"cmake\"))\n\n def package_info(self):\n self.cpp_info.libs = []\n if self.options.with_test:\n self.cpp_info.libs.append('wttest')\n if self.options.with_postgres:\n self.cpp_info.libs.append('wtdbopostgres')\n if self.options.with_sqlite:\n 
self.cpp_info.libs.append('wtdbosqlite3')\n if self.options.with_mysql:\n self.cpp_info.libs.append('wtdbomysql')\n if self.options.with_mssql:\n self.cpp_info.libs.append('wtdbomssqlserver')\n if self.options.with_dbo:\n self.cpp_info.libs.append('wtdbo')\n if self.options.connector_http:\n self.cpp_info.libs.append('wthttp')\n if self.settings.os == 'Windows':\n if self.options.connector_isapi:\n self.cpp_info.libs.append('wtisapi')\n else:\n if self.options.connector_fcgi:\n self.cpp_info.libs.append('wtfcgi')\n self.cpp_info.libs.append('wt')\n if self.settings.build_type == 'Debug':\n self.cpp_info.libs = ['%sd' % lib for lib in self.cpp_info.libs]\n if self.settings.os == 'Linux':\n self.cpp_info.system_libs.append('dl')\n elif self.settings.os == 'Windows':\n self.cpp_info.system_libs.extend(['ws2_32', 'mswsock', 'wsock32'])\n", "path": "recipes/wt/all/conanfile.py" } ]
diff --git a/recipes/wt/all/conanfile.py b/recipes/wt/all/conanfile.py index 1c99d380cddf1..5a538da9728ae 100644 --- a/recipes/wt/all/conanfile.py +++ b/recipes/wt/all/conanfile.py @@ -41,7 +41,7 @@ class WtConan(ConanFile): 'with_postgres': True, 'with_mysql': True, 'with_mssql': False, - 'with_test': True, + 'with_test': False, 'with_dbo': True, 'with_opengl': False, 'with_unwind': True, diff --git a/recipes/wt/all/test_package/conanfile.py b/recipes/wt/all/test_package/conanfile.py index a8ae21419669e..330359d0e7d9c 100644 --- a/recipes/wt/all/test_package/conanfile.py +++ b/recipes/wt/all/test_package/conanfile.py @@ -15,5 +15,6 @@ def build(self): def test(self): if not tools.cross_building(self.settings): bin_path = os.path.join("bin", "test_package") - self.run(bin_path, run_environment=True) + args = " --docroot . --http-listen http://127.0.0.1:8080" + self.run(bin_path + args, run_environment=True) diff --git a/recipes/wt/all/test_package/test_package.cpp b/recipes/wt/all/test_package/test_package.cpp index 28fc4adfe1932..b29b66f7ec10f 100644 --- a/recipes/wt/all/test_package/test_package.cpp +++ b/recipes/wt/all/test_package/test_package.cpp @@ -1,6 +1,7 @@ #include <cstdlib> #include <iostream> #include <Wt/WLength.h> +#include <Wt/WServer.h> #ifdef WITH_DBO @@ -9,12 +10,14 @@ #endif -int main() +int main(int argc, char** argv) { Wt::WLength l("10px"); #ifdef WITH_DBO Wt::Dbo::Session session; #endif + Wt::WServer server(argc, argv, WTHTTP_CONFIGURATION); + return EXIT_SUCCESS; }
getpelican__pelican-880
Exception on WP import looking for <pre> tag Another quick one. In `decode_wp_content()`: ``` python start = pre_part.index("<pre") ``` should be: ``` python start = pre_part.find("<pre") ``` because the next line checks for `start == -1`, but `index()` throws `ValueError` when the value can't be found, whereas `find()` returns -1. I can send a PR for this tomorrow, but wanted to make sure I reported it tonight.
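A tiny standalone illustration of why the current check can never take the `-1` branch (standard Python string semantics, nothing Pelican-specific):

```python
pre_part = "paragraph without an opening tag"

# find() reports "not found" as -1, which is what the next line tests for.
print(pre_part.find("<pre"))   # -1

# index() never returns -1; it raises instead, so the -1 check is unreachable.
try:
    pre_part.index("<pre")
except ValueError as exc:
    print("index() raised ValueError:", exc)
```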
[ { "content": "#!/usr/bin/env python\n\n# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals, print_function\nimport argparse\ntry:\n # py3k import\n from html.parser import HTMLParser\nexcept ImportError:\n # py2 import\n from HTMLParser import HTMLParser # NOQA\nimport os\nimport re\nimport subprocess\nimport sys\nimport time\nimport logging\n\nfrom codecs import open\n\nfrom pelican.utils import slugify\nfrom pelican.log import init\n\nlogger = logging.getLogger(__name__)\n\n\ndef decode_wp_content(content, br=True):\n pre_tags = {}\n if content.strip() == \"\":\n return \"\"\n\n content += \"\\n\"\n if \"<pre\" in content:\n pre_parts = content.split(\"</pre>\")\n last_pre = pre_parts.pop()\n content = \"\"\n pre_index = 0\n\n for pre_part in pre_parts:\n start = pre_part.index(\"<pre\")\n if start == -1:\n content = content + pre_part\n continue\n name = \"<pre wp-pre-tag-{0}></pre>\".format(pre_index)\n pre_tags[name] = pre_part[start:] + \"</pre>\"\n content = content + pre_part[0:start] + name\n pre_index += 1\n content = content + last_pre\n\n content = re.sub(r'<br />\\s*<br />', \"\\n\\n\", content)\n allblocks = ('(?:table|thead|tfoot|caption|col|colgroup|tbody|tr|'\n 'td|th|div|dl|dd|dt|ul|ol|li|pre|select|option|form|'\n 'map|area|blockquote|address|math|style|p|h[1-6]|hr|'\n 'fieldset|noscript|samp|legend|section|article|aside|'\n 'hgroup|header|footer|nav|figure|figcaption|details|'\n 'menu|summary)')\n content = re.sub(r'(<' + allblocks + r'[^>]*>)', \"\\n\\\\1\", content)\n content = re.sub(r'(</' + allblocks + r'>)', \"\\\\1\\n\\n\", content)\n # content = content.replace(\"\\r\\n\", \"\\n\")\n if \"<object\" in content:\n # no <p> inside object/embed\n content = re.sub(r'\\s*<param([^>]*)>\\s*', \"<param\\\\1>\", content)\n content = re.sub(r'\\s*</embed>\\s*', '</embed>', content)\n # content = re.sub(r'/\\n\\n+/', '\\n\\n', content)\n pgraphs = filter(lambda s: s != \"\", re.split(r'\\n\\s*\\n', content))\n content = \"\"\n for p in pgraphs:\n content = content + \"<p>\" + p.strip() + \"</p>\\n\"\n # under certain strange conditions it could create a P of entirely whitespace\n content = re.sub(r'<p>\\s*</p>', '', content)\n content = re.sub(r'<p>([^<]+)</(div|address|form)>', \"<p>\\\\1</p></\\\\2>\", content)\n # don't wrap tags\n content = re.sub(r'<p>\\s*(</?' + allblocks + r'[^>]*>)\\s*</p>', \"\\\\1\", content)\n #problem with nested lists\n content = re.sub(r'<p>(<li.*)</p>', \"\\\\1\", content)\n content = re.sub(r'<p><blockquote([^>]*)>', \"<blockquote\\\\1><p>\", content)\n content = content.replace('</blockquote></p>', '</p></blockquote>')\n content = re.sub(r'<p>\\s*(</?' + allblocks + '[^>]*>)', \"\\\\1\", content)\n content = re.sub(r'(</?' + allblocks + '[^>]*>)\\s*</p>', \"\\\\1\", content)\n if br:\n def _preserve_newline(match):\n return match.group(0).replace(\"\\n\", \"<WPPreserveNewline />\")\n content = re.sub(r'/<(script|style).*?<\\/\\\\1>/s', _preserve_newline, content)\n # optionally make line breaks\n content = re.sub(r'(?<!<br />)\\s*\\n', \"<br />\\n\", content)\n content = content.replace(\"<WPPreserveNewline />\", \"\\n\")\n content = re.sub(r'(</?' 
+ allblocks + r'[^>]*>)\\s*<br />', \"\\\\1\", content)\n content = re.sub(r'<br />(\\s*</?(?:p|li|div|dl|dd|dt|th|pre|td|ul|ol)[^>]*>)', '\\\\1', content)\n content = re.sub(r'\\n</p>', \"</p>\", content)\n\n if pre_tags:\n def _multi_replace(dic, string):\n pattern = r'|'.join(map(re.escape, dic.keys()))\n return re.sub(pattern, lambda m: dic[m.group()], string)\n content = _multi_replace(pre_tags, content)\n\n return content\n\n\ndef wp2fields(xml):\n \"\"\"Opens a wordpress XML file, and yield pelican fields\"\"\"\n try:\n from bs4 import BeautifulSoup\n except ImportError:\n error = ('Missing dependency '\n '\"BeautifulSoup4\" and \"lxml\" required to import Wordpress XML files.')\n sys.exit(error)\n\n\n with open(xml, encoding='utf-8') as infile:\n xmlfile = infile.read()\n soup = BeautifulSoup(xmlfile, \"xml\")\n items = soup.rss.channel.findAll('item')\n\n for item in items:\n\n if item.find('status').string == \"publish\":\n\n try:\n # Use HTMLParser due to issues with BeautifulSoup 3\n title = HTMLParser().unescape(item.title.contents[0])\n except IndexError:\n title = 'No title [%s]' % item.find('post_name').string\n logger.warn('Post \"%s\" is lacking a proper title' % title)\n\n content = item.find('encoded').string\n filename = item.find('post_name').string\n\n raw_date = item.find('post_date').string\n date_object = time.strptime(raw_date, \"%Y-%m-%d %H:%M:%S\")\n date = time.strftime(\"%Y-%m-%d %H:%M\", date_object)\n author = item.find('creator').string\n \n categories = [cat.string for cat in item.findAll('category', {'domain' : 'category'})]\n # caturl = [cat['nicename'] for cat in item.find(domain='category')]\n\n tags = [tag.string for tag in item.findAll('category', {'domain' : 'post_tag'})]\n\n yield (title, content, filename, date, author, categories, tags, \"wp-html\")\n\ndef dc2fields(file):\n \"\"\"Opens a Dotclear export file, and yield pelican fields\"\"\"\n try:\n from bs4 import BeautifulSoup\n except ImportError:\n error = ('Missing dependency '\n '\"BeautifulSoup4\" and \"lxml\" required to import Dotclear files.')\n sys.exit(error)\n\n\n in_cat = False\n in_post = False\n category_list = {}\n posts = []\n\n with open(file, 'r', encoding='utf-8') as f:\n\n for line in f:\n # remove final \\n\n line = line[:-1]\n\n if line.startswith('[category'):\n in_cat = True\n elif line.startswith('[post'):\n in_post = True\n elif in_cat:\n fields = line.split('\",\"')\n if not line:\n in_cat = False\n else:\n # remove 1st and last \"\"\n fields[0] = fields[0][1:]\n # fields[-1] = fields[-1][:-1]\n category_list[fields[0]]=fields[2]\n elif in_post:\n if not line:\n in_post = False\n break\n else:\n posts.append(line)\n\n print(\"%i posts read.\" % len(posts))\n\n for post in posts:\n fields = post.split('\",\"')\n\n # post_id = fields[0][1:]\n # blog_id = fields[1]\n # user_id = fields[2]\n cat_id = fields[3]\n # post_dt = fields[4]\n # post_tz = fields[5]\n post_creadt = fields[6]\n # post_upddt = fields[7]\n # post_password = fields[8]\n # post_type = fields[9]\n post_format = fields[10]\n # post_url = fields[11]\n # post_lang = fields[12]\n post_title = fields[13]\n post_excerpt = fields[14]\n post_excerpt_xhtml = fields[15]\n post_content = fields[16]\n post_content_xhtml = fields[17]\n # post_notes = fields[18]\n # post_words = fields[19]\n # post_status = fields[20]\n # post_selected = fields[21]\n # post_position = fields[22]\n # post_open_comment = fields[23]\n # post_open_tb = fields[24]\n # nb_comment = fields[25]\n # nb_trackback = fields[26]\n post_meta = 
fields[27]\n # redirect_url = fields[28][:-1]\n\n # remove seconds\n post_creadt = ':'.join(post_creadt.split(':')[0:2])\n\n author = \"\"\n categories = []\n tags = []\n\n if cat_id:\n categories = [category_list[id].strip() for id in cat_id.split(',')]\n\n # Get tags related to a post\n tag = post_meta.replace('{', '').replace('}', '').replace('a:1:s:3:\\\\\"tag\\\\\";a:', '').replace('a:0:', '')\n if len(tag) > 1:\n if int(tag[:1]) == 1:\n newtag = tag.split('\"')[1]\n tags.append(\n BeautifulSoup(\n newtag\n , \"xml\"\n )\n # bs4 always outputs UTF-8\n .decode('utf-8')\n )\n else:\n i=1\n j=1\n while(i <= int(tag[:1])):\n newtag = tag.split('\"')[j].replace('\\\\','')\n tags.append(\n BeautifulSoup(\n newtag\n , \"xml\"\n )\n # bs4 always outputs UTF-8\n .decode('utf-8')\n )\n i=i+1\n if j < int(tag[:1])*2:\n j=j+2\n\n \"\"\"\n dotclear2 does not use markdown by default unless you use the markdown plugin\n Ref: http://plugins.dotaddict.org/dc2/details/formatting-markdown\n \"\"\"\n if post_format == \"markdown\":\n content = post_excerpt + post_content\n else:\n content = post_excerpt_xhtml + post_content_xhtml\n content = content.replace('\\\\n', '')\n post_format = \"html\"\n\n yield (post_title, content, slugify(post_title), post_creadt, author, categories, tags, post_format)\n\n\ndef posterous2fields(api_token, email, password):\n \"\"\"Imports posterous posts\"\"\"\n import base64\n from datetime import datetime, timedelta\n try:\n # py3k import\n import json\n except ImportError:\n # py2 import\n import simplejson as json\n\n try:\n # py3k import\n import urllib.request as urllib_request\n except ImportError:\n # py2 import\n import urllib2 as urllib_request\n\n\n def get_posterous_posts(api_token, email, password, page = 1):\n base64string = base64.encodestring((\"%s:%s\" % (email, password)).encode('utf-8')).replace(b'\\n', b'')\n url = \"http://posterous.com/api/v2/users/me/sites/primary/posts?api_token=%s&page=%d\" % (api_token, page)\n request = urllib_request.Request(url)\n request.add_header(\"Authorization\", \"Basic %s\" % base64string.decode())\n handle = urllib_request.urlopen(request)\n posts = json.loads(handle.read().decode('utf-8'))\n return posts\n\n page = 1\n posts = get_posterous_posts(api_token, email, password, page)\n while len(posts) > 0:\n posts = get_posterous_posts(api_token, email, password, page)\n page += 1\n\n for post in posts:\n slug = post.get('slug')\n if not slug:\n slug = slugify(post.get('title'))\n tags = [tag.get('name') for tag in post.get('tags')]\n raw_date = post.get('display_date')\n date_object = datetime.strptime(raw_date[:-6], \"%Y/%m/%d %H:%M:%S\")\n offset = int(raw_date[-5:])\n delta = timedelta(hours = offset / 100)\n date_object -= delta\n date = date_object.strftime(\"%Y-%m-%d %H:%M\")\n\n yield (post.get('title'), post.get('body_cleaned'), slug, date,\n post.get('user').get('display_name'), [], tags, \"html\")\n\ndef feed2fields(file):\n \"\"\"Read a feed and yield pelican fields\"\"\"\n import feedparser\n d = feedparser.parse(file)\n for entry in d.entries:\n date = (time.strftime(\"%Y-%m-%d %H:%M\", entry.updated_parsed)\n if hasattr(entry, \"updated_parsed\") else None)\n author = entry.author if hasattr(entry, \"author\") else None\n tags = [e['term'] for e in entry.tags] if hasattr(entry, \"tags\") else None\n\n slug = slugify(entry.title)\n yield (entry.title, entry.description, slug, date, author, [], tags, \"html\")\n\n\ndef build_header(title, date, author, categories, tags, slug):\n \"\"\"Build a header from a list 
of fields\"\"\"\n header = '%s\\n%s\\n' % (title, '#' * len(title))\n if date:\n header += ':date: %s\\n' % date\n if author:\n header += ':author: %s\\n' % author\n if categories:\n header += ':category: %s\\n' % ', '.join(categories)\n if tags:\n header += ':tags: %s\\n' % ', '.join(tags)\n if slug:\n header += ':slug: %s\\n' % slug\n header += '\\n'\n return header\n\ndef build_markdown_header(title, date, author, categories, tags, slug):\n \"\"\"Build a header from a list of fields\"\"\"\n header = 'Title: %s\\n' % title\n if date:\n header += 'Date: %s\\n' % date\n if author:\n header += 'Author: %s\\n' % author\n if categories:\n header += 'Category: %s\\n' % ', '.join(categories)\n if tags:\n header += 'Tags: %s\\n' % ', '.join(tags)\n if slug:\n header += 'Slug: %s\\n' % slug\n header += '\\n'\n return header\n\ndef fields2pelican(fields, out_markup, output_path, dircat=False, strip_raw=False, disable_slugs=False):\n for title, content, filename, date, author, categories, tags, in_markup in fields:\n slug = not disable_slugs and filename or None\n if (in_markup == \"markdown\") or (out_markup == \"markdown\") :\n ext = '.md'\n header = build_markdown_header(title, date, author, categories, tags, slug)\n else:\n out_markup = \"rst\"\n ext = '.rst'\n header = build_header(title, date, author, categories, tags, slug)\n\n filename = os.path.basename(filename)\n\n # Enforce filename restrictions for various filesystems at once; see\n # http://en.wikipedia.org/wiki/Filename#Reserved_characters_and_words\n # we do not need to filter words because an extension will be appended\n filename = re.sub(r'[<>:\"/\\\\|?*^% ]', '-', filename) # invalid chars\n filename = filename.lstrip('.') # should not start with a dot\n if not filename:\n filename = '_'\n filename = filename[:249] # allow for 5 extra characters\n\n # option to put files in directories with categories names\n if dircat and (len(categories) > 0):\n catname = slugify(categories[0])\n out_filename = os.path.join(output_path, catname, filename+ext)\n if not os.path.isdir(os.path.join(output_path, catname)):\n os.mkdir(os.path.join(output_path, catname))\n else:\n out_filename = os.path.join(output_path, filename+ext)\n\n print(out_filename)\n\n if in_markup in (\"html\", \"wp-html\"):\n html_filename = os.path.join(output_path, filename+'.html')\n\n with open(html_filename, 'w', encoding='utf-8') as fp:\n # Replace newlines with paragraphs wrapped with <p> so\n # HTML is valid before conversion\n if in_markup == \"wp-html\":\n new_content = decode_wp_content(content)\n else:\n paragraphs = content.splitlines()\n paragraphs = ['<p>{0}</p>'.format(p) for p in paragraphs]\n new_content = ''.join(paragraphs)\n\n fp.write(new_content)\n\n\n parse_raw = '--parse-raw' if not strip_raw else ''\n cmd = ('pandoc --normalize --reference-links {0} --from=html'\n ' --to={1} -o \"{2}\" \"{3}\"').format(\n parse_raw, out_markup, out_filename, html_filename)\n\n try:\n rc = subprocess.call(cmd, shell=True)\n if rc < 0:\n error = \"Child was terminated by signal %d\" % -rc\n exit(error)\n\n elif rc > 0:\n error = \"Please, check your Pandoc installation.\"\n exit(error)\n except OSError as e:\n error = \"Pandoc execution failed: %s\" % e\n exit(error)\n\n os.remove(html_filename)\n\n with open(out_filename, 'r', encoding='utf-8') as fs:\n content = fs.read()\n if out_markup == \"markdown\":\n # In markdown, to insert a <br />, end a line with two or more spaces & then a end-of-line\n content = content.replace(\"\\\\\\n \", \" \\n\")\n content = 
content.replace(\"\\\\\\n\", \" \\n\")\n\n with open(out_filename, 'w', encoding='utf-8') as fs:\n fs.write(header + content)\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description=\"Transform feed, Wordpress or Dotclear files to reST (rst) \"\n \"or Markdown (md) files. Be sure to have pandoc installed.\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n parser.add_argument(dest='input', help='The input file to read')\n parser.add_argument('--wpfile', action='store_true', dest='wpfile',\n help='Wordpress XML export')\n parser.add_argument('--dotclear', action='store_true', dest='dotclear',\n help='Dotclear export')\n parser.add_argument('--posterous', action='store_true', dest='posterous',\n help='Posterous export')\n parser.add_argument('--feed', action='store_true', dest='feed',\n help='Feed to parse')\n parser.add_argument('-o', '--output', dest='output', default='output',\n help='Output path')\n parser.add_argument('-m', '--markup', dest='markup', default='rst',\n help='Output markup format (supports rst & markdown)')\n parser.add_argument('--dir-cat', action='store_true', dest='dircat',\n help='Put files in directories with categories name')\n parser.add_argument('--strip-raw', action='store_true', dest='strip_raw',\n help=\"Strip raw HTML code that can't be converted to \"\n \"markup such as flash embeds or iframes (wordpress import only)\")\n parser.add_argument('--disable-slugs', action='store_true',\n dest='disable_slugs',\n help='Disable storing slugs from imported posts within output. '\n 'With this disabled, your Pelican URLs may not be consistent '\n 'with your original posts.')\n parser.add_argument('-e', '--email', dest='email',\n help=\"Email address (posterous import only)\")\n parser.add_argument('-p', '--password', dest='password',\n help=\"Password (posterous import only)\")\n\n args = parser.parse_args()\n\n input_type = None\n if args.wpfile:\n input_type = 'wordpress'\n elif args.dotclear:\n input_type = 'dotclear'\n elif args.posterous:\n input_type = 'posterous'\n elif args.feed:\n input_type = 'feed'\n else:\n error = \"You must provide either --wpfile, --dotclear, --posterous or --feed options\"\n exit(error)\n\n if not os.path.exists(args.output):\n try:\n os.mkdir(args.output)\n except OSError:\n error = \"Unable to create the output folder: \" + args.output\n exit(error)\n\n if input_type == 'wordpress':\n fields = wp2fields(args.input)\n elif input_type == 'dotclear':\n fields = dc2fields(args.input)\n elif input_type == 'posterous':\n fields = posterous2fields(args.input, args.email, args.password)\n elif input_type == 'feed':\n fields = feed2fields(args.input)\n\n init() # init logging\n\n fields2pelican(fields, args.markup, args.output,\n dircat=args.dircat or False,\n strip_raw=args.strip_raw or False,\n disable_slugs=args.disable_slugs or False)\n", "path": "pelican/tools/pelican_import.py" } ]
[ { "content": "#!/usr/bin/env python\n\n# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals, print_function\nimport argparse\ntry:\n # py3k import\n from html.parser import HTMLParser\nexcept ImportError:\n # py2 import\n from HTMLParser import HTMLParser # NOQA\nimport os\nimport re\nimport subprocess\nimport sys\nimport time\nimport logging\n\nfrom codecs import open\n\nfrom pelican.utils import slugify\nfrom pelican.log import init\n\nlogger = logging.getLogger(__name__)\n\n\ndef decode_wp_content(content, br=True):\n pre_tags = {}\n if content.strip() == \"\":\n return \"\"\n\n content += \"\\n\"\n if \"<pre\" in content:\n pre_parts = content.split(\"</pre>\")\n last_pre = pre_parts.pop()\n content = \"\"\n pre_index = 0\n\n for pre_part in pre_parts:\n start = pre_part.find(\"<pre\")\n if start == -1:\n content = content + pre_part\n continue\n name = \"<pre wp-pre-tag-{0}></pre>\".format(pre_index)\n pre_tags[name] = pre_part[start:] + \"</pre>\"\n content = content + pre_part[0:start] + name\n pre_index += 1\n content = content + last_pre\n\n content = re.sub(r'<br />\\s*<br />', \"\\n\\n\", content)\n allblocks = ('(?:table|thead|tfoot|caption|col|colgroup|tbody|tr|'\n 'td|th|div|dl|dd|dt|ul|ol|li|pre|select|option|form|'\n 'map|area|blockquote|address|math|style|p|h[1-6]|hr|'\n 'fieldset|noscript|samp|legend|section|article|aside|'\n 'hgroup|header|footer|nav|figure|figcaption|details|'\n 'menu|summary)')\n content = re.sub(r'(<' + allblocks + r'[^>]*>)', \"\\n\\\\1\", content)\n content = re.sub(r'(</' + allblocks + r'>)', \"\\\\1\\n\\n\", content)\n # content = content.replace(\"\\r\\n\", \"\\n\")\n if \"<object\" in content:\n # no <p> inside object/embed\n content = re.sub(r'\\s*<param([^>]*)>\\s*', \"<param\\\\1>\", content)\n content = re.sub(r'\\s*</embed>\\s*', '</embed>', content)\n # content = re.sub(r'/\\n\\n+/', '\\n\\n', content)\n pgraphs = filter(lambda s: s != \"\", re.split(r'\\n\\s*\\n', content))\n content = \"\"\n for p in pgraphs:\n content = content + \"<p>\" + p.strip() + \"</p>\\n\"\n # under certain strange conditions it could create a P of entirely whitespace\n content = re.sub(r'<p>\\s*</p>', '', content)\n content = re.sub(r'<p>([^<]+)</(div|address|form)>', \"<p>\\\\1</p></\\\\2>\", content)\n # don't wrap tags\n content = re.sub(r'<p>\\s*(</?' + allblocks + r'[^>]*>)\\s*</p>', \"\\\\1\", content)\n #problem with nested lists\n content = re.sub(r'<p>(<li.*)</p>', \"\\\\1\", content)\n content = re.sub(r'<p><blockquote([^>]*)>', \"<blockquote\\\\1><p>\", content)\n content = content.replace('</blockquote></p>', '</p></blockquote>')\n content = re.sub(r'<p>\\s*(</?' + allblocks + '[^>]*>)', \"\\\\1\", content)\n content = re.sub(r'(</?' + allblocks + '[^>]*>)\\s*</p>', \"\\\\1\", content)\n if br:\n def _preserve_newline(match):\n return match.group(0).replace(\"\\n\", \"<WPPreserveNewline />\")\n content = re.sub(r'/<(script|style).*?<\\/\\\\1>/s', _preserve_newline, content)\n # optionally make line breaks\n content = re.sub(r'(?<!<br />)\\s*\\n', \"<br />\\n\", content)\n content = content.replace(\"<WPPreserveNewline />\", \"\\n\")\n content = re.sub(r'(</?' 
+ allblocks + r'[^>]*>)\\s*<br />', \"\\\\1\", content)\n content = re.sub(r'<br />(\\s*</?(?:p|li|div|dl|dd|dt|th|pre|td|ul|ol)[^>]*>)', '\\\\1', content)\n content = re.sub(r'\\n</p>', \"</p>\", content)\n\n if pre_tags:\n def _multi_replace(dic, string):\n pattern = r'|'.join(map(re.escape, dic.keys()))\n return re.sub(pattern, lambda m: dic[m.group()], string)\n content = _multi_replace(pre_tags, content)\n\n return content\n\n\ndef wp2fields(xml):\n \"\"\"Opens a wordpress XML file, and yield pelican fields\"\"\"\n try:\n from bs4 import BeautifulSoup\n except ImportError:\n error = ('Missing dependency '\n '\"BeautifulSoup4\" and \"lxml\" required to import Wordpress XML files.')\n sys.exit(error)\n\n\n with open(xml, encoding='utf-8') as infile:\n xmlfile = infile.read()\n soup = BeautifulSoup(xmlfile, \"xml\")\n items = soup.rss.channel.findAll('item')\n\n for item in items:\n\n if item.find('status').string == \"publish\":\n\n try:\n # Use HTMLParser due to issues with BeautifulSoup 3\n title = HTMLParser().unescape(item.title.contents[0])\n except IndexError:\n title = 'No title [%s]' % item.find('post_name').string\n logger.warn('Post \"%s\" is lacking a proper title' % title)\n\n content = item.find('encoded').string\n filename = item.find('post_name').string\n\n raw_date = item.find('post_date').string\n date_object = time.strptime(raw_date, \"%Y-%m-%d %H:%M:%S\")\n date = time.strftime(\"%Y-%m-%d %H:%M\", date_object)\n author = item.find('creator').string\n \n categories = [cat.string for cat in item.findAll('category', {'domain' : 'category'})]\n # caturl = [cat['nicename'] for cat in item.find(domain='category')]\n\n tags = [tag.string for tag in item.findAll('category', {'domain' : 'post_tag'})]\n\n yield (title, content, filename, date, author, categories, tags, \"wp-html\")\n\ndef dc2fields(file):\n \"\"\"Opens a Dotclear export file, and yield pelican fields\"\"\"\n try:\n from bs4 import BeautifulSoup\n except ImportError:\n error = ('Missing dependency '\n '\"BeautifulSoup4\" and \"lxml\" required to import Dotclear files.')\n sys.exit(error)\n\n\n in_cat = False\n in_post = False\n category_list = {}\n posts = []\n\n with open(file, 'r', encoding='utf-8') as f:\n\n for line in f:\n # remove final \\n\n line = line[:-1]\n\n if line.startswith('[category'):\n in_cat = True\n elif line.startswith('[post'):\n in_post = True\n elif in_cat:\n fields = line.split('\",\"')\n if not line:\n in_cat = False\n else:\n # remove 1st and last \"\"\n fields[0] = fields[0][1:]\n # fields[-1] = fields[-1][:-1]\n category_list[fields[0]]=fields[2]\n elif in_post:\n if not line:\n in_post = False\n break\n else:\n posts.append(line)\n\n print(\"%i posts read.\" % len(posts))\n\n for post in posts:\n fields = post.split('\",\"')\n\n # post_id = fields[0][1:]\n # blog_id = fields[1]\n # user_id = fields[2]\n cat_id = fields[3]\n # post_dt = fields[4]\n # post_tz = fields[5]\n post_creadt = fields[6]\n # post_upddt = fields[7]\n # post_password = fields[8]\n # post_type = fields[9]\n post_format = fields[10]\n # post_url = fields[11]\n # post_lang = fields[12]\n post_title = fields[13]\n post_excerpt = fields[14]\n post_excerpt_xhtml = fields[15]\n post_content = fields[16]\n post_content_xhtml = fields[17]\n # post_notes = fields[18]\n # post_words = fields[19]\n # post_status = fields[20]\n # post_selected = fields[21]\n # post_position = fields[22]\n # post_open_comment = fields[23]\n # post_open_tb = fields[24]\n # nb_comment = fields[25]\n # nb_trackback = fields[26]\n post_meta = 
fields[27]\n # redirect_url = fields[28][:-1]\n\n # remove seconds\n post_creadt = ':'.join(post_creadt.split(':')[0:2])\n\n author = \"\"\n categories = []\n tags = []\n\n if cat_id:\n categories = [category_list[id].strip() for id in cat_id.split(',')]\n\n # Get tags related to a post\n tag = post_meta.replace('{', '').replace('}', '').replace('a:1:s:3:\\\\\"tag\\\\\";a:', '').replace('a:0:', '')\n if len(tag) > 1:\n if int(tag[:1]) == 1:\n newtag = tag.split('\"')[1]\n tags.append(\n BeautifulSoup(\n newtag\n , \"xml\"\n )\n # bs4 always outputs UTF-8\n .decode('utf-8')\n )\n else:\n i=1\n j=1\n while(i <= int(tag[:1])):\n newtag = tag.split('\"')[j].replace('\\\\','')\n tags.append(\n BeautifulSoup(\n newtag\n , \"xml\"\n )\n # bs4 always outputs UTF-8\n .decode('utf-8')\n )\n i=i+1\n if j < int(tag[:1])*2:\n j=j+2\n\n \"\"\"\n dotclear2 does not use markdown by default unless you use the markdown plugin\n Ref: http://plugins.dotaddict.org/dc2/details/formatting-markdown\n \"\"\"\n if post_format == \"markdown\":\n content = post_excerpt + post_content\n else:\n content = post_excerpt_xhtml + post_content_xhtml\n content = content.replace('\\\\n', '')\n post_format = \"html\"\n\n yield (post_title, content, slugify(post_title), post_creadt, author, categories, tags, post_format)\n\n\ndef posterous2fields(api_token, email, password):\n \"\"\"Imports posterous posts\"\"\"\n import base64\n from datetime import datetime, timedelta\n try:\n # py3k import\n import json\n except ImportError:\n # py2 import\n import simplejson as json\n\n try:\n # py3k import\n import urllib.request as urllib_request\n except ImportError:\n # py2 import\n import urllib2 as urllib_request\n\n\n def get_posterous_posts(api_token, email, password, page = 1):\n base64string = base64.encodestring((\"%s:%s\" % (email, password)).encode('utf-8')).replace(b'\\n', b'')\n url = \"http://posterous.com/api/v2/users/me/sites/primary/posts?api_token=%s&page=%d\" % (api_token, page)\n request = urllib_request.Request(url)\n request.add_header(\"Authorization\", \"Basic %s\" % base64string.decode())\n handle = urllib_request.urlopen(request)\n posts = json.loads(handle.read().decode('utf-8'))\n return posts\n\n page = 1\n posts = get_posterous_posts(api_token, email, password, page)\n while len(posts) > 0:\n posts = get_posterous_posts(api_token, email, password, page)\n page += 1\n\n for post in posts:\n slug = post.get('slug')\n if not slug:\n slug = slugify(post.get('title'))\n tags = [tag.get('name') for tag in post.get('tags')]\n raw_date = post.get('display_date')\n date_object = datetime.strptime(raw_date[:-6], \"%Y/%m/%d %H:%M:%S\")\n offset = int(raw_date[-5:])\n delta = timedelta(hours = offset / 100)\n date_object -= delta\n date = date_object.strftime(\"%Y-%m-%d %H:%M\")\n\n yield (post.get('title'), post.get('body_cleaned'), slug, date,\n post.get('user').get('display_name'), [], tags, \"html\")\n\ndef feed2fields(file):\n \"\"\"Read a feed and yield pelican fields\"\"\"\n import feedparser\n d = feedparser.parse(file)\n for entry in d.entries:\n date = (time.strftime(\"%Y-%m-%d %H:%M\", entry.updated_parsed)\n if hasattr(entry, \"updated_parsed\") else None)\n author = entry.author if hasattr(entry, \"author\") else None\n tags = [e['term'] for e in entry.tags] if hasattr(entry, \"tags\") else None\n\n slug = slugify(entry.title)\n yield (entry.title, entry.description, slug, date, author, [], tags, \"html\")\n\n\ndef build_header(title, date, author, categories, tags, slug):\n \"\"\"Build a header from a list 
of fields\"\"\"\n header = '%s\\n%s\\n' % (title, '#' * len(title))\n if date:\n header += ':date: %s\\n' % date\n if author:\n header += ':author: %s\\n' % author\n if categories:\n header += ':category: %s\\n' % ', '.join(categories)\n if tags:\n header += ':tags: %s\\n' % ', '.join(tags)\n if slug:\n header += ':slug: %s\\n' % slug\n header += '\\n'\n return header\n\ndef build_markdown_header(title, date, author, categories, tags, slug):\n \"\"\"Build a header from a list of fields\"\"\"\n header = 'Title: %s\\n' % title\n if date:\n header += 'Date: %s\\n' % date\n if author:\n header += 'Author: %s\\n' % author\n if categories:\n header += 'Category: %s\\n' % ', '.join(categories)\n if tags:\n header += 'Tags: %s\\n' % ', '.join(tags)\n if slug:\n header += 'Slug: %s\\n' % slug\n header += '\\n'\n return header\n\ndef fields2pelican(fields, out_markup, output_path, dircat=False, strip_raw=False, disable_slugs=False):\n for title, content, filename, date, author, categories, tags, in_markup in fields:\n slug = not disable_slugs and filename or None\n if (in_markup == \"markdown\") or (out_markup == \"markdown\") :\n ext = '.md'\n header = build_markdown_header(title, date, author, categories, tags, slug)\n else:\n out_markup = \"rst\"\n ext = '.rst'\n header = build_header(title, date, author, categories, tags, slug)\n\n filename = os.path.basename(filename)\n\n # Enforce filename restrictions for various filesystems at once; see\n # http://en.wikipedia.org/wiki/Filename#Reserved_characters_and_words\n # we do not need to filter words because an extension will be appended\n filename = re.sub(r'[<>:\"/\\\\|?*^% ]', '-', filename) # invalid chars\n filename = filename.lstrip('.') # should not start with a dot\n if not filename:\n filename = '_'\n filename = filename[:249] # allow for 5 extra characters\n\n # option to put files in directories with categories names\n if dircat and (len(categories) > 0):\n catname = slugify(categories[0])\n out_filename = os.path.join(output_path, catname, filename+ext)\n if not os.path.isdir(os.path.join(output_path, catname)):\n os.mkdir(os.path.join(output_path, catname))\n else:\n out_filename = os.path.join(output_path, filename+ext)\n\n print(out_filename)\n\n if in_markup in (\"html\", \"wp-html\"):\n html_filename = os.path.join(output_path, filename+'.html')\n\n with open(html_filename, 'w', encoding='utf-8') as fp:\n # Replace newlines with paragraphs wrapped with <p> so\n # HTML is valid before conversion\n if in_markup == \"wp-html\":\n new_content = decode_wp_content(content)\n else:\n paragraphs = content.splitlines()\n paragraphs = ['<p>{0}</p>'.format(p) for p in paragraphs]\n new_content = ''.join(paragraphs)\n\n fp.write(new_content)\n\n\n parse_raw = '--parse-raw' if not strip_raw else ''\n cmd = ('pandoc --normalize --reference-links {0} --from=html'\n ' --to={1} -o \"{2}\" \"{3}\"').format(\n parse_raw, out_markup, out_filename, html_filename)\n\n try:\n rc = subprocess.call(cmd, shell=True)\n if rc < 0:\n error = \"Child was terminated by signal %d\" % -rc\n exit(error)\n\n elif rc > 0:\n error = \"Please, check your Pandoc installation.\"\n exit(error)\n except OSError as e:\n error = \"Pandoc execution failed: %s\" % e\n exit(error)\n\n os.remove(html_filename)\n\n with open(out_filename, 'r', encoding='utf-8') as fs:\n content = fs.read()\n if out_markup == \"markdown\":\n # In markdown, to insert a <br />, end a line with two or more spaces & then a end-of-line\n content = content.replace(\"\\\\\\n \", \" \\n\")\n content = 
content.replace(\"\\\\\\n\", \" \\n\")\n\n with open(out_filename, 'w', encoding='utf-8') as fs:\n fs.write(header + content)\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description=\"Transform feed, Wordpress or Dotclear files to reST (rst) \"\n \"or Markdown (md) files. Be sure to have pandoc installed.\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n parser.add_argument(dest='input', help='The input file to read')\n parser.add_argument('--wpfile', action='store_true', dest='wpfile',\n help='Wordpress XML export')\n parser.add_argument('--dotclear', action='store_true', dest='dotclear',\n help='Dotclear export')\n parser.add_argument('--posterous', action='store_true', dest='posterous',\n help='Posterous export')\n parser.add_argument('--feed', action='store_true', dest='feed',\n help='Feed to parse')\n parser.add_argument('-o', '--output', dest='output', default='output',\n help='Output path')\n parser.add_argument('-m', '--markup', dest='markup', default='rst',\n help='Output markup format (supports rst & markdown)')\n parser.add_argument('--dir-cat', action='store_true', dest='dircat',\n help='Put files in directories with categories name')\n parser.add_argument('--strip-raw', action='store_true', dest='strip_raw',\n help=\"Strip raw HTML code that can't be converted to \"\n \"markup such as flash embeds or iframes (wordpress import only)\")\n parser.add_argument('--disable-slugs', action='store_true',\n dest='disable_slugs',\n help='Disable storing slugs from imported posts within output. '\n 'With this disabled, your Pelican URLs may not be consistent '\n 'with your original posts.')\n parser.add_argument('-e', '--email', dest='email',\n help=\"Email address (posterous import only)\")\n parser.add_argument('-p', '--password', dest='password',\n help=\"Password (posterous import only)\")\n\n args = parser.parse_args()\n\n input_type = None\n if args.wpfile:\n input_type = 'wordpress'\n elif args.dotclear:\n input_type = 'dotclear'\n elif args.posterous:\n input_type = 'posterous'\n elif args.feed:\n input_type = 'feed'\n else:\n error = \"You must provide either --wpfile, --dotclear, --posterous or --feed options\"\n exit(error)\n\n if not os.path.exists(args.output):\n try:\n os.mkdir(args.output)\n except OSError:\n error = \"Unable to create the output folder: \" + args.output\n exit(error)\n\n if input_type == 'wordpress':\n fields = wp2fields(args.input)\n elif input_type == 'dotclear':\n fields = dc2fields(args.input)\n elif input_type == 'posterous':\n fields = posterous2fields(args.input, args.email, args.password)\n elif input_type == 'feed':\n fields = feed2fields(args.input)\n\n init() # init logging\n\n fields2pelican(fields, args.markup, args.output,\n dircat=args.dircat or False,\n strip_raw=args.strip_raw or False,\n disable_slugs=args.disable_slugs or False)\n", "path": "pelican/tools/pelican_import.py" } ]
diff --git a/pelican/tools/pelican_import.py b/pelican/tools/pelican_import.py
index 9e477c2cf..8ebb7659b 100755
--- a/pelican/tools/pelican_import.py
+++ b/pelican/tools/pelican_import.py
@@ -37,7 +37,7 @@ def decode_wp_content(content, br=True):
         pre_index = 0
 
         for pre_part in pre_parts:
-            start = pre_part.index("<pre")
+            start = pre_part.find("<pre")
             if start == -1:
                 content = content + pre_part
                 continue
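As context for the one-line diff above, the following small standalone snippet (assumed for illustration, not taken from pelican) shows the behavioural difference the patch relies on: `str.index` raises `ValueError` when the substring is missing, while `str.find` returns `-1`, which is the value the surrounding `if start == -1:` guard in `decode_wp_content` expects.

```python
# Hypothetical snippet illustrating the str.index -> str.find change; the
# variable name mirrors the diff, but this is not pelican code.
pre_part = "plain paragraph without a pre tag"

# str.index raises when the needle is absent...
try:
    start = pre_part.index("<pre")
except ValueError:
    start = -1

# ...whereas str.find returns -1 directly, so the `if start == -1:` guard
# actually triggers instead of the call blowing up.
start = pre_part.find("<pre")
print(start)  # -1
```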
mitmproxy__mitmproxy-5476
ValueError: mutable default <class 'mitmproxy.contentviews.grpc.ProtoParser.ParserOptions'> for field parser_options is not allowed: use default_factory on python 3.11

#### Problem Description

mitmproxy fails to start throwing a `ValueError` exception:

```
ValueError: mutable default <class 'mitmproxy.contentviews.grpc.ProtoParser.ParserOptions'> for field parser_options is not allowed: use default_factory
```

#### Steps to reproduce the behavior:

1. Install mitmproxy 8.1.1 on Fedora rawhide (37)
2. run the binary

#### System Information

```
$ /usr/bin/mitmproxy --version
Traceback (most recent call last):
  File "/usr/bin/mitmproxy", line 8, in <module>
    sys.exit(mitmproxy())
             ^^^^^^^^^^^
  File "/usr/lib/python3.11/site-packages/mitmproxy/tools/main.py", line 118, in mitmproxy
    from mitmproxy.tools import console
  File "/usr/lib/python3.11/site-packages/mitmproxy/tools/console/__init__.py", line 1, in <module>
    from mitmproxy.tools.console import master
  File "/usr/lib/python3.11/site-packages/mitmproxy/tools/console/master.py", line 26, in <module>
    from mitmproxy.tools.console import consoleaddons
  File "/usr/lib/python3.11/site-packages/mitmproxy/tools/console/consoleaddons.py", line 6, in <module>
    from mitmproxy import contentviews
  File "/usr/lib/python3.11/site-packages/mitmproxy/contentviews/__init__.py", line 23, in <module>
    from . import (
  File "/usr/lib/python3.11/site-packages/mitmproxy/contentviews/grpc.py", line 952, in <module>
    @dataclass
     ^^^^^^^^^
  File "/usr/lib64/python3.11/dataclasses.py", line 1221, in dataclass
    return wrap(cls)
           ^^^^^^^^^
  File "/usr/lib64/python3.11/dataclasses.py", line 1211, in wrap
    return _process_class(cls, init, repr, eq, order, unsafe_hash,
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/usr/lib64/python3.11/dataclasses.py", line 959, in _process_class
    cls_fields.append(_get_field(cls, name, type, kw_only))
                      ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/usr/lib64/python3.11/dataclasses.py", line 816, in _get_field
    raise ValueError(f'mutable default {type(f.default)} for field '
ValueError: mutable default <class 'mitmproxy.contentviews.grpc.ProtoParser.ParserOptions'> for field parser_options is not allowed: use default_factory
```
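For readers unfamiliar with the Python 3.11 change behind this report, the minimal sketch below reproduces the failure and shows the `default_factory` pattern the error message points to. The `ParserOptionsLike`/`ViewConfigLike` names are illustrative stand-ins, not mitmproxy's actual classes, and this is not the project's patch.

```python
from dataclasses import dataclass, field


@dataclass
class ParserOptionsLike:
    # Stand-in for a plain options dataclass; non-frozen dataclasses are
    # unhashable, which Python 3.11 treats as "mutable" when used as a default.
    include_wiretype: bool = False


# On Python 3.11 the commented-out variant fails at class-definition time with:
#   ValueError: mutable default <class '...ParserOptionsLike'> for field
#   parser_options is not allowed: use default_factory
#
# @dataclass
# class ViewConfigLike:
#     parser_options: ParserOptionsLike = ParserOptionsLike()


@dataclass
class ViewConfigLike:
    # default_factory builds a fresh instance per ViewConfigLike object,
    # which is accepted on all Python versions including 3.11.
    parser_options: ParserOptionsLike = field(default_factory=ParserOptionsLike)
    parser_rules: list = field(default_factory=list)


print(ViewConfigLike())
# ViewConfigLike(parser_options=ParserOptionsLike(include_wiretype=False), parser_rules=[])
```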
[ { "content": "from __future__ import annotations\n\nimport struct\nfrom dataclasses import dataclass, field\nfrom enum import Enum\nfrom typing import Generator, Iterable, Iterator\n\nfrom mitmproxy import contentviews, ctx, flow, flowfilter, http\nfrom mitmproxy.contentviews import base\nfrom mitmproxy.net.encoding import decode\n\n\nclass ProtoParser:\n @dataclass\n class ParserRule:\n \"\"\"\n A parser rule lists Field definitions which are applied if the filter rule matches the flow.\n\n Matching on flow-level also means, a match applies to request AND response messages.\n To restrict a rule to a requests only use 'ParserRuleRequest', instead.\n To restrict a rule to a responses only use 'ParserRuleResponse', instead.\n \"\"\"\n\n field_definitions: list[ProtoParser.ParserFieldDefinition]\n \"\"\"List of field definitions for this rule \"\"\"\n\n name: str = \"\"\n \"\"\"Name of this rule, only used for debugging\"\"\"\n\n filter: str = \"\"\n \"\"\"\n Flowfilter to select which flows to apply to ('~q' and '~s' can not be used to distinguish\n if the rule should apply to the request or response of a flow. To do so, use ParserRuleRequest\n or ParserRuleResponse. ParserRule always applies to request and response.)\n \"\"\"\n\n @dataclass\n class ParserRuleResponse(ParserRule):\n \"\"\"\n A parser rule lists Field definitions which are applied if the filter rule matches the flow.\n\n The rule only applies if the processed message is a server response.\n \"\"\"\n\n @dataclass\n class ParserRuleRequest(ParserRule):\n \"\"\"\n A parser rule lists Field definitions which are applied if the filter rule matches the flow.\n\n The rule only applies if the processed message is a client request.\n \"\"\"\n\n @dataclass\n class ParserFieldDefinition:\n \"\"\"\n Defines how to parse a field (or multiple fields with the same tag) in a protobuf messages.\n\n This allows to apply an intended decoding (f.e. decode uint64 as double instead) and to assign\n a descriptive name to a field. Field definitions are aggregated into rules, which also holds\n a filter to match selected HTTP messages.\n\n The most natural way to use this, is to describe known parts of a single protobuf message\n in a set of field descriptors, pack them into a rule and set the filter of the rule in a way,\n that it only applies to proper protobuf messages (f.e. to request traffic against an API endpoint\n matched by an URL flowfilter)\n \"\"\"\n\n # A 'tag' could be considered as \"absolute path\" to match a unique field, yet\n # protobuf allows to uses the same nested message in different positions of the parent message\n # The 'tag_prefixes' parameter allows to apply the field definition to different \"leafs nodes\"\n # of a message.\n #\n # Example 1: match a single, absolute tag\n # ----------\n # tag = '1.2'\n # tag_prefixes = [] (default)\n #\n # applies to: tag '1.2'\n #\n # Example 2: match multiple tags with same ending\n # ----------\n # tag = '1.3'\n # tag_prefixes = ['1.2.', '2.5.']\n #\n # applies to: tag '1.2.1.3' and tag '2.5.1.3'\n # does not apply to: '1.3', unless tag_prefixes is extended to tag_prefixes = ['1.2', '2.5', '']\n #\n # Example 3: match multiple tags\n # ----------\n # tag = ''\n # tag_prefixes = ['1.2', '2.5']\n #\n # applies to: tag '1.2' and tag '1.5'\n\n tag: str\n \"\"\"Field tag for which this description applies (including flattened tag path, f.e. '1.2.2.4')\"\"\"\n\n tag_prefixes: list[str] = field(default_factory=list)\n \"\"\"List of prefixes for tag matching (f.e. 
tag_prefixes=['1.2.', '2.2.'] with tag='1' matches '1.2.1' and '2.2.1')\"\"\"\n\n intended_decoding: ProtoParser.DecodedTypes | None = None\n \"\"\"optional: intended decoding for visualization (parser fails over to alternate decoding if not possible)\"\"\"\n\n name: str | None = None\n \"\"\"optional: intended field for visualization (parser fails over to alternate decoding if not possible)\"\"\"\n\n as_packed: bool | None = False\n \"\"\"optional: if set to true, the field is considered to be repeated and packed\"\"\"\n\n @dataclass\n class ParserOptions:\n # output should contain wiretype of fields\n include_wiretype: bool = False\n\n # output should contain the fields which describe nested messages\n # (the nested messages bodies are always included, but the \"header fields\" could\n # add unnecessary output overhead)\n exclude_message_headers: bool = False\n\n # optional: rules\n # rules: List[ProtoParser.ParserRule] = field(default_factory=list)\n\n class DecodedTypes(Enum):\n # varint\n int32 = 0\n int64 = 1\n uint32 = 2\n uint64 = 3\n sint32 = 4 # ZigZag encoding\n sint64 = 5 # ZigZag encoding\n bool = 6\n enum = 7\n # bit_32\n fixed32 = 8\n sfixed32 = 9\n float = 10\n # bit_64\n fixed64 = 11\n sfixed64 = 12\n double = 13\n # len_delimited\n string = 14\n bytes = 15\n message = 16\n\n # helper\n unknown = 17\n\n @staticmethod\n def _read_base128le(data: bytes) -> tuple[int, int]:\n res = 0\n offset = 0\n while offset < len(data):\n o = data[offset]\n res += (o & 0x7F) << (7 * offset)\n offset += 1\n if o < 0x80:\n # the Kaitai parser for protobuf support base128 le values up\n # to 8 groups (bytes). Due to the nature of the encoding, each\n # group attributes 7bit to the resulting value, which give\n # a 56 bit value at maximum.\n # The values which get encoded into protobuf variable length integers,\n # on the other hand, include full 64bit types (int64, uint64, sint64).\n # This means, the Kaitai encoder can not cover the full range of\n # possible values\n #\n # This decoder puts no limitation on the maximum value of variable\n # length integers. 
Values exceeding 64bit have to be handled externally\n return offset, res\n raise ValueError(\"varint exceeds bounds of provided data\")\n\n @staticmethod\n def _read_u32(data: bytes) -> tuple[int, int]:\n return 4, struct.unpack(\"<I\", data[:4])[0]\n\n @staticmethod\n def _read_u64(data: bytes) -> tuple[int, int]:\n return 8, struct.unpack(\"<Q\", data[:8])[0]\n\n class WireTypes(Enum):\n varint = 0\n bit_64 = 1\n len_delimited = 2\n group_start = 3\n group_end = 4\n bit_32 = 5\n\n @staticmethod\n def read_fields(\n wire_data: bytes,\n parent_field: ProtoParser.Field | None,\n options: ProtoParser.ParserOptions,\n rules: list[ProtoParser.ParserRule],\n ) -> list[ProtoParser.Field]:\n res: list[ProtoParser.Field] = []\n pos = 0\n while pos < len(wire_data):\n # read field key (tag and wire_type)\n offset, key = ProtoParser._read_base128le(wire_data[pos:])\n # casting raises exception for invalid WireTypes\n wt = ProtoParser.WireTypes(key & 7)\n tag = key >> 3\n pos += offset\n\n val: bytes | int\n preferred_decoding: ProtoParser.DecodedTypes\n if wt == ProtoParser.WireTypes.varint:\n offset, val = ProtoParser._read_base128le(wire_data[pos:])\n pos += offset\n bl = val.bit_length()\n if bl > 64:\n preferred_decoding = ProtoParser.DecodedTypes.unknown\n if bl > 32:\n preferred_decoding = ProtoParser.DecodedTypes.uint64\n else:\n preferred_decoding = ProtoParser.DecodedTypes.uint32\n elif wt == ProtoParser.WireTypes.bit_64:\n offset, val = ProtoParser._read_u64(wire_data[pos:])\n pos += offset\n preferred_decoding = ProtoParser.DecodedTypes.fixed64\n elif wt == ProtoParser.WireTypes.len_delimited:\n offset, length = ProtoParser._read_base128le(wire_data[pos:])\n pos += offset\n if length > len(wire_data[pos:]):\n raise ValueError(\"length delimited field exceeds data size\")\n val = wire_data[pos : pos + length]\n pos += length\n preferred_decoding = ProtoParser.DecodedTypes.message\n elif (\n wt == ProtoParser.WireTypes.group_start\n or wt == ProtoParser.WireTypes.group_end\n ):\n raise ValueError(f\"deprecated field: {wt}\")\n elif wt == ProtoParser.WireTypes.bit_32:\n offset, val = ProtoParser._read_u32(wire_data[pos:])\n pos += offset\n preferred_decoding = ProtoParser.DecodedTypes.fixed32\n else:\n # not reachable as if-else statements contain all possible WireTypes\n # wrong types raise Exception during typecasting in `wt = ProtoParser.WireTypes((key & 7))`\n raise ValueError(\"invalid WireType for protobuf messsage field\")\n\n field = ProtoParser.Field(\n wire_type=wt,\n preferred_decoding=preferred_decoding,\n options=options,\n rules=rules,\n tag=tag,\n wire_value=val,\n parent_field=parent_field,\n )\n res.append(field)\n\n return res\n\n @staticmethod\n def read_packed_fields(\n packed_field: ProtoParser.Field,\n ) -> list[ProtoParser.Field]:\n if not isinstance(packed_field.wire_value, bytes):\n ctx.log(type(packed_field.wire_value))\n raise ValueError(\"can not unpack field with data other than bytes\")\n wire_data: bytes = packed_field.wire_value\n tag: int = packed_field.tag\n options: ProtoParser.ParserOptions = packed_field.options\n rules: list[ProtoParser.ParserRule] = packed_field.rules\n intended_decoding: ProtoParser.DecodedTypes = packed_field.preferred_decoding\n\n # the packed field has to have WireType length delimited, whereas the contained\n # individual types have to have a different WireType, which is derived from\n # the intended decoding\n if (\n packed_field.wire_type != ProtoParser.WireTypes.len_delimited\n or not isinstance(packed_field.wire_value, 
bytes)\n ):\n raise ValueError(\n \"packed fields have to be embedded in a length delimited message\"\n )\n # wiretype to read has to be determined from intended decoding\n packed_wire_type: ProtoParser.WireTypes\n if (\n intended_decoding == ProtoParser.DecodedTypes.int32\n or intended_decoding == ProtoParser.DecodedTypes.int64\n or intended_decoding == ProtoParser.DecodedTypes.uint32\n or intended_decoding == ProtoParser.DecodedTypes.uint64\n or intended_decoding == ProtoParser.DecodedTypes.sint32\n or intended_decoding == ProtoParser.DecodedTypes.sint64\n or intended_decoding == ProtoParser.DecodedTypes.bool\n or intended_decoding == ProtoParser.DecodedTypes.enum\n ):\n packed_wire_type = ProtoParser.WireTypes.varint\n elif (\n intended_decoding == ProtoParser.DecodedTypes.fixed32\n or intended_decoding == ProtoParser.DecodedTypes.sfixed32\n or intended_decoding == ProtoParser.DecodedTypes.float\n ):\n packed_wire_type = ProtoParser.WireTypes.bit_32\n elif (\n intended_decoding == ProtoParser.DecodedTypes.fixed64\n or intended_decoding == ProtoParser.DecodedTypes.sfixed64\n or intended_decoding == ProtoParser.DecodedTypes.double\n ):\n packed_wire_type = ProtoParser.WireTypes.bit_64\n elif (\n intended_decoding == ProtoParser.DecodedTypes.string\n or intended_decoding == ProtoParser.DecodedTypes.bytes\n or intended_decoding == ProtoParser.DecodedTypes.message\n ):\n packed_wire_type = ProtoParser.WireTypes.len_delimited\n else:\n # should never happen, no test\n raise TypeError(\n \"Wire type could not be determined from packed decoding type\"\n )\n\n res: list[ProtoParser.Field] = []\n pos = 0\n val: bytes | int\n if packed_wire_type == ProtoParser.WireTypes.varint:\n while pos < len(wire_data):\n offset, val = ProtoParser._read_base128le(wire_data[pos:])\n pos += offset\n res.append(\n ProtoParser.Field(\n options=options,\n preferred_decoding=intended_decoding,\n rules=rules,\n tag=tag,\n wire_type=packed_wire_type,\n wire_value=val,\n parent_field=packed_field.parent_field,\n is_unpacked_children=True,\n )\n )\n elif packed_wire_type == ProtoParser.WireTypes.bit_64:\n if len(wire_data) % 8 != 0:\n raise ValueError(\"can not parse as packed bit64\")\n while pos < len(wire_data):\n offset, val = ProtoParser._read_u64(wire_data[pos:])\n pos += offset\n res.append(\n ProtoParser.Field(\n options=options,\n preferred_decoding=intended_decoding,\n rules=rules,\n tag=tag,\n wire_type=packed_wire_type,\n wire_value=val,\n parent_field=packed_field.parent_field,\n is_unpacked_children=True,\n )\n )\n elif packed_wire_type == ProtoParser.WireTypes.len_delimited:\n while pos < len(wire_data):\n offset, length = ProtoParser._read_base128le(wire_data[pos:])\n pos += offset\n val = wire_data[pos : pos + length]\n if length > len(wire_data[pos:]):\n raise ValueError(\"packed length delimited field exceeds data size\")\n res.append(\n ProtoParser.Field(\n options=options,\n preferred_decoding=intended_decoding,\n rules=rules,\n tag=tag,\n wire_type=packed_wire_type,\n wire_value=val,\n parent_field=packed_field.parent_field,\n is_unpacked_children=True,\n )\n )\n pos += length\n elif (\n packed_wire_type == ProtoParser.WireTypes.group_start\n or packed_wire_type == ProtoParser.WireTypes.group_end\n ):\n raise ValueError(\"group tags can not be encoded packed\")\n elif packed_wire_type == ProtoParser.WireTypes.bit_32:\n if len(wire_data) % 4 != 0:\n raise ValueError(\"can not parse as packed bit32\")\n while pos < len(wire_data):\n offset, val = ProtoParser._read_u32(wire_data[pos:])\n pos += 
offset\n res.append(\n ProtoParser.Field(\n options=options,\n preferred_decoding=intended_decoding,\n rules=rules,\n tag=tag,\n wire_type=packed_wire_type,\n wire_value=val,\n parent_field=packed_field.parent_field,\n is_unpacked_children=True,\n )\n )\n else:\n # should never happen\n raise ValueError(\"invalid WireType for protobuf messsage field\")\n\n # mark parent field as packed parent (if we got here, unpacking succeeded)\n packed_field.is_packed_parent = True\n return res\n\n class Field:\n \"\"\"\n Represents a single field of a protobuf message and handles the varios encodings.\n\n As mitmproxy sees the data passing by as raw protobuf message, it only knows the\n WireTypes. Each of the WireTypes could represent different Protobuf field types.\n The exact Protobuf field type can not be determined from the wire format, thus different\n options for decoding have to be supported.\n In addition the parsed WireTypes are (intermediary) stored in Python types, which adds\n some additional overhead type conversions.\n\n WireType represented Protobuf Types Python type (intermediary)\n\n 0: varint int32, int64, uint32, uint64, enum, int (*)\n sint32, sint64 (both ZigZag encoded), int\n bool bool\n float (**)\n\n 1: bit_64 fixed64, sfixed64, int (*)\n double float\n\n 2: len_delimited string, str\n message, class 'Message'\n bytes, bytes (*)\n packed_repeated_field class 'Message' (fields with same tag)\n\n 3: group_start unused (deprecated) -\n 4: group_end unused (deprecated) -\n\n 5: bit_32 fixed32, sfixed32, int (*)\n float float\n\n (*) Note 1: Conversion between WireType and intermediary python representation\n is handled by Kaitai protobuf decoder and always uses the python\n representation marked with (*). Converting to alternative representations\n is handled inside this class.\n (**) Note 2: Varint is not used to represent floating point values, but some applications\n store native floats in uint32 protobuf types (or native double in uint64).\n Thus we allow conversion of varint to floating point values for convenience\n (A well known APIs \"hide\" GPS latitude and longitude values in varint types,\n much easier to spot such things when rendered as float)\n\n Ref: - https://developers.google.com/protocol-buffers/docs/proto3\n - https://developers.google.com/protocol-buffers/docs/encoding\n \"\"\"\n\n def __init__(\n self,\n wire_type: ProtoParser.WireTypes,\n preferred_decoding: ProtoParser.DecodedTypes,\n tag: int,\n parent_field: ProtoParser.Field | None,\n wire_value: int | bytes,\n options: ProtoParser.ParserOptions,\n rules: list[ProtoParser.ParserRule],\n is_unpacked_children: bool = False,\n ) -> None:\n self.wire_type: ProtoParser.WireTypes = wire_type\n self.preferred_decoding: ProtoParser.DecodedTypes = preferred_decoding\n self.wire_value: int | bytes = wire_value\n self.tag: int = tag\n self.options: ProtoParser.ParserOptions = options\n self.name: str = \"\"\n self.rules: list[ProtoParser.ParserRule] = rules\n self.parent_field: ProtoParser.Field | None = parent_field\n self.is_unpacked_children: bool = (\n is_unpacked_children # marks field as being a result of unpacking\n )\n self.is_packed_parent: bool = (\n False # marks field as being parent of successfully unpacked children\n )\n self.parent_tags: list[int] = []\n if self.parent_field is not None:\n self.parent_tags = self.parent_field.parent_tags[:]\n self.parent_tags.append(self.parent_field.tag)\n self.try_unpack = False\n\n # rules can overwrite self.try_unpack\n self.apply_rules()\n # do not unpack fields 
which are the result of unpacking\n if parent_field is not None and self.is_unpacked_children:\n self.try_unpack = False\n\n # no tests for only_first_hit=False, as not user-changable\n def apply_rules(self, only_first_hit=True):\n tag_str = self._gen_tag_str()\n name = None\n decoding = None\n as_packed = False\n try:\n for rule in self.rules:\n for fd in rule.field_definitions:\n match = False\n if len(fd.tag_prefixes) == 0 and fd.tag == tag_str:\n match = True\n else:\n for rt in fd.tag_prefixes:\n if rt + fd.tag == tag_str:\n match = True\n break\n if match:\n if only_first_hit:\n # only first match\n self.name = fd.name\n self.preferred_decoding = fd.intended_decoding\n self.try_unpack = fd.as_packed\n return\n else:\n # overwrite matches till last rule was inspected\n # (f.e. allows to define name in one rule and intended_decoding in another one)\n name = fd.name if fd.name else name\n decoding = (\n fd.intended_decoding\n if fd.intended_decoding\n else decoding\n )\n if fd.as_packed:\n as_packed = True\n\n if name:\n self.name = name\n if decoding:\n self.preferred_decoding = decoding\n self.try_unpack = as_packed\n except Exception as e:\n ctx.log.warn(e)\n\n def _gen_tag_str(self):\n tags = self.parent_tags[:]\n tags.append(self.tag)\n return \".\".join([str(tag) for tag in tags])\n\n def safe_decode_as(\n self,\n intended_decoding: ProtoParser.DecodedTypes,\n try_as_packed: bool = False,\n ) -> tuple[\n ProtoParser.DecodedTypes,\n bool | float | int | bytes | str | list[ProtoParser.Field],\n ]:\n \"\"\"\n Tries to decode as intended, applies failover, if not possible\n\n Returns selected decoding and decoded value\n \"\"\"\n if self.wire_type == ProtoParser.WireTypes.varint:\n try:\n return intended_decoding, self.decode_as(\n intended_decoding, try_as_packed\n )\n except:\n if int(self.wire_value).bit_length() > 32:\n # ignore the fact that varint could exceed 64bit (would violate the specs)\n return ProtoParser.DecodedTypes.uint64, self.wire_value\n else:\n return ProtoParser.DecodedTypes.uint32, self.wire_value\n elif self.wire_type == ProtoParser.WireTypes.bit_64:\n try:\n return intended_decoding, self.decode_as(\n intended_decoding, try_as_packed\n )\n except:\n return ProtoParser.DecodedTypes.fixed64, self.wire_value\n elif self.wire_type == ProtoParser.WireTypes.bit_32:\n try:\n return intended_decoding, self.decode_as(\n intended_decoding, try_as_packed\n )\n except:\n return ProtoParser.DecodedTypes.fixed32, self.wire_value\n elif self.wire_type == ProtoParser.WireTypes.len_delimited:\n try:\n return intended_decoding, self.decode_as(\n intended_decoding, try_as_packed\n )\n except:\n # failover strategy: message --> string (valid UTF-8) --> bytes\n len_delimited_strategy: list[ProtoParser.DecodedTypes] = [\n ProtoParser.DecodedTypes.message,\n ProtoParser.DecodedTypes.string,\n ProtoParser.DecodedTypes.bytes, # should always work\n ]\n for failover_decoding in len_delimited_strategy:\n if failover_decoding == intended_decoding and not try_as_packed:\n # don't try same decoding twice, unless first attempt was packed\n continue\n try:\n return failover_decoding, self.decode_as(\n failover_decoding, False\n )\n except:\n pass\n\n # we should never get here (could not be added to tests)\n return ProtoParser.DecodedTypes.unknown, self.wire_value\n\n def decode_as(\n self, intended_decoding: ProtoParser.DecodedTypes, as_packed: bool = False\n ) -> bool | int | float | bytes | str | list[ProtoParser.Field]:\n if as_packed is True:\n return 
ProtoParser.read_packed_fields(packed_field=self)\n\n if self.wire_type == ProtoParser.WireTypes.varint:\n assert isinstance(self.wire_value, int)\n if intended_decoding == ProtoParser.DecodedTypes.bool:\n # clamp result to 64bit\n return self.wire_value & 0xFFFFFFFFFFFFFFFF != 0\n elif intended_decoding == ProtoParser.DecodedTypes.int32:\n if self.wire_value.bit_length() > 32:\n raise TypeError(\"wire value too large for int32\")\n return struct.unpack(\"!i\", struct.pack(\"!I\", self.wire_value))[0]\n elif intended_decoding == ProtoParser.DecodedTypes.int64:\n if self.wire_value.bit_length() > 64:\n raise TypeError(\"wire value too large for int64\")\n return struct.unpack(\"!q\", struct.pack(\"!Q\", self.wire_value))[0]\n elif intended_decoding == ProtoParser.DecodedTypes.uint32:\n if self.wire_value.bit_length() > 32:\n raise TypeError(\"wire value too large for uint32\")\n return self.wire_value # already 'int' which was parsed as unsigned\n elif (\n intended_decoding == ProtoParser.DecodedTypes.uint64\n or intended_decoding == ProtoParser.DecodedTypes.enum\n ):\n if self.wire_value.bit_length() > 64:\n raise TypeError(\"wire value too large\")\n return self.wire_value # already 'int' which was parsed as unsigned\n elif intended_decoding == ProtoParser.DecodedTypes.sint32:\n if self.wire_value.bit_length() > 32:\n raise TypeError(\"wire value too large for sint32\")\n return (self.wire_value >> 1) ^ -(\n self.wire_value & 1\n ) # zigzag_decode\n elif intended_decoding == ProtoParser.DecodedTypes.sint64:\n if self.wire_value.bit_length() > 64:\n raise TypeError(\"wire value too large for sint64\")\n # ZigZag decode\n # Ref: https://gist.github.com/mfuerstenau/ba870a29e16536fdbaba\n return (self.wire_value >> 1) ^ -(self.wire_value & 1)\n elif (\n intended_decoding == ProtoParser.DecodedTypes.float\n or intended_decoding == ProtoParser.DecodedTypes.double\n ):\n # special case, not complying to protobuf specs\n return self._wire_value_as_float()\n elif self.wire_type == ProtoParser.WireTypes.bit_64:\n if intended_decoding == ProtoParser.DecodedTypes.fixed64:\n return self.wire_value\n elif intended_decoding == ProtoParser.DecodedTypes.sfixed64:\n return struct.unpack(\"!q\", struct.pack(\"!Q\", self.wire_value))[0]\n elif intended_decoding == ProtoParser.DecodedTypes.double:\n return self._wire_value_as_float()\n elif self.wire_type == ProtoParser.WireTypes.bit_32:\n if intended_decoding == ProtoParser.DecodedTypes.fixed32:\n return self.wire_value\n elif intended_decoding == ProtoParser.DecodedTypes.sfixed32:\n return struct.unpack(\"!i\", struct.pack(\"!I\", self.wire_value))[0]\n elif intended_decoding == ProtoParser.DecodedTypes.float:\n return self._wire_value_as_float()\n elif self.wire_type == ProtoParser.WireTypes.len_delimited:\n assert isinstance(self.wire_value, bytes)\n if intended_decoding == ProtoParser.DecodedTypes.string:\n # According to specs, a protobuf string HAS TO be UTF-8 parsable\n # throw exception on invalid UTF-8 chars, but escape linebreaks\n return self.wire_value_as_utf8(escape_newline=True)\n elif intended_decoding == ProtoParser.DecodedTypes.bytes:\n # always works, assure to hand back a copy\n return self.wire_value[:]\n elif intended_decoding == ProtoParser.DecodedTypes.message:\n return ProtoParser.read_fields(\n wire_data=self.wire_value,\n parent_field=self,\n options=self.options,\n rules=self.rules,\n )\n\n # if here, there is no valid decoding\n raise TypeError(\"intended decoding mismatches wire type\")\n\n def encode_from(inputval, 
intended_encoding: ProtoParser.DecodedTypes):\n raise NotImplementedError(\n \"Future work, needed to manipulate and re-encode protobuf message, with respect to given wire types\"\n )\n\n def _wire_value_as_float(self) -> float:\n \"\"\"\n Handles double (64bit) and float (32bit).\n Assumes Network Byte Order (big endian).\n\n Usable for:\n\n WireType --> Protobuf Type):\n ----------------------------\n varint --> double/float (not intended by ProtoBuf, but used in the wild)\n bit_32 --> float\n bit_64 --> double\n len_delimited --> 4 bytes: float / 8 bytes: double / other sizes return NaN\n \"\"\"\n v = self._value_as_bytes()\n if len(v) == 4:\n return struct.unpack(\"!f\", v)[0]\n elif len(v) == 8:\n return struct.unpack(\"!d\", v)[0]\n # no need to raise an Exception\n raise TypeError(\"can not be converted to floatingpoint representation\")\n\n def _value_as_bytes(self) -> bytes:\n if isinstance(self.wire_value, bytes):\n return self.wire_value\n elif isinstance(self.wire_value, int):\n if self.wire_value.bit_length() > 64:\n # source for a python int are wiretypes varint/bit_32/bit64 and should never convert to int values 64bit\n # currently avoided by kaitai decoder (can not be added to tests)\n raise ValueError(\"value exceeds 64bit, violating protobuf specs\")\n elif self.wire_value.bit_length() > 32:\n # packing uses network byte order (to assure consistent results across architectures)\n return struct.pack(\"!Q\", self.wire_value)\n else:\n # packing uses network byte order (to assure consistent results across architectures)\n return struct.pack(\"!I\", self.wire_value)\n else:\n # should never happen, no tests\n raise ValueError(\"can not be converted to bytes\")\n\n def _wire_type_str(self):\n return str(self.wire_type).split(\".\")[-1]\n\n def _decoding_str(self, decoding: ProtoParser.DecodedTypes):\n return str(decoding).split(\".\")[-1]\n\n def wire_value_as_utf8(self, escape_newline=True) -> str:\n if isinstance(self.wire_value, bytes):\n res = self.wire_value.decode(\"utf-8\")\n return res.replace(\"\\n\", \"\\\\n\") if escape_newline else res\n return str(self.wire_value)\n\n def gen_flat_decoded_field_dicts(self) -> Generator[dict, None, None]:\n \"\"\"\n Returns a generator which passes the field as a dict.\n\n In order to return the field value it gets decoded (based on a failover strategy and\n provided ParserRules).\n If the field holds a nested message, the fields contained in the message are appended.\n Ultimately this flattens all fields recursively.\n \"\"\"\n selected_decoding, decoded_val = self.safe_decode_as(\n self.preferred_decoding, self.try_unpack\n )\n field_desc_dict = {\n \"tag\": self._gen_tag_str(),\n \"wireType\": self._wire_type_str(),\n \"decoding\": self._decoding_str(selected_decoding),\n \"name\": self.name,\n }\n if isinstance(decoded_val, list):\n if (\n selected_decoding\n == ProtoParser.DecodedTypes.message # field is a message with subfields\n and not self.is_packed_parent # field is a message, but replaced by packed fields\n ):\n # Field is a message, not packed, thus include it as message header\n field_desc_dict[\"val\"] = \"\"\n yield field_desc_dict\n # add sub-fields of messages or packed fields\n for f in decoded_val:\n yield from f.gen_flat_decoded_field_dicts()\n else:\n field_desc_dict[\"val\"] = decoded_val\n yield field_desc_dict\n\n def __init__(\n self,\n data: bytes,\n rules: list[ProtoParser.ParserRule] = None,\n parser_options: ParserOptions = None,\n ) -> None:\n self.data: bytes = data\n if parser_options is None:\n 
parser_options = ProtoParser.ParserOptions()\n self.options = parser_options\n if rules is None:\n rules = []\n self.rules = rules\n\n try:\n self.root_fields: list[ProtoParser.Field] = ProtoParser.read_fields(\n wire_data=self.data,\n options=self.options,\n parent_field=None,\n rules=self.rules,\n )\n except Exception as e:\n raise ValueError(\"not a valid protobuf message\") from e\n\n def gen_flat_decoded_field_dicts(self) -> Generator[dict, None, None]:\n for f in self.root_fields:\n yield from f.gen_flat_decoded_field_dicts()\n\n def gen_str_rows(self) -> Generator[tuple[str, ...], None, None]:\n for field_dict in self.gen_flat_decoded_field_dicts():\n if (\n self.options.exclude_message_headers\n and field_dict[\"decoding\"] == \"message\"\n ):\n continue\n\n if self.options.include_wiretype:\n col1 = \"[{}->{}]\".format(field_dict[\"wireType\"], field_dict[\"decoding\"])\n else:\n col1 = \"[{}]\".format(field_dict[\"decoding\"])\n col2 = field_dict[\"name\"] # empty string if not set (consumes no space)\n col3 = field_dict[\"tag\"]\n col4 = str(field_dict[\"val\"])\n yield col1, col2, col3, col4\n\n\n# Note: all content view formating functionality is kept out of the ProtoParser class, to\n# allow it to be use independently.\n# This function is generic enough, to consider moving it to mitmproxy.contentviews.base\ndef format_table(\n table_rows: Iterable[tuple[str, ...]],\n max_col_width=100,\n) -> Iterator[base.TViewLine]:\n \"\"\"\n Helper function to render tables with variable column count (move to contentview base, if needed elsewhere)\n\n Note: The function has to convert generators to a list, as all rows have to be processed twice (to determine\n the column widths first).\n \"\"\"\n rows: list[tuple[str, ...]] = []\n col_count = 0\n cols_width: list[int] = []\n for row in table_rows:\n col_count = max(col_count, len(row))\n while len(cols_width) < col_count:\n cols_width.append(0)\n for col_num in range(len(row)):\n cols_width[col_num] = max(len(row[col_num]), cols_width[col_num])\n\n # store row in list\n rows.append(row)\n\n for i in range(len(cols_width)):\n cols_width[i] = min(cols_width[i], max_col_width)\n\n for row in rows:\n line: base.TViewLine = []\n for col_num in range(len(row)):\n col_val = row[col_num].ljust(cols_width[col_num] + 2)\n line.append((\"text\", col_val))\n yield line\n\n\ndef parse_grpc_messages(\n data, compression_scheme\n) -> Generator[tuple[bool, bytes], None, None]:\n \"\"\"Generator iterates over body data and returns a boolean indicating if the messages\n was compressed, along with the raw message data (decompressed) for each gRPC message\n contained in the body data\"\"\"\n while data:\n try:\n msg_is_compressed, length = struct.unpack(\"!?i\", data[:5])\n decoded_message = struct.unpack(\"!%is\" % length, data[5 : 5 + length])[0]\n except Exception as e:\n raise ValueError(\"invalid gRPC message\") from e\n\n if msg_is_compressed:\n try:\n decoded_message = decode(\n encoded=decoded_message, encoding=compression_scheme\n )\n except Exception as e:\n raise ValueError(\"Failed to decompress gRPC message with gzip\") from e\n\n yield msg_is_compressed, decoded_message\n data = data[5 + length :]\n\n\n# hacky fix for mitmproxy issue:\n#\n# mitmproxy handles Exceptions in the contenview's __call__ function, by\n# failing over to 'Raw' view. 
The intention was to use this behavior to\n# pass up Exceptions thrown inside the generator function ('format_pbuf'\n# and 'format_grpc') to the __call__ function.\n# This usually works fine if the contentview is initialized on a flow\n# with invalid data.\n# When the flow data gets invalidated in the edit mode, mitmproxy re-calls\n# the generator functions outside the contentviews '__call__' method.\n#\n# This happens in the 'safe_to_print' function of 'mitmproxy/contentvies/__init__.py'\n#\n# def safe_to_print(lines, encoding=\"utf8\"):\n# \"\"\"\n# Wraps a content generator so that each text portion is a *safe to print* unicode string.\n# \"\"\"\n# for line in lines: # <------ this code re-iterates lines and thus calls generators, without using the views __call__ function\n# clean_line = []\n# for (style, text) in line:\n# if isinstance(text, bytes):\n# text = text.decode(encoding, \"replace\")\n# text = strutils.escape_control_characters(text)\n# clean_line.append((style, text))\n# yield clean_line\n#\n# In result, mitmproxy crashes if the generator functions raise Exception to indicate\n# data parsing errors.\n# To deal with this, the generator function gets converted into a list inside the\n# __call__ function. Ultimately, exceptions are raised directly from within __call__\n# instead in cases where the generator is accessed externally without exception handling.\ndef hack_generator_to_list(generator_func):\n return list(generator_func)\n\n\ndef format_pbuf(\n message: bytes,\n parser_options: ProtoParser.ParserOptions,\n rules: list[ProtoParser.ParserRule],\n):\n yield from format_table(\n ProtoParser(\n data=message, parser_options=parser_options, rules=rules\n ).gen_str_rows()\n )\n\n\ndef format_grpc(\n data: bytes,\n parser_options: ProtoParser.ParserOptions,\n rules: list[ProtoParser.ParserRule],\n compression_scheme=\"gzip\",\n):\n message_count = 0\n for compressed, pb_message in parse_grpc_messages(\n data=data, compression_scheme=compression_scheme\n ):\n headline = (\n \"gRPC message \"\n + str(message_count)\n + \" (compressed \"\n + str(compression_scheme if compressed else compressed)\n + \")\"\n )\n\n yield [(\"text\", headline)]\n yield from format_pbuf(\n message=pb_message, parser_options=parser_options, rules=rules\n )\n\n\n@dataclass\nclass ViewConfig:\n parser_options: ProtoParser.ParserOptions = ProtoParser.ParserOptions()\n parser_rules: list[ProtoParser.ParserRule] = field(default_factory=list)\n\n\nclass ViewGrpcProtobuf(base.View):\n \"\"\"Human friendly view of protocol buffers\"\"\"\n\n name = \"gRPC/Protocol Buffer\"\n __content_types_pb = [\n \"application/x-protobuf\",\n \"application/x-protobuffer\",\n \"application/grpc-proto\",\n ]\n __content_types_grpc = [\n \"application/grpc\",\n ]\n\n # first value serves as default algorithm for compressed messages, if 'grpc-encoding' header is missing\n __valid_grpc_encodings = [\n \"gzip\",\n \"identity\",\n \"deflate\",\n ]\n\n # allows to take external ParserOptions object. 
goes with defaults otherwise\n def __init__(self, config: ViewConfig = None) -> None:\n super().__init__()\n if config is None:\n config = ViewConfig()\n self.config = config\n\n def _matching_rules(\n self,\n rules: list[ProtoParser.ParserRule],\n message: http.Message | None,\n flow: flow.Flow | None,\n ) -> list[ProtoParser.ParserRule]:\n \"\"\"\n Checks which of the give rules applies and returns a List only containing those rules\n\n Each rule defines a flow filter in rule.filter which is usually matched against a flow.\n When it comes to protobuf parsing, in most cases request messages differ from response messages.\n Thus, it has to be possible to apply a rule to a http.Request or a http.Response, only.\n\n As the name flowfilter suggests, filters are working on a flow-level, not on message-level.\n This means:\n\n - the filter expression '~q' matches all flows with a request, but no response\n - the filter expression '~s' matches all flows with a response\n\n In result, for complete flows (with a gRPC message in the request and the response), ParserRules would\n either be applied to request and response at the same time ('~s') or neither would match request, nor\n response (~q).\n\n To distinguish between rules which should be applied to response messages, request messages or both\n (while being applied to the whole flow), different classes with same behavior are used to wrap rules:\n\n - ParserRule: applies to requests and responses\n - ParserRuleRequest: applies to requests only\n - ParserRuleResponse: applies to responses only\n \"\"\"\n res: list[ProtoParser.ParserRule] = []\n if not flow:\n return res\n is_request = isinstance(message, http.Request)\n for rule in rules:\n # message based rule matching\n if is_request and isinstance(rule, ProtoParser.ParserRuleResponse):\n continue\n elif not is_request and isinstance(rule, ProtoParser.ParserRuleRequest):\n continue\n # flow based rule matching\n if flowfilter.match(rule.filter, flow=flow):\n res.append(rule)\n return res\n\n def __call__(\n self,\n data: bytes,\n *,\n content_type: str | None = None,\n flow: flow.Flow | None = None,\n http_message: http.Message | None = None,\n **unknown_metadata,\n ) -> contentviews.TViewResult:\n applicabble_rules = self._matching_rules(\n rules=self.config.parser_rules, flow=flow, message=http_message\n )\n if content_type in self.__content_types_grpc:\n # If gRPC messages are flagged to be compressed, the compression algorithm is expressed in the\n # 'grpc-encoding' header.\n #\n # The following code tries to determine the compression algorithm base on this header.\n # If the header is not present or contains an unsupported compression, the logic falls back to\n # 'gzip'.\n #\n # If a compressed gRPC message is found in the body data (compressed flag set), the information\n # on the compression scheme is needed (even if not set by a header), in order to process the message.\n # Thus we assure there is always an encoding selected. 
An encoding of 'Identity' would not make\n # sense, if a message is flagged as being compressed, that's why a default is chosen.\n try:\n assert http_message is not None\n h = http_message.headers[\"grpc-encoding\"]\n grpc_encoding = (\n h\n if h in self.__valid_grpc_encodings\n else self.__valid_grpc_encodings[0]\n )\n except:\n grpc_encoding = self.__valid_grpc_encodings[0]\n\n text_iter = format_grpc(\n data=data,\n parser_options=self.config.parser_options,\n compression_scheme=grpc_encoding,\n rules=applicabble_rules,\n )\n title = \"gRPC\"\n else:\n text_iter = format_pbuf(\n message=data,\n parser_options=self.config.parser_options,\n rules=applicabble_rules,\n )\n title = \"Protobuf (flattened)\"\n\n # hacky bugfix, see description above generator functions format_pbuf/format_grpc\n try:\n text_iter = hack_generator_to_list(text_iter)\n except Exception as e:\n # hook to log exception tracebacks on iterators\n\n # import traceback\n # ctx.log.warn(\"gRPC contentview: {}\".format(traceback.format_exc()))\n raise e\n\n return title, text_iter\n\n def render_priority(\n self,\n data: bytes,\n *,\n content_type: str | None = None,\n flow: flow.Flow | None = None,\n http_message: http.Message | None = None,\n **unknown_metadata,\n ) -> float:\n\n if bool(data) and content_type in self.__content_types_grpc:\n return 1\n if bool(data) and content_type in self.__content_types_pb:\n # replace existing protobuf renderer preference (adjust by option)\n return 1.5\n else:\n return 0\n", "path": "mitmproxy/contentviews/grpc.py" } ]
[ { "content": "from __future__ import annotations\n\nimport struct\nfrom dataclasses import dataclass, field\nfrom enum import Enum\nfrom typing import Generator, Iterable, Iterator\n\nfrom mitmproxy import contentviews, ctx, flow, flowfilter, http\nfrom mitmproxy.contentviews import base\nfrom mitmproxy.net.encoding import decode\n\n\nclass ProtoParser:\n @dataclass\n class ParserRule:\n \"\"\"\n A parser rule lists Field definitions which are applied if the filter rule matches the flow.\n\n Matching on flow-level also means, a match applies to request AND response messages.\n To restrict a rule to a requests only use 'ParserRuleRequest', instead.\n To restrict a rule to a responses only use 'ParserRuleResponse', instead.\n \"\"\"\n\n field_definitions: list[ProtoParser.ParserFieldDefinition]\n \"\"\"List of field definitions for this rule \"\"\"\n\n name: str = \"\"\n \"\"\"Name of this rule, only used for debugging\"\"\"\n\n filter: str = \"\"\n \"\"\"\n Flowfilter to select which flows to apply to ('~q' and '~s' can not be used to distinguish\n if the rule should apply to the request or response of a flow. To do so, use ParserRuleRequest\n or ParserRuleResponse. ParserRule always applies to request and response.)\n \"\"\"\n\n @dataclass\n class ParserRuleResponse(ParserRule):\n \"\"\"\n A parser rule lists Field definitions which are applied if the filter rule matches the flow.\n\n The rule only applies if the processed message is a server response.\n \"\"\"\n\n @dataclass\n class ParserRuleRequest(ParserRule):\n \"\"\"\n A parser rule lists Field definitions which are applied if the filter rule matches the flow.\n\n The rule only applies if the processed message is a client request.\n \"\"\"\n\n @dataclass\n class ParserFieldDefinition:\n \"\"\"\n Defines how to parse a field (or multiple fields with the same tag) in a protobuf messages.\n\n This allows to apply an intended decoding (f.e. decode uint64 as double instead) and to assign\n a descriptive name to a field. Field definitions are aggregated into rules, which also holds\n a filter to match selected HTTP messages.\n\n The most natural way to use this, is to describe known parts of a single protobuf message\n in a set of field descriptors, pack them into a rule and set the filter of the rule in a way,\n that it only applies to proper protobuf messages (f.e. to request traffic against an API endpoint\n matched by an URL flowfilter)\n \"\"\"\n\n # A 'tag' could be considered as \"absolute path\" to match a unique field, yet\n # protobuf allows to uses the same nested message in different positions of the parent message\n # The 'tag_prefixes' parameter allows to apply the field definition to different \"leafs nodes\"\n # of a message.\n #\n # Example 1: match a single, absolute tag\n # ----------\n # tag = '1.2'\n # tag_prefixes = [] (default)\n #\n # applies to: tag '1.2'\n #\n # Example 2: match multiple tags with same ending\n # ----------\n # tag = '1.3'\n # tag_prefixes = ['1.2.', '2.5.']\n #\n # applies to: tag '1.2.1.3' and tag '2.5.1.3'\n # does not apply to: '1.3', unless tag_prefixes is extended to tag_prefixes = ['1.2', '2.5', '']\n #\n # Example 3: match multiple tags\n # ----------\n # tag = ''\n # tag_prefixes = ['1.2', '2.5']\n #\n # applies to: tag '1.2' and tag '1.5'\n\n tag: str\n \"\"\"Field tag for which this description applies (including flattened tag path, f.e. '1.2.2.4')\"\"\"\n\n tag_prefixes: list[str] = field(default_factory=list)\n \"\"\"List of prefixes for tag matching (f.e. 
tag_prefixes=['1.2.', '2.2.'] with tag='1' matches '1.2.1' and '2.2.1')\"\"\"\n\n intended_decoding: ProtoParser.DecodedTypes | None = None\n \"\"\"optional: intended decoding for visualization (parser fails over to alternate decoding if not possible)\"\"\"\n\n name: str | None = None\n \"\"\"optional: intended field for visualization (parser fails over to alternate decoding if not possible)\"\"\"\n\n as_packed: bool | None = False\n \"\"\"optional: if set to true, the field is considered to be repeated and packed\"\"\"\n\n @dataclass\n class ParserOptions:\n # output should contain wiretype of fields\n include_wiretype: bool = False\n\n # output should contain the fields which describe nested messages\n # (the nested messages bodies are always included, but the \"header fields\" could\n # add unnecessary output overhead)\n exclude_message_headers: bool = False\n\n # optional: rules\n # rules: List[ProtoParser.ParserRule] = field(default_factory=list)\n\n class DecodedTypes(Enum):\n # varint\n int32 = 0\n int64 = 1\n uint32 = 2\n uint64 = 3\n sint32 = 4 # ZigZag encoding\n sint64 = 5 # ZigZag encoding\n bool = 6\n enum = 7\n # bit_32\n fixed32 = 8\n sfixed32 = 9\n float = 10\n # bit_64\n fixed64 = 11\n sfixed64 = 12\n double = 13\n # len_delimited\n string = 14\n bytes = 15\n message = 16\n\n # helper\n unknown = 17\n\n @staticmethod\n def _read_base128le(data: bytes) -> tuple[int, int]:\n res = 0\n offset = 0\n while offset < len(data):\n o = data[offset]\n res += (o & 0x7F) << (7 * offset)\n offset += 1\n if o < 0x80:\n # the Kaitai parser for protobuf support base128 le values up\n # to 8 groups (bytes). Due to the nature of the encoding, each\n # group attributes 7bit to the resulting value, which give\n # a 56 bit value at maximum.\n # The values which get encoded into protobuf variable length integers,\n # on the other hand, include full 64bit types (int64, uint64, sint64).\n # This means, the Kaitai encoder can not cover the full range of\n # possible values\n #\n # This decoder puts no limitation on the maximum value of variable\n # length integers. 
Values exceeding 64bit have to be handled externally\n return offset, res\n raise ValueError(\"varint exceeds bounds of provided data\")\n\n @staticmethod\n def _read_u32(data: bytes) -> tuple[int, int]:\n return 4, struct.unpack(\"<I\", data[:4])[0]\n\n @staticmethod\n def _read_u64(data: bytes) -> tuple[int, int]:\n return 8, struct.unpack(\"<Q\", data[:8])[0]\n\n class WireTypes(Enum):\n varint = 0\n bit_64 = 1\n len_delimited = 2\n group_start = 3\n group_end = 4\n bit_32 = 5\n\n @staticmethod\n def read_fields(\n wire_data: bytes,\n parent_field: ProtoParser.Field | None,\n options: ProtoParser.ParserOptions,\n rules: list[ProtoParser.ParserRule],\n ) -> list[ProtoParser.Field]:\n res: list[ProtoParser.Field] = []\n pos = 0\n while pos < len(wire_data):\n # read field key (tag and wire_type)\n offset, key = ProtoParser._read_base128le(wire_data[pos:])\n # casting raises exception for invalid WireTypes\n wt = ProtoParser.WireTypes(key & 7)\n tag = key >> 3\n pos += offset\n\n val: bytes | int\n preferred_decoding: ProtoParser.DecodedTypes\n if wt == ProtoParser.WireTypes.varint:\n offset, val = ProtoParser._read_base128le(wire_data[pos:])\n pos += offset\n bl = val.bit_length()\n if bl > 64:\n preferred_decoding = ProtoParser.DecodedTypes.unknown\n if bl > 32:\n preferred_decoding = ProtoParser.DecodedTypes.uint64\n else:\n preferred_decoding = ProtoParser.DecodedTypes.uint32\n elif wt == ProtoParser.WireTypes.bit_64:\n offset, val = ProtoParser._read_u64(wire_data[pos:])\n pos += offset\n preferred_decoding = ProtoParser.DecodedTypes.fixed64\n elif wt == ProtoParser.WireTypes.len_delimited:\n offset, length = ProtoParser._read_base128le(wire_data[pos:])\n pos += offset\n if length > len(wire_data[pos:]):\n raise ValueError(\"length delimited field exceeds data size\")\n val = wire_data[pos : pos + length]\n pos += length\n preferred_decoding = ProtoParser.DecodedTypes.message\n elif (\n wt == ProtoParser.WireTypes.group_start\n or wt == ProtoParser.WireTypes.group_end\n ):\n raise ValueError(f\"deprecated field: {wt}\")\n elif wt == ProtoParser.WireTypes.bit_32:\n offset, val = ProtoParser._read_u32(wire_data[pos:])\n pos += offset\n preferred_decoding = ProtoParser.DecodedTypes.fixed32\n else:\n # not reachable as if-else statements contain all possible WireTypes\n # wrong types raise Exception during typecasting in `wt = ProtoParser.WireTypes((key & 7))`\n raise ValueError(\"invalid WireType for protobuf messsage field\")\n\n field = ProtoParser.Field(\n wire_type=wt,\n preferred_decoding=preferred_decoding,\n options=options,\n rules=rules,\n tag=tag,\n wire_value=val,\n parent_field=parent_field,\n )\n res.append(field)\n\n return res\n\n @staticmethod\n def read_packed_fields(\n packed_field: ProtoParser.Field,\n ) -> list[ProtoParser.Field]:\n if not isinstance(packed_field.wire_value, bytes):\n ctx.log(type(packed_field.wire_value))\n raise ValueError(\"can not unpack field with data other than bytes\")\n wire_data: bytes = packed_field.wire_value\n tag: int = packed_field.tag\n options: ProtoParser.ParserOptions = packed_field.options\n rules: list[ProtoParser.ParserRule] = packed_field.rules\n intended_decoding: ProtoParser.DecodedTypes = packed_field.preferred_decoding\n\n # the packed field has to have WireType length delimited, whereas the contained\n # individual types have to have a different WireType, which is derived from\n # the intended decoding\n if (\n packed_field.wire_type != ProtoParser.WireTypes.len_delimited\n or not isinstance(packed_field.wire_value, 
bytes)\n ):\n raise ValueError(\n \"packed fields have to be embedded in a length delimited message\"\n )\n # wiretype to read has to be determined from intended decoding\n packed_wire_type: ProtoParser.WireTypes\n if (\n intended_decoding == ProtoParser.DecodedTypes.int32\n or intended_decoding == ProtoParser.DecodedTypes.int64\n or intended_decoding == ProtoParser.DecodedTypes.uint32\n or intended_decoding == ProtoParser.DecodedTypes.uint64\n or intended_decoding == ProtoParser.DecodedTypes.sint32\n or intended_decoding == ProtoParser.DecodedTypes.sint64\n or intended_decoding == ProtoParser.DecodedTypes.bool\n or intended_decoding == ProtoParser.DecodedTypes.enum\n ):\n packed_wire_type = ProtoParser.WireTypes.varint\n elif (\n intended_decoding == ProtoParser.DecodedTypes.fixed32\n or intended_decoding == ProtoParser.DecodedTypes.sfixed32\n or intended_decoding == ProtoParser.DecodedTypes.float\n ):\n packed_wire_type = ProtoParser.WireTypes.bit_32\n elif (\n intended_decoding == ProtoParser.DecodedTypes.fixed64\n or intended_decoding == ProtoParser.DecodedTypes.sfixed64\n or intended_decoding == ProtoParser.DecodedTypes.double\n ):\n packed_wire_type = ProtoParser.WireTypes.bit_64\n elif (\n intended_decoding == ProtoParser.DecodedTypes.string\n or intended_decoding == ProtoParser.DecodedTypes.bytes\n or intended_decoding == ProtoParser.DecodedTypes.message\n ):\n packed_wire_type = ProtoParser.WireTypes.len_delimited\n else:\n # should never happen, no test\n raise TypeError(\n \"Wire type could not be determined from packed decoding type\"\n )\n\n res: list[ProtoParser.Field] = []\n pos = 0\n val: bytes | int\n if packed_wire_type == ProtoParser.WireTypes.varint:\n while pos < len(wire_data):\n offset, val = ProtoParser._read_base128le(wire_data[pos:])\n pos += offset\n res.append(\n ProtoParser.Field(\n options=options,\n preferred_decoding=intended_decoding,\n rules=rules,\n tag=tag,\n wire_type=packed_wire_type,\n wire_value=val,\n parent_field=packed_field.parent_field,\n is_unpacked_children=True,\n )\n )\n elif packed_wire_type == ProtoParser.WireTypes.bit_64:\n if len(wire_data) % 8 != 0:\n raise ValueError(\"can not parse as packed bit64\")\n while pos < len(wire_data):\n offset, val = ProtoParser._read_u64(wire_data[pos:])\n pos += offset\n res.append(\n ProtoParser.Field(\n options=options,\n preferred_decoding=intended_decoding,\n rules=rules,\n tag=tag,\n wire_type=packed_wire_type,\n wire_value=val,\n parent_field=packed_field.parent_field,\n is_unpacked_children=True,\n )\n )\n elif packed_wire_type == ProtoParser.WireTypes.len_delimited:\n while pos < len(wire_data):\n offset, length = ProtoParser._read_base128le(wire_data[pos:])\n pos += offset\n val = wire_data[pos : pos + length]\n if length > len(wire_data[pos:]):\n raise ValueError(\"packed length delimited field exceeds data size\")\n res.append(\n ProtoParser.Field(\n options=options,\n preferred_decoding=intended_decoding,\n rules=rules,\n tag=tag,\n wire_type=packed_wire_type,\n wire_value=val,\n parent_field=packed_field.parent_field,\n is_unpacked_children=True,\n )\n )\n pos += length\n elif (\n packed_wire_type == ProtoParser.WireTypes.group_start\n or packed_wire_type == ProtoParser.WireTypes.group_end\n ):\n raise ValueError(\"group tags can not be encoded packed\")\n elif packed_wire_type == ProtoParser.WireTypes.bit_32:\n if len(wire_data) % 4 != 0:\n raise ValueError(\"can not parse as packed bit32\")\n while pos < len(wire_data):\n offset, val = ProtoParser._read_u32(wire_data[pos:])\n pos += 
offset\n res.append(\n ProtoParser.Field(\n options=options,\n preferred_decoding=intended_decoding,\n rules=rules,\n tag=tag,\n wire_type=packed_wire_type,\n wire_value=val,\n parent_field=packed_field.parent_field,\n is_unpacked_children=True,\n )\n )\n else:\n # should never happen\n raise ValueError(\"invalid WireType for protobuf messsage field\")\n\n # mark parent field as packed parent (if we got here, unpacking succeeded)\n packed_field.is_packed_parent = True\n return res\n\n class Field:\n \"\"\"\n Represents a single field of a protobuf message and handles the varios encodings.\n\n As mitmproxy sees the data passing by as raw protobuf message, it only knows the\n WireTypes. Each of the WireTypes could represent different Protobuf field types.\n The exact Protobuf field type can not be determined from the wire format, thus different\n options for decoding have to be supported.\n In addition the parsed WireTypes are (intermediary) stored in Python types, which adds\n some additional overhead type conversions.\n\n WireType represented Protobuf Types Python type (intermediary)\n\n 0: varint int32, int64, uint32, uint64, enum, int (*)\n sint32, sint64 (both ZigZag encoded), int\n bool bool\n float (**)\n\n 1: bit_64 fixed64, sfixed64, int (*)\n double float\n\n 2: len_delimited string, str\n message, class 'Message'\n bytes, bytes (*)\n packed_repeated_field class 'Message' (fields with same tag)\n\n 3: group_start unused (deprecated) -\n 4: group_end unused (deprecated) -\n\n 5: bit_32 fixed32, sfixed32, int (*)\n float float\n\n (*) Note 1: Conversion between WireType and intermediary python representation\n is handled by Kaitai protobuf decoder and always uses the python\n representation marked with (*). Converting to alternative representations\n is handled inside this class.\n (**) Note 2: Varint is not used to represent floating point values, but some applications\n store native floats in uint32 protobuf types (or native double in uint64).\n Thus we allow conversion of varint to floating point values for convenience\n (A well known APIs \"hide\" GPS latitude and longitude values in varint types,\n much easier to spot such things when rendered as float)\n\n Ref: - https://developers.google.com/protocol-buffers/docs/proto3\n - https://developers.google.com/protocol-buffers/docs/encoding\n \"\"\"\n\n def __init__(\n self,\n wire_type: ProtoParser.WireTypes,\n preferred_decoding: ProtoParser.DecodedTypes,\n tag: int,\n parent_field: ProtoParser.Field | None,\n wire_value: int | bytes,\n options: ProtoParser.ParserOptions,\n rules: list[ProtoParser.ParserRule],\n is_unpacked_children: bool = False,\n ) -> None:\n self.wire_type: ProtoParser.WireTypes = wire_type\n self.preferred_decoding: ProtoParser.DecodedTypes = preferred_decoding\n self.wire_value: int | bytes = wire_value\n self.tag: int = tag\n self.options: ProtoParser.ParserOptions = options\n self.name: str = \"\"\n self.rules: list[ProtoParser.ParserRule] = rules\n self.parent_field: ProtoParser.Field | None = parent_field\n self.is_unpacked_children: bool = (\n is_unpacked_children # marks field as being a result of unpacking\n )\n self.is_packed_parent: bool = (\n False # marks field as being parent of successfully unpacked children\n )\n self.parent_tags: list[int] = []\n if self.parent_field is not None:\n self.parent_tags = self.parent_field.parent_tags[:]\n self.parent_tags.append(self.parent_field.tag)\n self.try_unpack = False\n\n # rules can overwrite self.try_unpack\n self.apply_rules()\n # do not unpack fields 
which are the result of unpacking\n if parent_field is not None and self.is_unpacked_children:\n self.try_unpack = False\n\n # no tests for only_first_hit=False, as not user-changable\n def apply_rules(self, only_first_hit=True):\n tag_str = self._gen_tag_str()\n name = None\n decoding = None\n as_packed = False\n try:\n for rule in self.rules:\n for fd in rule.field_definitions:\n match = False\n if len(fd.tag_prefixes) == 0 and fd.tag == tag_str:\n match = True\n else:\n for rt in fd.tag_prefixes:\n if rt + fd.tag == tag_str:\n match = True\n break\n if match:\n if only_first_hit:\n # only first match\n self.name = fd.name\n self.preferred_decoding = fd.intended_decoding\n self.try_unpack = fd.as_packed\n return\n else:\n # overwrite matches till last rule was inspected\n # (f.e. allows to define name in one rule and intended_decoding in another one)\n name = fd.name if fd.name else name\n decoding = (\n fd.intended_decoding\n if fd.intended_decoding\n else decoding\n )\n if fd.as_packed:\n as_packed = True\n\n if name:\n self.name = name\n if decoding:\n self.preferred_decoding = decoding\n self.try_unpack = as_packed\n except Exception as e:\n ctx.log.warn(e)\n\n def _gen_tag_str(self):\n tags = self.parent_tags[:]\n tags.append(self.tag)\n return \".\".join([str(tag) for tag in tags])\n\n def safe_decode_as(\n self,\n intended_decoding: ProtoParser.DecodedTypes,\n try_as_packed: bool = False,\n ) -> tuple[\n ProtoParser.DecodedTypes,\n bool | float | int | bytes | str | list[ProtoParser.Field],\n ]:\n \"\"\"\n Tries to decode as intended, applies failover, if not possible\n\n Returns selected decoding and decoded value\n \"\"\"\n if self.wire_type == ProtoParser.WireTypes.varint:\n try:\n return intended_decoding, self.decode_as(\n intended_decoding, try_as_packed\n )\n except:\n if int(self.wire_value).bit_length() > 32:\n # ignore the fact that varint could exceed 64bit (would violate the specs)\n return ProtoParser.DecodedTypes.uint64, self.wire_value\n else:\n return ProtoParser.DecodedTypes.uint32, self.wire_value\n elif self.wire_type == ProtoParser.WireTypes.bit_64:\n try:\n return intended_decoding, self.decode_as(\n intended_decoding, try_as_packed\n )\n except:\n return ProtoParser.DecodedTypes.fixed64, self.wire_value\n elif self.wire_type == ProtoParser.WireTypes.bit_32:\n try:\n return intended_decoding, self.decode_as(\n intended_decoding, try_as_packed\n )\n except:\n return ProtoParser.DecodedTypes.fixed32, self.wire_value\n elif self.wire_type == ProtoParser.WireTypes.len_delimited:\n try:\n return intended_decoding, self.decode_as(\n intended_decoding, try_as_packed\n )\n except:\n # failover strategy: message --> string (valid UTF-8) --> bytes\n len_delimited_strategy: list[ProtoParser.DecodedTypes] = [\n ProtoParser.DecodedTypes.message,\n ProtoParser.DecodedTypes.string,\n ProtoParser.DecodedTypes.bytes, # should always work\n ]\n for failover_decoding in len_delimited_strategy:\n if failover_decoding == intended_decoding and not try_as_packed:\n # don't try same decoding twice, unless first attempt was packed\n continue\n try:\n return failover_decoding, self.decode_as(\n failover_decoding, False\n )\n except:\n pass\n\n # we should never get here (could not be added to tests)\n return ProtoParser.DecodedTypes.unknown, self.wire_value\n\n def decode_as(\n self, intended_decoding: ProtoParser.DecodedTypes, as_packed: bool = False\n ) -> bool | int | float | bytes | str | list[ProtoParser.Field]:\n if as_packed is True:\n return 
ProtoParser.read_packed_fields(packed_field=self)\n\n if self.wire_type == ProtoParser.WireTypes.varint:\n assert isinstance(self.wire_value, int)\n if intended_decoding == ProtoParser.DecodedTypes.bool:\n # clamp result to 64bit\n return self.wire_value & 0xFFFFFFFFFFFFFFFF != 0\n elif intended_decoding == ProtoParser.DecodedTypes.int32:\n if self.wire_value.bit_length() > 32:\n raise TypeError(\"wire value too large for int32\")\n return struct.unpack(\"!i\", struct.pack(\"!I\", self.wire_value))[0]\n elif intended_decoding == ProtoParser.DecodedTypes.int64:\n if self.wire_value.bit_length() > 64:\n raise TypeError(\"wire value too large for int64\")\n return struct.unpack(\"!q\", struct.pack(\"!Q\", self.wire_value))[0]\n elif intended_decoding == ProtoParser.DecodedTypes.uint32:\n if self.wire_value.bit_length() > 32:\n raise TypeError(\"wire value too large for uint32\")\n return self.wire_value # already 'int' which was parsed as unsigned\n elif (\n intended_decoding == ProtoParser.DecodedTypes.uint64\n or intended_decoding == ProtoParser.DecodedTypes.enum\n ):\n if self.wire_value.bit_length() > 64:\n raise TypeError(\"wire value too large\")\n return self.wire_value # already 'int' which was parsed as unsigned\n elif intended_decoding == ProtoParser.DecodedTypes.sint32:\n if self.wire_value.bit_length() > 32:\n raise TypeError(\"wire value too large for sint32\")\n return (self.wire_value >> 1) ^ -(\n self.wire_value & 1\n ) # zigzag_decode\n elif intended_decoding == ProtoParser.DecodedTypes.sint64:\n if self.wire_value.bit_length() > 64:\n raise TypeError(\"wire value too large for sint64\")\n # ZigZag decode\n # Ref: https://gist.github.com/mfuerstenau/ba870a29e16536fdbaba\n return (self.wire_value >> 1) ^ -(self.wire_value & 1)\n elif (\n intended_decoding == ProtoParser.DecodedTypes.float\n or intended_decoding == ProtoParser.DecodedTypes.double\n ):\n # special case, not complying to protobuf specs\n return self._wire_value_as_float()\n elif self.wire_type == ProtoParser.WireTypes.bit_64:\n if intended_decoding == ProtoParser.DecodedTypes.fixed64:\n return self.wire_value\n elif intended_decoding == ProtoParser.DecodedTypes.sfixed64:\n return struct.unpack(\"!q\", struct.pack(\"!Q\", self.wire_value))[0]\n elif intended_decoding == ProtoParser.DecodedTypes.double:\n return self._wire_value_as_float()\n elif self.wire_type == ProtoParser.WireTypes.bit_32:\n if intended_decoding == ProtoParser.DecodedTypes.fixed32:\n return self.wire_value\n elif intended_decoding == ProtoParser.DecodedTypes.sfixed32:\n return struct.unpack(\"!i\", struct.pack(\"!I\", self.wire_value))[0]\n elif intended_decoding == ProtoParser.DecodedTypes.float:\n return self._wire_value_as_float()\n elif self.wire_type == ProtoParser.WireTypes.len_delimited:\n assert isinstance(self.wire_value, bytes)\n if intended_decoding == ProtoParser.DecodedTypes.string:\n # According to specs, a protobuf string HAS TO be UTF-8 parsable\n # throw exception on invalid UTF-8 chars, but escape linebreaks\n return self.wire_value_as_utf8(escape_newline=True)\n elif intended_decoding == ProtoParser.DecodedTypes.bytes:\n # always works, assure to hand back a copy\n return self.wire_value[:]\n elif intended_decoding == ProtoParser.DecodedTypes.message:\n return ProtoParser.read_fields(\n wire_data=self.wire_value,\n parent_field=self,\n options=self.options,\n rules=self.rules,\n )\n\n # if here, there is no valid decoding\n raise TypeError(\"intended decoding mismatches wire type\")\n\n def encode_from(inputval, 
intended_encoding: ProtoParser.DecodedTypes):\n raise NotImplementedError(\n \"Future work, needed to manipulate and re-encode protobuf message, with respect to given wire types\"\n )\n\n def _wire_value_as_float(self) -> float:\n \"\"\"\n Handles double (64bit) and float (32bit).\n Assumes Network Byte Order (big endian).\n\n Usable for:\n\n WireType --> Protobuf Type):\n ----------------------------\n varint --> double/float (not intended by ProtoBuf, but used in the wild)\n bit_32 --> float\n bit_64 --> double\n len_delimited --> 4 bytes: float / 8 bytes: double / other sizes return NaN\n \"\"\"\n v = self._value_as_bytes()\n if len(v) == 4:\n return struct.unpack(\"!f\", v)[0]\n elif len(v) == 8:\n return struct.unpack(\"!d\", v)[0]\n # no need to raise an Exception\n raise TypeError(\"can not be converted to floatingpoint representation\")\n\n def _value_as_bytes(self) -> bytes:\n if isinstance(self.wire_value, bytes):\n return self.wire_value\n elif isinstance(self.wire_value, int):\n if self.wire_value.bit_length() > 64:\n # source for a python int are wiretypes varint/bit_32/bit64 and should never convert to int values 64bit\n # currently avoided by kaitai decoder (can not be added to tests)\n raise ValueError(\"value exceeds 64bit, violating protobuf specs\")\n elif self.wire_value.bit_length() > 32:\n # packing uses network byte order (to assure consistent results across architectures)\n return struct.pack(\"!Q\", self.wire_value)\n else:\n # packing uses network byte order (to assure consistent results across architectures)\n return struct.pack(\"!I\", self.wire_value)\n else:\n # should never happen, no tests\n raise ValueError(\"can not be converted to bytes\")\n\n def _wire_type_str(self):\n return str(self.wire_type).split(\".\")[-1]\n\n def _decoding_str(self, decoding: ProtoParser.DecodedTypes):\n return str(decoding).split(\".\")[-1]\n\n def wire_value_as_utf8(self, escape_newline=True) -> str:\n if isinstance(self.wire_value, bytes):\n res = self.wire_value.decode(\"utf-8\")\n return res.replace(\"\\n\", \"\\\\n\") if escape_newline else res\n return str(self.wire_value)\n\n def gen_flat_decoded_field_dicts(self) -> Generator[dict, None, None]:\n \"\"\"\n Returns a generator which passes the field as a dict.\n\n In order to return the field value it gets decoded (based on a failover strategy and\n provided ParserRules).\n If the field holds a nested message, the fields contained in the message are appended.\n Ultimately this flattens all fields recursively.\n \"\"\"\n selected_decoding, decoded_val = self.safe_decode_as(\n self.preferred_decoding, self.try_unpack\n )\n field_desc_dict = {\n \"tag\": self._gen_tag_str(),\n \"wireType\": self._wire_type_str(),\n \"decoding\": self._decoding_str(selected_decoding),\n \"name\": self.name,\n }\n if isinstance(decoded_val, list):\n if (\n selected_decoding\n == ProtoParser.DecodedTypes.message # field is a message with subfields\n and not self.is_packed_parent # field is a message, but replaced by packed fields\n ):\n # Field is a message, not packed, thus include it as message header\n field_desc_dict[\"val\"] = \"\"\n yield field_desc_dict\n # add sub-fields of messages or packed fields\n for f in decoded_val:\n yield from f.gen_flat_decoded_field_dicts()\n else:\n field_desc_dict[\"val\"] = decoded_val\n yield field_desc_dict\n\n def __init__(\n self,\n data: bytes,\n rules: list[ProtoParser.ParserRule] = None,\n parser_options: ParserOptions = None,\n ) -> None:\n self.data: bytes = data\n if parser_options is None:\n 
parser_options = ProtoParser.ParserOptions()\n self.options = parser_options\n if rules is None:\n rules = []\n self.rules = rules\n\n try:\n self.root_fields: list[ProtoParser.Field] = ProtoParser.read_fields(\n wire_data=self.data,\n options=self.options,\n parent_field=None,\n rules=self.rules,\n )\n except Exception as e:\n raise ValueError(\"not a valid protobuf message\") from e\n\n def gen_flat_decoded_field_dicts(self) -> Generator[dict, None, None]:\n for f in self.root_fields:\n yield from f.gen_flat_decoded_field_dicts()\n\n def gen_str_rows(self) -> Generator[tuple[str, ...], None, None]:\n for field_dict in self.gen_flat_decoded_field_dicts():\n if (\n self.options.exclude_message_headers\n and field_dict[\"decoding\"] == \"message\"\n ):\n continue\n\n if self.options.include_wiretype:\n col1 = \"[{}->{}]\".format(field_dict[\"wireType\"], field_dict[\"decoding\"])\n else:\n col1 = \"[{}]\".format(field_dict[\"decoding\"])\n col2 = field_dict[\"name\"] # empty string if not set (consumes no space)\n col3 = field_dict[\"tag\"]\n col4 = str(field_dict[\"val\"])\n yield col1, col2, col3, col4\n\n\n# Note: all content view formating functionality is kept out of the ProtoParser class, to\n# allow it to be use independently.\n# This function is generic enough, to consider moving it to mitmproxy.contentviews.base\ndef format_table(\n table_rows: Iterable[tuple[str, ...]],\n max_col_width=100,\n) -> Iterator[base.TViewLine]:\n \"\"\"\n Helper function to render tables with variable column count (move to contentview base, if needed elsewhere)\n\n Note: The function has to convert generators to a list, as all rows have to be processed twice (to determine\n the column widths first).\n \"\"\"\n rows: list[tuple[str, ...]] = []\n col_count = 0\n cols_width: list[int] = []\n for row in table_rows:\n col_count = max(col_count, len(row))\n while len(cols_width) < col_count:\n cols_width.append(0)\n for col_num in range(len(row)):\n cols_width[col_num] = max(len(row[col_num]), cols_width[col_num])\n\n # store row in list\n rows.append(row)\n\n for i in range(len(cols_width)):\n cols_width[i] = min(cols_width[i], max_col_width)\n\n for row in rows:\n line: base.TViewLine = []\n for col_num in range(len(row)):\n col_val = row[col_num].ljust(cols_width[col_num] + 2)\n line.append((\"text\", col_val))\n yield line\n\n\ndef parse_grpc_messages(\n data, compression_scheme\n) -> Generator[tuple[bool, bytes], None, None]:\n \"\"\"Generator iterates over body data and returns a boolean indicating if the messages\n was compressed, along with the raw message data (decompressed) for each gRPC message\n contained in the body data\"\"\"\n while data:\n try:\n msg_is_compressed, length = struct.unpack(\"!?i\", data[:5])\n decoded_message = struct.unpack(\"!%is\" % length, data[5 : 5 + length])[0]\n except Exception as e:\n raise ValueError(\"invalid gRPC message\") from e\n\n if msg_is_compressed:\n try:\n decoded_message = decode(\n encoded=decoded_message, encoding=compression_scheme\n )\n except Exception as e:\n raise ValueError(\"Failed to decompress gRPC message with gzip\") from e\n\n yield msg_is_compressed, decoded_message\n data = data[5 + length :]\n\n\n# hacky fix for mitmproxy issue:\n#\n# mitmproxy handles Exceptions in the contenview's __call__ function, by\n# failing over to 'Raw' view. 
The intention was to use this behavior to\n# pass up Exceptions thrown inside the generator function ('format_pbuf'\n# and 'format_grpc') to the __call__ function.\n# This usually works fine if the contentview is initialized on a flow\n# with invalid data.\n# When the flow data gets invalidated in the edit mode, mitmproxy re-calls\n# the generator functions outside the contentviews '__call__' method.\n#\n# This happens in the 'safe_to_print' function of 'mitmproxy/contentvies/__init__.py'\n#\n# def safe_to_print(lines, encoding=\"utf8\"):\n# \"\"\"\n# Wraps a content generator so that each text portion is a *safe to print* unicode string.\n# \"\"\"\n# for line in lines: # <------ this code re-iterates lines and thus calls generators, without using the views __call__ function\n# clean_line = []\n# for (style, text) in line:\n# if isinstance(text, bytes):\n# text = text.decode(encoding, \"replace\")\n# text = strutils.escape_control_characters(text)\n# clean_line.append((style, text))\n# yield clean_line\n#\n# In result, mitmproxy crashes if the generator functions raise Exception to indicate\n# data parsing errors.\n# To deal with this, the generator function gets converted into a list inside the\n# __call__ function. Ultimately, exceptions are raised directly from within __call__\n# instead in cases where the generator is accessed externally without exception handling.\ndef hack_generator_to_list(generator_func):\n return list(generator_func)\n\n\ndef format_pbuf(\n message: bytes,\n parser_options: ProtoParser.ParserOptions,\n rules: list[ProtoParser.ParserRule],\n):\n yield from format_table(\n ProtoParser(\n data=message, parser_options=parser_options, rules=rules\n ).gen_str_rows()\n )\n\n\ndef format_grpc(\n data: bytes,\n parser_options: ProtoParser.ParserOptions,\n rules: list[ProtoParser.ParserRule],\n compression_scheme=\"gzip\",\n):\n message_count = 0\n for compressed, pb_message in parse_grpc_messages(\n data=data, compression_scheme=compression_scheme\n ):\n headline = (\n \"gRPC message \"\n + str(message_count)\n + \" (compressed \"\n + str(compression_scheme if compressed else compressed)\n + \")\"\n )\n\n yield [(\"text\", headline)]\n yield from format_pbuf(\n message=pb_message, parser_options=parser_options, rules=rules\n )\n\n\n@dataclass\nclass ViewConfig:\n parser_options: ProtoParser.ParserOptions = field(default_factory=ProtoParser.ParserOptions)\n parser_rules: list[ProtoParser.ParserRule] = field(default_factory=list)\n\n\nclass ViewGrpcProtobuf(base.View):\n \"\"\"Human friendly view of protocol buffers\"\"\"\n\n name = \"gRPC/Protocol Buffer\"\n __content_types_pb = [\n \"application/x-protobuf\",\n \"application/x-protobuffer\",\n \"application/grpc-proto\",\n ]\n __content_types_grpc = [\n \"application/grpc\",\n ]\n\n # first value serves as default algorithm for compressed messages, if 'grpc-encoding' header is missing\n __valid_grpc_encodings = [\n \"gzip\",\n \"identity\",\n \"deflate\",\n ]\n\n # allows to take external ParserOptions object. 
goes with defaults otherwise\n def __init__(self, config: ViewConfig = None) -> None:\n super().__init__()\n if config is None:\n config = ViewConfig()\n self.config = config\n\n def _matching_rules(\n self,\n rules: list[ProtoParser.ParserRule],\n message: http.Message | None,\n flow: flow.Flow | None,\n ) -> list[ProtoParser.ParserRule]:\n \"\"\"\n Checks which of the give rules applies and returns a List only containing those rules\n\n Each rule defines a flow filter in rule.filter which is usually matched against a flow.\n When it comes to protobuf parsing, in most cases request messages differ from response messages.\n Thus, it has to be possible to apply a rule to a http.Request or a http.Response, only.\n\n As the name flowfilter suggests, filters are working on a flow-level, not on message-level.\n This means:\n\n - the filter expression '~q' matches all flows with a request, but no response\n - the filter expression '~s' matches all flows with a response\n\n In result, for complete flows (with a gRPC message in the request and the response), ParserRules would\n either be applied to request and response at the same time ('~s') or neither would match request, nor\n response (~q).\n\n To distinguish between rules which should be applied to response messages, request messages or both\n (while being applied to the whole flow), different classes with same behavior are used to wrap rules:\n\n - ParserRule: applies to requests and responses\n - ParserRuleRequest: applies to requests only\n - ParserRuleResponse: applies to responses only\n \"\"\"\n res: list[ProtoParser.ParserRule] = []\n if not flow:\n return res\n is_request = isinstance(message, http.Request)\n for rule in rules:\n # message based rule matching\n if is_request and isinstance(rule, ProtoParser.ParserRuleResponse):\n continue\n elif not is_request and isinstance(rule, ProtoParser.ParserRuleRequest):\n continue\n # flow based rule matching\n if flowfilter.match(rule.filter, flow=flow):\n res.append(rule)\n return res\n\n def __call__(\n self,\n data: bytes,\n *,\n content_type: str | None = None,\n flow: flow.Flow | None = None,\n http_message: http.Message | None = None,\n **unknown_metadata,\n ) -> contentviews.TViewResult:\n applicabble_rules = self._matching_rules(\n rules=self.config.parser_rules, flow=flow, message=http_message\n )\n if content_type in self.__content_types_grpc:\n # If gRPC messages are flagged to be compressed, the compression algorithm is expressed in the\n # 'grpc-encoding' header.\n #\n # The following code tries to determine the compression algorithm base on this header.\n # If the header is not present or contains an unsupported compression, the logic falls back to\n # 'gzip'.\n #\n # If a compressed gRPC message is found in the body data (compressed flag set), the information\n # on the compression scheme is needed (even if not set by a header), in order to process the message.\n # Thus we assure there is always an encoding selected. 
An encoding of 'Identity' would not make\n # sense, if a message is flagged as being compressed, that's why a default is chosen.\n try:\n assert http_message is not None\n h = http_message.headers[\"grpc-encoding\"]\n grpc_encoding = (\n h\n if h in self.__valid_grpc_encodings\n else self.__valid_grpc_encodings[0]\n )\n except:\n grpc_encoding = self.__valid_grpc_encodings[0]\n\n text_iter = format_grpc(\n data=data,\n parser_options=self.config.parser_options,\n compression_scheme=grpc_encoding,\n rules=applicabble_rules,\n )\n title = \"gRPC\"\n else:\n text_iter = format_pbuf(\n message=data,\n parser_options=self.config.parser_options,\n rules=applicabble_rules,\n )\n title = \"Protobuf (flattened)\"\n\n # hacky bugfix, see description above generator functions format_pbuf/format_grpc\n try:\n text_iter = hack_generator_to_list(text_iter)\n except Exception as e:\n # hook to log exception tracebacks on iterators\n\n # import traceback\n # ctx.log.warn(\"gRPC contentview: {}\".format(traceback.format_exc()))\n raise e\n\n return title, text_iter\n\n def render_priority(\n self,\n data: bytes,\n *,\n content_type: str | None = None,\n flow: flow.Flow | None = None,\n http_message: http.Message | None = None,\n **unknown_metadata,\n ) -> float:\n\n if bool(data) and content_type in self.__content_types_grpc:\n return 1\n if bool(data) and content_type in self.__content_types_pb:\n # replace existing protobuf renderer preference (adjust by option)\n return 1.5\n else:\n return 0\n", "path": "mitmproxy/contentviews/grpc.py" } ]
diff --git a/CHANGELOG.md b/CHANGELOG.md index 2e575e51d4..f9f75be9fa 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,6 +15,8 @@ * Remove overambitious assertions in the HTTP state machine, fix some error handling. ([#5383](https://github.com/mitmproxy/mitmproxy/issues/5383), @mhils) +* Use default_factory for parser_options. + ([#5474](https://github.com/mitmproxy/mitmproxy/issues/5474), @rathann) ## 15 May 2022: mitmproxy 8.1.0 diff --git a/mitmproxy/contentviews/grpc.py b/mitmproxy/contentviews/grpc.py index a5ef99708f..5c73220c83 100644 --- a/mitmproxy/contentviews/grpc.py +++ b/mitmproxy/contentviews/grpc.py @@ -951,7 +951,7 @@ def format_grpc( @dataclass class ViewConfig: - parser_options: ProtoParser.ParserOptions = ProtoParser.ParserOptions() + parser_options: ProtoParser.ParserOptions = field(default_factory=ProtoParser.ParserOptions) parser_rules: list[ProtoParser.ParserRule] = field(default_factory=list)
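For context on the record above: the patch swaps a bare `ProtoParser.ParserOptions()` class-level default for `field(default_factory=...)`. The sketch below is illustrative only; `ParserOptions` here is a simplified stand-in for the real mitmproxy class, and the Python 3.11 behaviour it demonstrates is the likely motivation for the change rather than something stated in the record.

```python
from dataclasses import dataclass, field

@dataclass
class ParserOptions:          # simplified stand-in for ProtoParser.ParserOptions
    include_wiretype: bool = False

@dataclass
class ViewConfig:
    # A bare `ParserOptions()` default would be one shared instance across all
    # ViewConfig objects; on Python 3.11+ it is also rejected outright
    # ("mutable default ... is not allowed: use default_factory") because a
    # non-frozen dataclass instance is unhashable. default_factory avoids both.
    parser_options: ParserOptions = field(default_factory=ParserOptions)
    parser_rules: list = field(default_factory=list)

a, b = ViewConfig(), ViewConfig()
assert a.parser_options is not b.parser_options  # each config gets its own options
```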
huggingface__huggingface_hub-234
Error when creating a repository This error happens on huggingface_hub version 0.0.14 (current version) when creating a new dataset repository ```bash (py38) bash-3.2$ huggingface-cli repo create --type dataset codeparrot-train git version 2.29.2 git-lfs/2.13.3 (GitHub; darwin amd64; go 1.16.2; git a5e65851) Traceback (most recent call last): File "/Users/thomwolf/miniconda2/envs/py38/bin/huggingface-cli", line 8, in <module> sys.exit(main()) File "/Users/thomwolf/miniconda2/envs/py38/lib/python3.8/site-packages/huggingface_hub/commands/huggingface_cli.py", line 41, in main service.run() File "/Users/thomwolf/miniconda2/envs/py38/lib/python3.8/site-packages/huggingface_hub/commands/user.py", line 228, in run user, _ = self._api.whoami(token) ValueError: too many values to unpack (expected 2) ```
[ { "content": "# Copyright 2020 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport subprocess\nfrom argparse import ArgumentParser\nfrom getpass import getpass\nfrom typing import List, Union\n\nfrom huggingface_hub.commands import BaseHuggingfaceCLICommand\nfrom huggingface_hub.constants import REPO_TYPES, REPO_TYPES_URL_PREFIXES\nfrom huggingface_hub.hf_api import HfApi, HfFolder\nfrom requests.exceptions import HTTPError\n\n\nclass UserCommands(BaseHuggingfaceCLICommand):\n @staticmethod\n def register_subcommand(parser: ArgumentParser):\n login_parser = parser.add_parser(\n \"login\", help=\"Log in using the same credentials as on huggingface.co\"\n )\n login_parser.set_defaults(func=lambda args: LoginCommand(args))\n whoami_parser = parser.add_parser(\n \"whoami\", help=\"Find out which huggingface.co account you are logged in as.\"\n )\n whoami_parser.set_defaults(func=lambda args: WhoamiCommand(args))\n logout_parser = parser.add_parser(\"logout\", help=\"Log out\")\n logout_parser.set_defaults(func=lambda args: LogoutCommand(args))\n\n # new system: git-based repo system\n repo_parser = parser.add_parser(\n \"repo\",\n help=\"{create, ls-files} Commands to interact with your huggingface.co repos.\",\n )\n repo_subparsers = repo_parser.add_subparsers(\n help=\"huggingface.co repos related commands\"\n )\n ls_parser = repo_subparsers.add_parser(\n \"ls-files\", help=\"List all your files on huggingface.co\"\n )\n ls_parser.add_argument(\n \"--organization\", type=str, help=\"Optional: organization namespace.\"\n )\n ls_parser.set_defaults(func=lambda args: ListReposObjsCommand(args))\n repo_create_parser = repo_subparsers.add_parser(\n \"create\", help=\"Create a new repo on huggingface.co\"\n )\n repo_create_parser.add_argument(\n \"name\",\n type=str,\n help=\"Name for your repo. 
Will be namespaced under your username to build the repo id.\",\n )\n repo_create_parser.add_argument(\n \"--type\",\n type=str,\n help='Optional: repo_type: set to \"dataset\" or \"space\" if creating a dataset or space, default is model.',\n )\n repo_create_parser.add_argument(\n \"--organization\", type=str, help=\"Optional: organization namespace.\"\n )\n repo_create_parser.add_argument(\n \"-y\",\n \"--yes\",\n action=\"store_true\",\n help=\"Optional: answer Yes to the prompt\",\n )\n repo_create_parser.set_defaults(func=lambda args: RepoCreateCommand(args))\n\n\nclass ANSI:\n \"\"\"\n Helper for en.wikipedia.org/wiki/ANSI_escape_code\n \"\"\"\n\n _bold = \"\\u001b[1m\"\n _red = \"\\u001b[31m\"\n _gray = \"\\u001b[90m\"\n _reset = \"\\u001b[0m\"\n\n @classmethod\n def bold(cls, s):\n return \"{}{}{}\".format(cls._bold, s, cls._reset)\n\n @classmethod\n def red(cls, s):\n return \"{}{}{}\".format(cls._bold + cls._red, s, cls._reset)\n\n @classmethod\n def gray(cls, s):\n return \"{}{}{}\".format(cls._gray, s, cls._reset)\n\n\ndef tabulate(rows: List[List[Union[str, int]]], headers: List[str]) -> str:\n \"\"\"\n Inspired by:\n\n - stackoverflow.com/a/8356620/593036\n - stackoverflow.com/questions/9535954/printing-lists-as-tabular-data\n \"\"\"\n col_widths = [max(len(str(x)) for x in col) for col in zip(*rows, headers)]\n row_format = (\"{{:{}}} \" * len(headers)).format(*col_widths)\n lines = []\n lines.append(row_format.format(*headers))\n lines.append(row_format.format(*[\"-\" * w for w in col_widths]))\n for row in rows:\n lines.append(row_format.format(*row))\n return \"\\n\".join(lines)\n\n\nclass BaseUserCommand:\n def __init__(self, args):\n self.args = args\n self._api = HfApi()\n\n\nclass LoginCommand(BaseUserCommand):\n def run(self):\n print( # docstyle-ignore\n \"\"\"\n _| _| _| _| _|_|_| _|_|_| _|_|_| _| _| _|_|_| _|_|_|_| _|_| _|_|_| _|_|_|_|\n _| _| _| _| _| _| _| _|_| _| _| _| _| _| _| _|\n _|_|_|_| _| _| _| _|_| _| _|_| _| _| _| _| _| _|_| _|_|_| _|_|_|_| _| _|_|_|\n _| _| _| _| _| _| _| _| _| _| _|_| _| _| _| _| _| _| _|\n _| _| _|_| _|_|_| _|_|_| _|_|_| _| _| _|_|_| _| _| _| _|_|_| _|_|_|_|\n\n \"\"\"\n )\n username = input(\"Username: \")\n password = getpass()\n try:\n token = self._api.login(username, password)\n except HTTPError as e:\n # probably invalid credentials, display error message.\n print(e)\n print(ANSI.red(e.response.text))\n exit(1)\n HfFolder.save_token(token)\n print(\"Login successful\")\n print(\"Your token has been saved to\", HfFolder.path_token)\n\n\nclass WhoamiCommand(BaseUserCommand):\n def run(self):\n token = HfFolder.get_token()\n if token is None:\n print(\"Not logged in\")\n exit()\n try:\n info = self._api.whoami(token)\n print(info[\"name\"])\n orgs = [org[\"name\"] for org in info[\"orgs\"]]\n if orgs:\n print(ANSI.bold(\"orgs: \"), \",\".join(orgs))\n except HTTPError as e:\n print(e)\n print(ANSI.red(e.response.text))\n exit(1)\n\n\nclass LogoutCommand(BaseUserCommand):\n def run(self):\n token = HfFolder.get_token()\n if token is None:\n print(\"Not logged in\")\n exit()\n HfFolder.delete_token()\n self._api.logout(token)\n print(\"Successfully logged out.\")\n\n\nclass ListReposObjsCommand(BaseUserCommand):\n def run(self):\n token = HfFolder.get_token()\n if token is None:\n print(\"Not logged in\")\n exit(1)\n try:\n objs = self._api.list_repos_objs(token, organization=self.args.organization)\n except HTTPError as e:\n print(e)\n print(ANSI.red(e.response.text))\n exit(1)\n if len(objs) == 0:\n print(\"No shared file yet\")\n 
exit()\n rows = [[obj.filename, obj.lastModified, obj.commit, obj.size] for obj in objs]\n print(\n tabulate(rows, headers=[\"Filename\", \"LastModified\", \"Commit-Sha\", \"Size\"])\n )\n\n\nclass RepoCreateCommand(BaseUserCommand):\n def run(self):\n token = HfFolder.get_token()\n if token is None:\n print(\"Not logged in\")\n exit(1)\n try:\n stdout = subprocess.check_output([\"git\", \"--version\"]).decode(\"utf-8\")\n print(ANSI.gray(stdout.strip()))\n except FileNotFoundError:\n print(\"Looks like you do not have git installed, please install.\")\n\n try:\n stdout = subprocess.check_output([\"git-lfs\", \"--version\"]).decode(\"utf-8\")\n print(ANSI.gray(stdout.strip()))\n except FileNotFoundError:\n print(\n ANSI.red(\n \"Looks like you do not have git-lfs installed, please install.\"\n \" You can install from https://git-lfs.github.com/.\"\n \" Then run `git lfs install` (you only have to do this once).\"\n )\n )\n print(\"\")\n\n user, _ = self._api.whoami(token)\n namespace = (\n self.args.organization if self.args.organization is not None else user\n )\n\n repo_id = f\"{namespace}/{self.args.name}\"\n\n if self.args.type not in REPO_TYPES:\n print(\"Invalid repo --type\")\n exit(1)\n\n if self.args.type in REPO_TYPES_URL_PREFIXES:\n repo_id = REPO_TYPES_URL_PREFIXES[self.args.type] + repo_id\n\n print(\"You are about to create {}\".format(ANSI.bold(repo_id)))\n\n if not self.args.yes:\n choice = input(\"Proceed? [Y/n] \").lower()\n if not (choice == \"\" or choice == \"y\" or choice == \"yes\"):\n print(\"Abort\")\n exit()\n try:\n url = self._api.create_repo(\n token,\n name=self.args.name,\n organization=self.args.organization,\n repo_type=self.args.type,\n )\n except HTTPError as e:\n print(e)\n print(ANSI.red(e.response.text))\n exit(1)\n print(\"\\nYour repo now lives at:\")\n print(\" {}\".format(ANSI.bold(url)))\n print(\n \"\\nYou can clone it locally with the command below,\"\n \" and commit/push as usual.\"\n )\n print(f\"\\n git clone {url}\")\n print(\"\")\n", "path": "src/huggingface_hub/commands/user.py" } ]
[ { "content": "# Copyright 2020 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport subprocess\nfrom argparse import ArgumentParser\nfrom getpass import getpass\nfrom typing import List, Union\n\nfrom huggingface_hub.commands import BaseHuggingfaceCLICommand\nfrom huggingface_hub.constants import REPO_TYPES, REPO_TYPES_URL_PREFIXES\nfrom huggingface_hub.hf_api import HfApi, HfFolder\nfrom requests.exceptions import HTTPError\n\n\nclass UserCommands(BaseHuggingfaceCLICommand):\n @staticmethod\n def register_subcommand(parser: ArgumentParser):\n login_parser = parser.add_parser(\n \"login\", help=\"Log in using the same credentials as on huggingface.co\"\n )\n login_parser.set_defaults(func=lambda args: LoginCommand(args))\n whoami_parser = parser.add_parser(\n \"whoami\", help=\"Find out which huggingface.co account you are logged in as.\"\n )\n whoami_parser.set_defaults(func=lambda args: WhoamiCommand(args))\n logout_parser = parser.add_parser(\"logout\", help=\"Log out\")\n logout_parser.set_defaults(func=lambda args: LogoutCommand(args))\n\n # new system: git-based repo system\n repo_parser = parser.add_parser(\n \"repo\",\n help=\"{create, ls-files} Commands to interact with your huggingface.co repos.\",\n )\n repo_subparsers = repo_parser.add_subparsers(\n help=\"huggingface.co repos related commands\"\n )\n ls_parser = repo_subparsers.add_parser(\n \"ls-files\", help=\"List all your files on huggingface.co\"\n )\n ls_parser.add_argument(\n \"--organization\", type=str, help=\"Optional: organization namespace.\"\n )\n ls_parser.set_defaults(func=lambda args: ListReposObjsCommand(args))\n repo_create_parser = repo_subparsers.add_parser(\n \"create\", help=\"Create a new repo on huggingface.co\"\n )\n repo_create_parser.add_argument(\n \"name\",\n type=str,\n help=\"Name for your repo. 
Will be namespaced under your username to build the repo id.\",\n )\n repo_create_parser.add_argument(\n \"--type\",\n type=str,\n help='Optional: repo_type: set to \"dataset\" or \"space\" if creating a dataset or space, default is model.',\n )\n repo_create_parser.add_argument(\n \"--organization\", type=str, help=\"Optional: organization namespace.\"\n )\n repo_create_parser.add_argument(\n \"-y\",\n \"--yes\",\n action=\"store_true\",\n help=\"Optional: answer Yes to the prompt\",\n )\n repo_create_parser.set_defaults(func=lambda args: RepoCreateCommand(args))\n\n\nclass ANSI:\n \"\"\"\n Helper for en.wikipedia.org/wiki/ANSI_escape_code\n \"\"\"\n\n _bold = \"\\u001b[1m\"\n _red = \"\\u001b[31m\"\n _gray = \"\\u001b[90m\"\n _reset = \"\\u001b[0m\"\n\n @classmethod\n def bold(cls, s):\n return \"{}{}{}\".format(cls._bold, s, cls._reset)\n\n @classmethod\n def red(cls, s):\n return \"{}{}{}\".format(cls._bold + cls._red, s, cls._reset)\n\n @classmethod\n def gray(cls, s):\n return \"{}{}{}\".format(cls._gray, s, cls._reset)\n\n\ndef tabulate(rows: List[List[Union[str, int]]], headers: List[str]) -> str:\n \"\"\"\n Inspired by:\n\n - stackoverflow.com/a/8356620/593036\n - stackoverflow.com/questions/9535954/printing-lists-as-tabular-data\n \"\"\"\n col_widths = [max(len(str(x)) for x in col) for col in zip(*rows, headers)]\n row_format = (\"{{:{}}} \" * len(headers)).format(*col_widths)\n lines = []\n lines.append(row_format.format(*headers))\n lines.append(row_format.format(*[\"-\" * w for w in col_widths]))\n for row in rows:\n lines.append(row_format.format(*row))\n return \"\\n\".join(lines)\n\n\nclass BaseUserCommand:\n def __init__(self, args):\n self.args = args\n self._api = HfApi()\n\n\nclass LoginCommand(BaseUserCommand):\n def run(self):\n print( # docstyle-ignore\n \"\"\"\n _| _| _| _| _|_|_| _|_|_| _|_|_| _| _| _|_|_| _|_|_|_| _|_| _|_|_| _|_|_|_|\n _| _| _| _| _| _| _| _|_| _| _| _| _| _| _| _|\n _|_|_|_| _| _| _| _|_| _| _|_| _| _| _| _| _| _|_| _|_|_| _|_|_|_| _| _|_|_|\n _| _| _| _| _| _| _| _| _| _| _|_| _| _| _| _| _| _| _|\n _| _| _|_| _|_|_| _|_|_| _|_|_| _| _| _|_|_| _| _| _| _|_|_| _|_|_|_|\n\n \"\"\"\n )\n username = input(\"Username: \")\n password = getpass()\n try:\n token = self._api.login(username, password)\n except HTTPError as e:\n # probably invalid credentials, display error message.\n print(e)\n print(ANSI.red(e.response.text))\n exit(1)\n HfFolder.save_token(token)\n print(\"Login successful\")\n print(\"Your token has been saved to\", HfFolder.path_token)\n\n\nclass WhoamiCommand(BaseUserCommand):\n def run(self):\n token = HfFolder.get_token()\n if token is None:\n print(\"Not logged in\")\n exit()\n try:\n info = self._api.whoami(token)\n print(info[\"name\"])\n orgs = [org[\"name\"] for org in info[\"orgs\"]]\n if orgs:\n print(ANSI.bold(\"orgs: \"), \",\".join(orgs))\n except HTTPError as e:\n print(e)\n print(ANSI.red(e.response.text))\n exit(1)\n\n\nclass LogoutCommand(BaseUserCommand):\n def run(self):\n token = HfFolder.get_token()\n if token is None:\n print(\"Not logged in\")\n exit()\n HfFolder.delete_token()\n self._api.logout(token)\n print(\"Successfully logged out.\")\n\n\nclass ListReposObjsCommand(BaseUserCommand):\n def run(self):\n token = HfFolder.get_token()\n if token is None:\n print(\"Not logged in\")\n exit(1)\n try:\n objs = self._api.list_repos_objs(token, organization=self.args.organization)\n except HTTPError as e:\n print(e)\n print(ANSI.red(e.response.text))\n exit(1)\n if len(objs) == 0:\n print(\"No shared file yet\")\n 
exit()\n rows = [[obj.filename, obj.lastModified, obj.commit, obj.size] for obj in objs]\n print(\n tabulate(rows, headers=[\"Filename\", \"LastModified\", \"Commit-Sha\", \"Size\"])\n )\n\n\nclass RepoCreateCommand(BaseUserCommand):\n def run(self):\n token = HfFolder.get_token()\n if token is None:\n print(\"Not logged in\")\n exit(1)\n try:\n stdout = subprocess.check_output([\"git\", \"--version\"]).decode(\"utf-8\")\n print(ANSI.gray(stdout.strip()))\n except FileNotFoundError:\n print(\"Looks like you do not have git installed, please install.\")\n\n try:\n stdout = subprocess.check_output([\"git-lfs\", \"--version\"]).decode(\"utf-8\")\n print(ANSI.gray(stdout.strip()))\n except FileNotFoundError:\n print(\n ANSI.red(\n \"Looks like you do not have git-lfs installed, please install.\"\n \" You can install from https://git-lfs.github.com/.\"\n \" Then run `git lfs install` (you only have to do this once).\"\n )\n )\n print(\"\")\n\n user = self._api.whoami(token)[\"name\"]\n namespace = (\n self.args.organization if self.args.organization is not None else user\n )\n\n repo_id = f\"{namespace}/{self.args.name}\"\n\n if self.args.type not in REPO_TYPES:\n print(\"Invalid repo --type\")\n exit(1)\n\n if self.args.type in REPO_TYPES_URL_PREFIXES:\n repo_id = REPO_TYPES_URL_PREFIXES[self.args.type] + repo_id\n\n print(\"You are about to create {}\".format(ANSI.bold(repo_id)))\n\n if not self.args.yes:\n choice = input(\"Proceed? [Y/n] \").lower()\n if not (choice == \"\" or choice == \"y\" or choice == \"yes\"):\n print(\"Abort\")\n exit()\n try:\n url = self._api.create_repo(\n token,\n name=self.args.name,\n organization=self.args.organization,\n repo_type=self.args.type,\n )\n except HTTPError as e:\n print(e)\n print(ANSI.red(e.response.text))\n exit(1)\n print(\"\\nYour repo now lives at:\")\n print(\" {}\".format(ANSI.bold(url)))\n print(\n \"\\nYou can clone it locally with the command below,\"\n \" and commit/push as usual.\"\n )\n print(f\"\\n git clone {url}\")\n print(\"\")\n", "path": "src/huggingface_hub/commands/user.py" } ]
diff --git a/src/huggingface_hub/commands/user.py b/src/huggingface_hub/commands/user.py index 7c02d87fc4..1da7debe3a 100644 --- a/src/huggingface_hub/commands/user.py +++ b/src/huggingface_hub/commands/user.py @@ -224,7 +224,7 @@ def run(self): ) print("") - user, _ = self._api.whoami(token) + user = self._api.whoami(token)["name"] namespace = ( self.args.organization if self.args.organization is not None else user )
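For context on the record above: the traceback comes from tuple-unpacking the value returned by `whoami()`, which by this version returns a dict. The snippet below is an illustrative sketch, not the real `HfApi` implementation; the stand-in `whoami()` and its keys are assumptions chosen to mirror the error and the patched access pattern.

```python
# Illustrative stand-in for HfApi.whoami(); the real API response carries more keys.
def whoami(token: str) -> dict:
    return {"name": "thomwolf", "orgs": [{"name": "huggingface"}], "type": "user"}

info = whoami("dummy-token")

# Old call site: `user, _ = whoami(token)`. Unpacking a dict iterates its keys,
# so any response with three or more keys raises
# "ValueError: too many values to unpack (expected 2)".
user = info["name"]                                  # patched call site
orgs = [org["name"] for org in info.get("orgs", [])]
print(user, orgs)                                    # -> thomwolf ['huggingface']
```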
svthalia__concrexit-1802
AttributeError: 'Event' object has no attribute 'title_en' Sentry Issue: [CONCREXIT-70](https://sentry.io/organizations/thalia/issues/2487433496/?referrer=github_integration) ``` AttributeError: 'Event' object has no attribute 'title_en' (9 additional frame(s) were not displayed) ... File "django/contrib/admin/options.py", line 1540, in changeform_view return self._changeform_view(request, object_id, form_url, extra_context) File "django/contrib/admin/options.py", line 1586, in _changeform_view self.save_model(request, new_object, form, not add) File "photos/admin.py", line 45, in save_model super().save_model(request, obj, form, change) File "django/contrib/admin/options.py", line 1099, in save_model obj.save() File "photos/models.py", line 158, in save self.title = self.event.title_en ```
[ { "content": "import hashlib\nimport logging\nimport os\nimport random\n\nfrom django.conf import settings\nfrom django.core.exceptions import ValidationError\nfrom django.db import models\nfrom django.urls import reverse\nfrom django.utils import timezone\nfrom django.utils.functional import cached_property\nfrom django.utils.translation import gettext_lazy as _\n\nfrom members.models import Member\nfrom events.models import Event\nfrom pushnotifications.models import ScheduledMessage, Category\n\nCOVER_FILENAME = \"cover.jpg\"\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef photo_uploadto(instance, filename):\n \"\"\"Get path of file to upload to.\"\"\"\n num = instance.album.photo_set.count()\n extension = os.path.splitext(filename)[1]\n new_filename = str(num).zfill(4) + extension\n return os.path.join(Album.photosdir, instance.album.dirname, new_filename)\n\n\nclass Photo(models.Model):\n \"\"\"Model for a Photo object.\"\"\"\n\n album = models.ForeignKey(\n \"Album\", on_delete=models.CASCADE, verbose_name=_(\"album\")\n )\n\n file = models.ImageField(_(\"file\"), upload_to=photo_uploadto)\n\n rotation = models.IntegerField(\n verbose_name=_(\"rotation\"),\n default=0,\n choices=((x, x) for x in (0, 90, 180, 270)),\n help_text=_(\"This does not modify the original image file.\"),\n )\n\n hidden = models.BooleanField(_(\"hidden\"), default=False)\n\n _digest = models.CharField(\"digest\", max_length=40,)\n\n def __init__(self, *args, **kwargs):\n \"\"\"Initialize Photo object and set the file if it exists.\"\"\"\n super().__init__(*args, **kwargs)\n if self.file:\n self.original_file = self.file.path\n else:\n self.original_file = \"\"\n\n def __str__(self):\n \"\"\"Return the filename of a Photo object.\"\"\"\n return os.path.basename(self.file.name)\n\n class Meta:\n \"\"\"Meta class for Photo.\"\"\"\n\n ordering = (\"file\",)\n\n\nclass Album(models.Model):\n \"\"\"Model for Album objects.\"\"\"\n\n title = models.CharField(\n _(\"title\"),\n blank=True,\n max_length=200,\n help_text=_(\"Leave empty to take over the title of the event\"),\n )\n\n dirname = models.CharField(verbose_name=_(\"directory name\"), max_length=200,)\n\n date = models.DateField(\n verbose_name=_(\"date\"),\n blank=True,\n help_text=_(\"Leave empty to take over the date of the event\"),\n )\n\n slug = models.SlugField(verbose_name=_(\"slug\"), unique=True,)\n\n hidden = models.BooleanField(verbose_name=_(\"hidden\"), default=False)\n\n new_album_notification = models.ForeignKey(\n ScheduledMessage, on_delete=models.deletion.SET_NULL, blank=True, null=True\n )\n\n event = models.ForeignKey(Event, on_delete=models.SET_NULL, blank=True, null=True)\n\n _cover = models.OneToOneField(\n Photo,\n on_delete=models.SET_NULL,\n blank=True,\n null=True,\n related_name=\"covered_album\",\n verbose_name=_(\"cover image\"),\n )\n\n shareable = models.BooleanField(verbose_name=_(\"shareable\"), default=False)\n\n photosdir = \"photos\"\n photospath = os.path.join(settings.MEDIA_ROOT, photosdir)\n\n @cached_property\n def cover(self):\n \"\"\"Return cover of Album.\n\n If a cover is not set, return a random photo or None if there are no photos.\n \"\"\"\n cover = None\n if self._cover is not None:\n return self._cover\n if self.photo_set.exists():\n random.seed(self.dirname)\n cover = random.choice(self.photo_set.all())\n return cover\n\n def __str__(self):\n \"\"\"Get string representation of Album.\"\"\"\n return \"{} {}\".format(self.date.strftime(\"%Y-%m-%d\"), self.title)\n\n def get_absolute_url(self):\n 
\"\"\"Get url of Album.\"\"\"\n return reverse(\"photos:album\", args=[str(self.slug)])\n\n def clean(self):\n super().clean()\n errors = {}\n\n if not self.title and not self.event:\n errors.update(\n {\"title\": _(\"This field is required if there is no event selected.\")}\n )\n\n if not self.date and not self.event:\n errors.update(\n {\"date\": _(\"This field is required if there is no event selected.\")}\n )\n\n if errors:\n raise ValidationError(errors)\n\n def save(self, **kwargs):\n \"\"\"Save album and send appropriate notifications.\"\"\"\n # dirname is only set for new objects, to avoid ever changing it\n if self.pk is None:\n self.dirname = self.slug\n\n if not self.title and self.event:\n self.title = self.event.title_en\n\n if not self.date:\n self.date = self.event.start.date()\n\n if not self.hidden and (\n self.new_album_notification is None or not self.new_album_notification.sent\n ):\n new_album_notification_time = timezone.now() + timezone.timedelta(hours=1)\n new_album_notification = ScheduledMessage()\n\n if (\n self.new_album_notification is not None\n and not self.new_album_notification.sent\n ):\n new_album_notification = self.new_album_notification\n\n new_album_notification.title_en = \"New album uploaded\"\n new_album_notification.body_en = (\n f\"A new photo album '{self.title}' has just been uploaded\"\n )\n new_album_notification.category = Category.objects.get(key=Category.PHOTO)\n new_album_notification.url = f\"{settings.BASE_URL}{self.get_absolute_url()}\"\n new_album_notification.time = new_album_notification_time\n new_album_notification.save()\n self.new_album_notification = new_album_notification\n self.new_album_notification.users.set(Member.current_members.all())\n elif (\n self.hidden\n and self.new_album_notification is not None\n and not self.new_album_notification.sent\n ):\n existing_notification = self.new_album_notification\n self.new_album_notification = None\n existing_notification.delete()\n\n super().save(**kwargs)\n\n @property\n def access_token(self):\n \"\"\"Return access token for album.\"\"\"\n return hashlib.sha256(\n \"{}album{}\".format(settings.SECRET_KEY, self.pk).encode(\"utf-8\")\n ).hexdigest()\n\n class Meta:\n \"\"\"Meta class for Album.\"\"\"\n\n ordering = (\"-date\", \"title\")\n", "path": "website/photos/models.py" } ]
[ { "content": "import hashlib\nimport logging\nimport os\nimport random\n\nfrom django.conf import settings\nfrom django.core.exceptions import ValidationError\nfrom django.db import models\nfrom django.urls import reverse\nfrom django.utils import timezone\nfrom django.utils.functional import cached_property\nfrom django.utils.translation import gettext_lazy as _\n\nfrom members.models import Member\nfrom events.models import Event\nfrom pushnotifications.models import ScheduledMessage, Category\n\nCOVER_FILENAME = \"cover.jpg\"\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef photo_uploadto(instance, filename):\n \"\"\"Get path of file to upload to.\"\"\"\n num = instance.album.photo_set.count()\n extension = os.path.splitext(filename)[1]\n new_filename = str(num).zfill(4) + extension\n return os.path.join(Album.photosdir, instance.album.dirname, new_filename)\n\n\nclass Photo(models.Model):\n \"\"\"Model for a Photo object.\"\"\"\n\n album = models.ForeignKey(\n \"Album\", on_delete=models.CASCADE, verbose_name=_(\"album\")\n )\n\n file = models.ImageField(_(\"file\"), upload_to=photo_uploadto)\n\n rotation = models.IntegerField(\n verbose_name=_(\"rotation\"),\n default=0,\n choices=((x, x) for x in (0, 90, 180, 270)),\n help_text=_(\"This does not modify the original image file.\"),\n )\n\n hidden = models.BooleanField(_(\"hidden\"), default=False)\n\n _digest = models.CharField(\"digest\", max_length=40,)\n\n def __init__(self, *args, **kwargs):\n \"\"\"Initialize Photo object and set the file if it exists.\"\"\"\n super().__init__(*args, **kwargs)\n if self.file:\n self.original_file = self.file.path\n else:\n self.original_file = \"\"\n\n def __str__(self):\n \"\"\"Return the filename of a Photo object.\"\"\"\n return os.path.basename(self.file.name)\n\n class Meta:\n \"\"\"Meta class for Photo.\"\"\"\n\n ordering = (\"file\",)\n\n\nclass Album(models.Model):\n \"\"\"Model for Album objects.\"\"\"\n\n title = models.CharField(\n _(\"title\"),\n blank=True,\n max_length=200,\n help_text=_(\"Leave empty to take over the title of the event\"),\n )\n\n dirname = models.CharField(verbose_name=_(\"directory name\"), max_length=200,)\n\n date = models.DateField(\n verbose_name=_(\"date\"),\n blank=True,\n help_text=_(\"Leave empty to take over the date of the event\"),\n )\n\n slug = models.SlugField(verbose_name=_(\"slug\"), unique=True,)\n\n hidden = models.BooleanField(verbose_name=_(\"hidden\"), default=False)\n\n new_album_notification = models.ForeignKey(\n ScheduledMessage, on_delete=models.deletion.SET_NULL, blank=True, null=True\n )\n\n event = models.ForeignKey(Event, on_delete=models.SET_NULL, blank=True, null=True)\n\n _cover = models.OneToOneField(\n Photo,\n on_delete=models.SET_NULL,\n blank=True,\n null=True,\n related_name=\"covered_album\",\n verbose_name=_(\"cover image\"),\n )\n\n shareable = models.BooleanField(verbose_name=_(\"shareable\"), default=False)\n\n photosdir = \"photos\"\n photospath = os.path.join(settings.MEDIA_ROOT, photosdir)\n\n @cached_property\n def cover(self):\n \"\"\"Return cover of Album.\n\n If a cover is not set, return a random photo or None if there are no photos.\n \"\"\"\n cover = None\n if self._cover is not None:\n return self._cover\n if self.photo_set.exists():\n random.seed(self.dirname)\n cover = random.choice(self.photo_set.all())\n return cover\n\n def __str__(self):\n \"\"\"Get string representation of Album.\"\"\"\n return \"{} {}\".format(self.date.strftime(\"%Y-%m-%d\"), self.title)\n\n def get_absolute_url(self):\n 
\"\"\"Get url of Album.\"\"\"\n return reverse(\"photos:album\", args=[str(self.slug)])\n\n def clean(self):\n super().clean()\n errors = {}\n\n if not self.title and not self.event:\n errors.update(\n {\"title\": _(\"This field is required if there is no event selected.\")}\n )\n\n if not self.date and not self.event:\n errors.update(\n {\"date\": _(\"This field is required if there is no event selected.\")}\n )\n\n if errors:\n raise ValidationError(errors)\n\n def save(self, **kwargs):\n \"\"\"Save album and send appropriate notifications.\"\"\"\n # dirname is only set for new objects, to avoid ever changing it\n if self.pk is None:\n self.dirname = self.slug\n\n if not self.title and self.event:\n self.title = self.event.title\n\n if not self.date:\n self.date = self.event.start.date()\n\n if not self.hidden and (\n self.new_album_notification is None or not self.new_album_notification.sent\n ):\n new_album_notification_time = timezone.now() + timezone.timedelta(hours=1)\n new_album_notification = ScheduledMessage()\n\n if (\n self.new_album_notification is not None\n and not self.new_album_notification.sent\n ):\n new_album_notification = self.new_album_notification\n\n new_album_notification.title_en = \"New album uploaded\"\n new_album_notification.body_en = (\n f\"A new photo album '{self.title}' has just been uploaded\"\n )\n new_album_notification.category = Category.objects.get(key=Category.PHOTO)\n new_album_notification.url = f\"{settings.BASE_URL}{self.get_absolute_url()}\"\n new_album_notification.time = new_album_notification_time\n new_album_notification.save()\n self.new_album_notification = new_album_notification\n self.new_album_notification.users.set(Member.current_members.all())\n elif (\n self.hidden\n and self.new_album_notification is not None\n and not self.new_album_notification.sent\n ):\n existing_notification = self.new_album_notification\n self.new_album_notification = None\n existing_notification.delete()\n\n super().save(**kwargs)\n\n @property\n def access_token(self):\n \"\"\"Return access token for album.\"\"\"\n return hashlib.sha256(\n \"{}album{}\".format(settings.SECRET_KEY, self.pk).encode(\"utf-8\")\n ).hexdigest()\n\n class Meta:\n \"\"\"Meta class for Album.\"\"\"\n\n ordering = (\"-date\", \"title\")\n", "path": "website/photos/models.py" } ]
diff --git a/website/photos/models.py b/website/photos/models.py index f479425a8..2d6498526 100644 --- a/website/photos/models.py +++ b/website/photos/models.py @@ -155,7 +155,7 @@ def save(self, **kwargs): self.dirname = self.slug if not self.title and self.event: - self.title = self.event.title_en + self.title = self.event.title if not self.date: self.date = self.event.start.date()
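For context on the record above: `Album.save()` still read the translated `Event.title_en` attribute after the model stopped defining it, hence the `AttributeError`. The sketch below uses hypothetical, stripped-down stand-ins for the Django models; the real concrexit classes carry many more fields and behaviours.

```python
# Hypothetical stand-ins, not the actual concrexit models.
class Event:
    def __init__(self, title: str):
        self.title = title        # a separate `title_en` attribute no longer exists

class Album:
    def __init__(self, event: Event, title: str = ""):
        self.event = event
        self.title = title

    def save(self) -> None:
        if not self.title and self.event:
            # Old lookup: `self.event.title_en` -> AttributeError once Event
            # stopped defining the translated field.
            self.title = self.event.title

album = Album(Event("Open Day"))
album.save()
print(album.title)                # -> Open Day
```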
mkdocs__mkdocs-1122
AttributeError: 'module' object has no attribute 'TornadoAsyncNotifier' Using Python 2.6.6 on CentOS, I'm unable to run the server with livereload: ``` [mkdocs@dev test-docs]$ python /home/mkdocs/.local/lib/python2.6/site-packages/mkdocs/__main__.py serve --dev-addr=0.0.0.0:8080 WARNING: Support for Python 2.6 will be dropped in the 1.0.0 release of MkDocs INFO - Building documentation... INFO - Cleaning site directory [I 161205 22:16:26 server:283] Serving on http://0.0.0.0:8080 [I 161205 22:16:26 handlers:60] Start watching changes Traceback (most recent call last): File "/home/mkdocs/.local/lib/python2.6/site-packages/mkdocs/__main__.py", line 227, in <module> cli() File "/home/mkdocs/.local/lib/python2.6/site-packages/click/core.py", line 716, in __call__ return self.main(*args, **kwargs) File "/home/mkdocs/.local/lib/python2.6/site-packages/click/core.py", line 696, in main rv = self.invoke(ctx) File "/home/mkdocs/.local/lib/python2.6/site-packages/click/core.py", line 1060, in invoke return _process_result(sub_ctx.command.invoke(sub_ctx)) File "/home/mkdocs/.local/lib/python2.6/site-packages/click/core.py", line 889, in invoke return ctx.invoke(self.callback, **ctx.params) File "/home/mkdocs/.local/lib/python2.6/site-packages/click/core.py", line 534, in invoke return callback(*args, **kwargs) File "/home/mkdocs/.local/lib/python2.6/site-packages/mkdocs/__main__.py", line 127, in serve_command livereload=livereload File "/home/mkdocs/.local/lib/python2.6/site-packages/mkdocs/commands/serve.py", line 88, in serve _livereload(host, port, config, builder, tempdir) File "/home/mkdocs/.local/lib/python2.6/site-packages/mkdocs/commands/serve.py", line 27, in _livereload server.serve(root=site_dir, host=host, port=int(port), restart_delay=0) File "/home/mkdocs/.local/lib/python2.6/site-packages/livereload/server.py", line 300, in serve LiveReloadHandler.start_tasks() File "/home/mkdocs/.local/lib/python2.6/site-packages/livereload/handlers.py", line 61, in start_tasks if not cls.watcher.start(cls.poll_tasks): File "/home/mkdocs/.local/lib/python2.6/site-packages/livereload/watcher.py", line 160, in start self.notifier = pyinotify.TornadoAsyncNotifier( AttributeError: 'module' object has no attribute 'TornadoAsyncNotifier' ``` I can workaround by using `--no-livereload`, but is there a way to get livereload working?
[ { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import print_function\nfrom setuptools import setup\nimport re\nimport os\nimport sys\n\nPY26 = sys.version_info[:2] == (2, 6)\n\n\nlong_description = (\n \"MkDocs is a fast, simple and downright gorgeous static site generator \"\n \"that's geared towards building project documentation. Documentation \"\n \"source files are written in Markdown, and configured with a single YAML \"\n \"configuration file.\"\n)\n\n\ndef get_version(package):\n \"\"\"Return package version as listed in `__version__` in `init.py`.\"\"\"\n init_py = open(os.path.join(package, '__init__.py')).read()\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", init_py).group(1)\n\n\ndef get_packages(package):\n \"\"\"Return root package and all sub-packages.\"\"\"\n return [dirpath\n for dirpath, dirnames, filenames in os.walk(package)\n if os.path.exists(os.path.join(dirpath, '__init__.py'))]\n\n\nif sys.argv[-1] == 'publish':\n if os.system(\"pip freeze | grep wheel\"):\n print(\"wheel not installed.\\nUse `pip install wheel`.\\nExiting.\")\n sys.exit()\n if os.system(\"pip freeze | grep twine\"):\n print(\"twine not installed.\\nUse `pip install twine`.\\nExiting.\")\n sys.exit()\n os.system(\"python setup.py sdist bdist_wheel\")\n os.system(\"twine upload dist/*\")\n print(\"You probably want to also tag the version now:\")\n print(\" git tag -a {0} -m 'version {0}'\".format(get_version(\"mkdocs\")))\n print(\" git push --tags\")\n sys.exit()\n\n\nsetup(\n name=\"mkdocs\",\n version=get_version(\"mkdocs\"),\n url='http://www.mkdocs.org',\n license='BSD',\n description='Project documentation with Markdown.',\n long_description=long_description,\n author='Tom Christie',\n author_email='[email protected]', # SEE NOTE BELOW (*)\n packages=get_packages(\"mkdocs\"),\n include_package_data=True,\n install_requires=[\n 'click>=3.3',\n 'Jinja2>=2.7.1',\n 'livereload>=2.3.2',\n 'Markdown>=2.3.1,<2.5' if PY26 else 'Markdown>=2.3.1',\n 'PyYAML>=3.10',\n 'tornado>=4.1',\n ],\n entry_points={\n 'console_scripts': [\n 'mkdocs = mkdocs.__main__:cli',\n ],\n 'mkdocs.themes': [\n 'mkdocs = mkdocs.themes.mkdocs',\n 'readthedocs = mkdocs.themes.readthedocs',\n ]\n },\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n \"Programming Language :: Python :: Implementation :: CPython\",\n 'Topic :: Documentation',\n 'Topic :: Text Processing',\n ],\n zip_safe=False,\n)\n\n# (*) Please direct queries to the discussion group:\n# https://groups.google.com/forum/#!forum/mkdocs\n", "path": "setup.py" } ]
[ { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import print_function\nfrom setuptools import setup\nimport re\nimport os\nimport sys\n\nPY26 = sys.version_info[:2] == (2, 6)\n\n\nlong_description = (\n \"MkDocs is a fast, simple and downright gorgeous static site generator \"\n \"that's geared towards building project documentation. Documentation \"\n \"source files are written in Markdown, and configured with a single YAML \"\n \"configuration file.\"\n)\n\n\ndef get_version(package):\n \"\"\"Return package version as listed in `__version__` in `init.py`.\"\"\"\n init_py = open(os.path.join(package, '__init__.py')).read()\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", init_py).group(1)\n\n\ndef get_packages(package):\n \"\"\"Return root package and all sub-packages.\"\"\"\n return [dirpath\n for dirpath, dirnames, filenames in os.walk(package)\n if os.path.exists(os.path.join(dirpath, '__init__.py'))]\n\n\nif sys.argv[-1] == 'publish':\n if os.system(\"pip freeze | grep wheel\"):\n print(\"wheel not installed.\\nUse `pip install wheel`.\\nExiting.\")\n sys.exit()\n if os.system(\"pip freeze | grep twine\"):\n print(\"twine not installed.\\nUse `pip install twine`.\\nExiting.\")\n sys.exit()\n os.system(\"python setup.py sdist bdist_wheel\")\n os.system(\"twine upload dist/*\")\n print(\"You probably want to also tag the version now:\")\n print(\" git tag -a {0} -m 'version {0}'\".format(get_version(\"mkdocs\")))\n print(\" git push --tags\")\n sys.exit()\n\n\nsetup(\n name=\"mkdocs\",\n version=get_version(\"mkdocs\"),\n url='http://www.mkdocs.org',\n license='BSD',\n description='Project documentation with Markdown.',\n long_description=long_description,\n author='Tom Christie',\n author_email='[email protected]', # SEE NOTE BELOW (*)\n packages=get_packages(\"mkdocs\"),\n include_package_data=True,\n install_requires=[\n 'click>=3.3',\n 'Jinja2>=2.7.1',\n 'livereload>=2.5.1',\n 'Markdown>=2.3.1,<2.5' if PY26 else 'Markdown>=2.3.1',\n 'PyYAML>=3.10',\n 'tornado>=4.1',\n ],\n entry_points={\n 'console_scripts': [\n 'mkdocs = mkdocs.__main__:cli',\n ],\n 'mkdocs.themes': [\n 'mkdocs = mkdocs.themes.mkdocs',\n 'readthedocs = mkdocs.themes.readthedocs',\n ]\n },\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n \"Programming Language :: Python :: Implementation :: CPython\",\n 'Topic :: Documentation',\n 'Topic :: Text Processing',\n ],\n zip_safe=False,\n)\n\n# (*) Please direct queries to the discussion group:\n# https://groups.google.com/forum/#!forum/mkdocs\n", "path": "setup.py" } ]
diff --git a/requirements/project-min.txt b/requirements/project-min.txt index 7dc6e95ecf..6b0262e0d1 100644 --- a/requirements/project-min.txt +++ b/requirements/project-min.txt @@ -1,6 +1,6 @@ click==3.3 Jinja2==2.7.1 -livereload==2.3.2 +livereload==2.5.1 Markdown==2.5 PyYAML==3.10 tornado==4.1 diff --git a/requirements/project.txt b/requirements/project.txt index 6e3c9da2ed..bc2ab3c5c6 100644 --- a/requirements/project.txt +++ b/requirements/project.txt @@ -1,6 +1,6 @@ click>=3.3 Jinja2>=2.7.1 -livereload>=2.3.2 +livereload>=2.5.1 Markdown>=2.5 PyYAML>=3.10 tornado>=4.1 diff --git a/setup.py b/setup.py index 81eb83feb3..cdafa1ddea 100755 --- a/setup.py +++ b/setup.py @@ -60,7 +60,7 @@ def get_packages(package): install_requires=[ 'click>=3.3', 'Jinja2>=2.7.1', - 'livereload>=2.3.2', + 'livereload>=2.5.1', 'Markdown>=2.3.1,<2.5' if PY26 else 'Markdown>=2.3.1', 'PyYAML>=3.10', 'tornado>=4.1',
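The fix in the record above is purely a dependency floor bump (`livereload>=2.5.1`), so there is no MkDocs code change to illustrate. As a rough, hypothetical aid — not part of MkDocs or of this patch — the sketch below checks whether an installed livereload already satisfies that minimum before falling back to `--no-livereload`. The `2.5.1` floor is taken from the diff; everything else is an assumption.

```python
# Hypothetical standalone check, not MkDocs code: verify that the installed
# livereload meets the minimum version pinned in the patch above.
import pkg_resources


def livereload_satisfies(minimum="2.5.1"):
    try:
        installed = pkg_resources.get_distribution("livereload").version
    except pkg_resources.DistributionNotFound:
        return False
    return pkg_resources.parse_version(installed) >= pkg_resources.parse_version(minimum)


if not livereload_satisfies():
    print("livereload is older than 2.5.1; upgrade it or serve with --no-livereload")
```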
roboflow__supervision-845
[LineZone] - flip in/out line crossing directions ## Description Between `supervision-0.17.0` and `supervision-0.18.0`, releases in/out of the direction of the crossing were accidentally changed. Given that `LineZone` is one of the oldest features we have we do not want to make life difficult for users and want to restore the previous behavior. The change made in this [PR](https://github.com/roboflow/supervision/pull/735), most likely in this [line](https://github.com/roboflow/supervision/blob/0ccb0b85adee4202f5fe96834a374a057bbbd9da/supervision/detection/line_counter.py#L140), is responsible for the change in behavior. https://github.com/roboflow/supervision/blob/0ccb0b85adee4202f5fe96834a374a057bbbd9da/supervision/detection/line_counter.py#L140 ### Minimal Reproducible Example You can easily confirm the crossing direction change between `supervision-0.17.0` and `supervision-0.18.0` releases using this [notebook](https://colab.research.google.com/github/roboflow-ai/notebooks/blob/main/notebooks/how-to-track-and-count-vehicles-with-yolov8-and-supervison.ipynb). Here are example results. __supervision-0.17.0__ https://github.com/roboflow/supervision/assets/26109316/32e0f95c-9204-4703-ab25-c2255a597720 __supervision-0.18.0__ https://github.com/roboflow/supervision/assets/26109316/af6db77e-24f8-4338-9925-3c80afe178f8 ### Additional - Note: Please share a Google Colab with minimal code to test the new feature. We know it's additional work, but it will speed up the review process. The reviewer must test each change. Setting up a local environment to do this is time-consuming. Please ensure that Google Colab can be accessed without any issues (make it public). Thank you! 🙏🏻
[ { "content": "from typing import Dict, Iterable, Optional, Tuple\n\nimport cv2\nimport numpy as np\n\nfrom supervision.detection.core import Detections\nfrom supervision.draw.color import Color\nfrom supervision.draw.utils import draw_text\nfrom supervision.geometry.core import Point, Position, Vector\n\n\nclass LineZone:\n \"\"\"\n This class is responsible for counting the number of objects that cross a\n predefined line.\n\n !!! warning\n\n LineZone uses the `tracker_id`. Read\n [here](/latest/trackers/) to learn how to plug\n tracking into your inference pipeline.\n\n Attributes:\n in_count (int): The number of objects that have crossed the line from outside\n to inside.\n out_count (int): The number of objects that have crossed the line from inside\n to outside.\n \"\"\"\n\n def __init__(\n self,\n start: Point,\n end: Point,\n triggering_anchors: Iterable[Position] = (\n Position.TOP_LEFT,\n Position.TOP_RIGHT,\n Position.BOTTOM_LEFT,\n Position.BOTTOM_RIGHT,\n ),\n ):\n \"\"\"\n Args:\n start (Point): The starting point of the line.\n end (Point): The ending point of the line.\n triggering_anchors (List[sv.Position]): A list of positions\n specifying which anchors of the detections bounding box\n to consider when deciding on whether the detection\n has passed the line counter or not. By default, this\n contains the four corners of the detection's bounding box\n \"\"\"\n self.vector = Vector(start=start, end=end)\n self.limits = self.calculate_region_of_interest_limits(vector=self.vector)\n self.tracker_state: Dict[str, bool] = {}\n self.in_count: int = 0\n self.out_count: int = 0\n self.triggering_anchors = triggering_anchors\n\n @staticmethod\n def calculate_region_of_interest_limits(vector: Vector) -> Tuple[Vector, Vector]:\n magnitude = vector.magnitude\n\n if magnitude == 0:\n raise ValueError(\"The magnitude of the vector cannot be zero.\")\n\n delta_x = vector.end.x - vector.start.x\n delta_y = vector.end.y - vector.start.y\n\n unit_vector_x = delta_x / magnitude\n unit_vector_y = delta_y / magnitude\n\n perpendicular_vector_x = -unit_vector_y\n perpendicular_vector_y = unit_vector_x\n\n start_region_limit = Vector(\n start=vector.start,\n end=Point(\n x=vector.start.x + perpendicular_vector_x,\n y=vector.start.y + perpendicular_vector_y,\n ),\n )\n end_region_limit = Vector(\n start=vector.end,\n end=Point(\n x=vector.end.x - perpendicular_vector_x,\n y=vector.end.y - perpendicular_vector_y,\n ),\n )\n return start_region_limit, end_region_limit\n\n @staticmethod\n def is_point_in_limits(point: Point, limits: Tuple[Vector, Vector]) -> bool:\n cross_product_1 = limits[0].cross_product(point)\n cross_product_2 = limits[1].cross_product(point)\n return (cross_product_1 > 0) == (cross_product_2 > 0)\n\n def trigger(self, detections: Detections) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Update the `in_count` and `out_count` based on the objects that cross the line.\n\n Args:\n detections (Detections): A list of detections for which to update the\n counts.\n\n Returns:\n A tuple of two boolean NumPy arrays. The first array indicates which\n detections have crossed the line from outside to inside. 
The second\n array indicates which detections have crossed the line from inside to\n outside.\n \"\"\"\n crossed_in = np.full(len(detections), False)\n crossed_out = np.full(len(detections), False)\n\n if len(detections) == 0:\n return crossed_in, crossed_out\n\n all_anchors = np.array(\n [\n detections.get_anchors_coordinates(anchor)\n for anchor in self.triggering_anchors\n ]\n )\n\n for i, tracker_id in enumerate(detections.tracker_id):\n if tracker_id is None:\n continue\n\n box_anchors = [Point(x=x, y=y) for x, y in all_anchors[:, i, :]]\n\n in_limits = all(\n [\n self.is_point_in_limits(point=anchor, limits=self.limits)\n for anchor in box_anchors\n ]\n )\n\n if not in_limits:\n continue\n\n triggers = [\n self.vector.cross_product(point=anchor) > 0 for anchor in box_anchors\n ]\n\n if len(set(triggers)) == 2:\n continue\n\n tracker_state = triggers[0]\n\n if tracker_id not in self.tracker_state:\n self.tracker_state[tracker_id] = tracker_state\n continue\n\n if self.tracker_state.get(tracker_id) == tracker_state:\n continue\n\n self.tracker_state[tracker_id] = tracker_state\n if tracker_state:\n self.in_count += 1\n crossed_in[i] = True\n else:\n self.out_count += 1\n crossed_out[i] = True\n\n return crossed_in, crossed_out\n\n\nclass LineZoneAnnotator:\n def __init__(\n self,\n thickness: float = 2,\n color: Color = Color.WHITE,\n text_thickness: float = 2,\n text_color: Color = Color.BLACK,\n text_scale: float = 0.5,\n text_offset: float = 1.5,\n text_padding: int = 10,\n custom_in_text: Optional[str] = None,\n custom_out_text: Optional[str] = None,\n display_in_count: bool = True,\n display_out_count: bool = True,\n ):\n \"\"\"\n Initialize the LineCounterAnnotator object with default values.\n\n Attributes:\n thickness (float): The thickness of the line that will be drawn.\n color (Color): The color of the line that will be drawn.\n text_thickness (float): The thickness of the text that will be drawn.\n text_color (Color): The color of the text that will be drawn.\n text_scale (float): The scale of the text that will be drawn.\n text_offset (float): The offset of the text that will be drawn.\n text_padding (int): The padding of the text that will be drawn.\n display_in_count (bool): Whether to display the in count or not.\n display_out_count (bool): Whether to display the out count or not.\n\n \"\"\"\n self.thickness: float = thickness\n self.color: Color = color\n self.text_thickness: float = text_thickness\n self.text_color: Color = text_color\n self.text_scale: float = text_scale\n self.text_offset: float = text_offset\n self.text_padding: int = text_padding\n self.custom_in_text: str = custom_in_text\n self.custom_out_text: str = custom_out_text\n self.display_in_count: bool = display_in_count\n self.display_out_count: bool = display_out_count\n\n def _annotate_count(\n self,\n frame: np.ndarray,\n center_text_anchor: Point,\n text: str,\n is_in_count: bool,\n ) -> None:\n \"\"\"This method is drawing the text on the frame.\n\n Args:\n frame (np.ndarray): The image on which the text will be drawn.\n center_text_anchor: The center point that the text will be drawn.\n text (str): The text that will be drawn.\n is_in_count (bool): Whether to display the in count or out count.\n \"\"\"\n _, text_height = cv2.getTextSize(\n text, cv2.FONT_HERSHEY_SIMPLEX, self.text_scale, self.text_thickness\n )[0]\n\n if is_in_count:\n center_text_anchor.y -= int(self.text_offset * text_height)\n else:\n center_text_anchor.y += int(self.text_offset * text_height)\n\n draw_text(\n scene=frame,\n 
text=text,\n text_anchor=center_text_anchor,\n text_color=self.text_color,\n text_scale=self.text_scale,\n text_thickness=self.text_thickness,\n text_padding=self.text_padding,\n background_color=self.color,\n )\n\n def annotate(self, frame: np.ndarray, line_counter: LineZone) -> np.ndarray:\n \"\"\"\n Draws the line on the frame using the line_counter provided.\n\n Attributes:\n frame (np.ndarray): The image on which the line will be drawn.\n line_counter (LineCounter): The line counter\n that will be used to draw the line.\n\n Returns:\n np.ndarray: The image with the line drawn on it.\n\n \"\"\"\n cv2.line(\n frame,\n line_counter.vector.start.as_xy_int_tuple(),\n line_counter.vector.end.as_xy_int_tuple(),\n self.color.as_bgr(),\n self.thickness,\n lineType=cv2.LINE_AA,\n shift=0,\n )\n cv2.circle(\n frame,\n line_counter.vector.start.as_xy_int_tuple(),\n radius=5,\n color=self.text_color.as_bgr(),\n thickness=-1,\n lineType=cv2.LINE_AA,\n )\n cv2.circle(\n frame,\n line_counter.vector.end.as_xy_int_tuple(),\n radius=5,\n color=self.text_color.as_bgr(),\n thickness=-1,\n lineType=cv2.LINE_AA,\n )\n\n text_anchor = Vector(\n start=line_counter.vector.start, end=line_counter.vector.end\n )\n\n if self.display_in_count:\n in_text = (\n f\"{self.custom_in_text}: {line_counter.in_count}\"\n if self.custom_in_text is not None\n else f\"in: {line_counter.in_count}\"\n )\n self._annotate_count(\n frame=frame,\n center_text_anchor=text_anchor.center,\n text=in_text,\n is_in_count=True,\n )\n\n if self.display_out_count:\n out_text = (\n f\"{self.custom_out_text}: {line_counter.out_count}\"\n if self.custom_out_text is not None\n else f\"out: {line_counter.out_count}\"\n )\n self._annotate_count(\n frame=frame,\n center_text_anchor=text_anchor.center,\n text=out_text,\n is_in_count=False,\n )\n return frame\n", "path": "supervision/detection/line_counter.py" } ]
[ { "content": "from typing import Dict, Iterable, Optional, Tuple\n\nimport cv2\nimport numpy as np\n\nfrom supervision.detection.core import Detections\nfrom supervision.draw.color import Color\nfrom supervision.draw.utils import draw_text\nfrom supervision.geometry.core import Point, Position, Vector\n\n\nclass LineZone:\n \"\"\"\n This class is responsible for counting the number of objects that cross a\n predefined line.\n\n !!! warning\n\n LineZone uses the `tracker_id`. Read\n [here](/latest/trackers/) to learn how to plug\n tracking into your inference pipeline.\n\n Attributes:\n in_count (int): The number of objects that have crossed the line from outside\n to inside.\n out_count (int): The number of objects that have crossed the line from inside\n to outside.\n \"\"\"\n\n def __init__(\n self,\n start: Point,\n end: Point,\n triggering_anchors: Iterable[Position] = (\n Position.TOP_LEFT,\n Position.TOP_RIGHT,\n Position.BOTTOM_LEFT,\n Position.BOTTOM_RIGHT,\n ),\n ):\n \"\"\"\n Args:\n start (Point): The starting point of the line.\n end (Point): The ending point of the line.\n triggering_anchors (List[sv.Position]): A list of positions\n specifying which anchors of the detections bounding box\n to consider when deciding on whether the detection\n has passed the line counter or not. By default, this\n contains the four corners of the detection's bounding box\n \"\"\"\n self.vector = Vector(start=start, end=end)\n self.limits = self.calculate_region_of_interest_limits(vector=self.vector)\n self.tracker_state: Dict[str, bool] = {}\n self.in_count: int = 0\n self.out_count: int = 0\n self.triggering_anchors = triggering_anchors\n\n @staticmethod\n def calculate_region_of_interest_limits(vector: Vector) -> Tuple[Vector, Vector]:\n magnitude = vector.magnitude\n\n if magnitude == 0:\n raise ValueError(\"The magnitude of the vector cannot be zero.\")\n\n delta_x = vector.end.x - vector.start.x\n delta_y = vector.end.y - vector.start.y\n\n unit_vector_x = delta_x / magnitude\n unit_vector_y = delta_y / magnitude\n\n perpendicular_vector_x = -unit_vector_y\n perpendicular_vector_y = unit_vector_x\n\n start_region_limit = Vector(\n start=vector.start,\n end=Point(\n x=vector.start.x + perpendicular_vector_x,\n y=vector.start.y + perpendicular_vector_y,\n ),\n )\n end_region_limit = Vector(\n start=vector.end,\n end=Point(\n x=vector.end.x - perpendicular_vector_x,\n y=vector.end.y - perpendicular_vector_y,\n ),\n )\n return start_region_limit, end_region_limit\n\n @staticmethod\n def is_point_in_limits(point: Point, limits: Tuple[Vector, Vector]) -> bool:\n cross_product_1 = limits[0].cross_product(point)\n cross_product_2 = limits[1].cross_product(point)\n return (cross_product_1 > 0) == (cross_product_2 > 0)\n\n def trigger(self, detections: Detections) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Update the `in_count` and `out_count` based on the objects that cross the line.\n\n Args:\n detections (Detections): A list of detections for which to update the\n counts.\n\n Returns:\n A tuple of two boolean NumPy arrays. The first array indicates which\n detections have crossed the line from outside to inside. 
The second\n array indicates which detections have crossed the line from inside to\n outside.\n \"\"\"\n crossed_in = np.full(len(detections), False)\n crossed_out = np.full(len(detections), False)\n\n if len(detections) == 0:\n return crossed_in, crossed_out\n\n all_anchors = np.array(\n [\n detections.get_anchors_coordinates(anchor)\n for anchor in self.triggering_anchors\n ]\n )\n\n for i, tracker_id in enumerate(detections.tracker_id):\n if tracker_id is None:\n continue\n\n box_anchors = [Point(x=x, y=y) for x, y in all_anchors[:, i, :]]\n\n in_limits = all(\n [\n self.is_point_in_limits(point=anchor, limits=self.limits)\n for anchor in box_anchors\n ]\n )\n\n if not in_limits:\n continue\n\n triggers = [\n self.vector.cross_product(point=anchor) < 0 for anchor in box_anchors\n ]\n\n if len(set(triggers)) == 2:\n continue\n\n tracker_state = triggers[0]\n\n if tracker_id not in self.tracker_state:\n self.tracker_state[tracker_id] = tracker_state\n continue\n\n if self.tracker_state.get(tracker_id) == tracker_state:\n continue\n\n self.tracker_state[tracker_id] = tracker_state\n if tracker_state:\n self.in_count += 1\n crossed_in[i] = True\n else:\n self.out_count += 1\n crossed_out[i] = True\n\n return crossed_in, crossed_out\n\n\nclass LineZoneAnnotator:\n def __init__(\n self,\n thickness: float = 2,\n color: Color = Color.WHITE,\n text_thickness: float = 2,\n text_color: Color = Color.BLACK,\n text_scale: float = 0.5,\n text_offset: float = 1.5,\n text_padding: int = 10,\n custom_in_text: Optional[str] = None,\n custom_out_text: Optional[str] = None,\n display_in_count: bool = True,\n display_out_count: bool = True,\n ):\n \"\"\"\n Initialize the LineCounterAnnotator object with default values.\n\n Attributes:\n thickness (float): The thickness of the line that will be drawn.\n color (Color): The color of the line that will be drawn.\n text_thickness (float): The thickness of the text that will be drawn.\n text_color (Color): The color of the text that will be drawn.\n text_scale (float): The scale of the text that will be drawn.\n text_offset (float): The offset of the text that will be drawn.\n text_padding (int): The padding of the text that will be drawn.\n display_in_count (bool): Whether to display the in count or not.\n display_out_count (bool): Whether to display the out count or not.\n\n \"\"\"\n self.thickness: float = thickness\n self.color: Color = color\n self.text_thickness: float = text_thickness\n self.text_color: Color = text_color\n self.text_scale: float = text_scale\n self.text_offset: float = text_offset\n self.text_padding: int = text_padding\n self.custom_in_text: str = custom_in_text\n self.custom_out_text: str = custom_out_text\n self.display_in_count: bool = display_in_count\n self.display_out_count: bool = display_out_count\n\n def _annotate_count(\n self,\n frame: np.ndarray,\n center_text_anchor: Point,\n text: str,\n is_in_count: bool,\n ) -> None:\n \"\"\"This method is drawing the text on the frame.\n\n Args:\n frame (np.ndarray): The image on which the text will be drawn.\n center_text_anchor: The center point that the text will be drawn.\n text (str): The text that will be drawn.\n is_in_count (bool): Whether to display the in count or out count.\n \"\"\"\n _, text_height = cv2.getTextSize(\n text, cv2.FONT_HERSHEY_SIMPLEX, self.text_scale, self.text_thickness\n )[0]\n\n if is_in_count:\n center_text_anchor.y -= int(self.text_offset * text_height)\n else:\n center_text_anchor.y += int(self.text_offset * text_height)\n\n draw_text(\n scene=frame,\n 
text=text,\n text_anchor=center_text_anchor,\n text_color=self.text_color,\n text_scale=self.text_scale,\n text_thickness=self.text_thickness,\n text_padding=self.text_padding,\n background_color=self.color,\n )\n\n def annotate(self, frame: np.ndarray, line_counter: LineZone) -> np.ndarray:\n \"\"\"\n Draws the line on the frame using the line_counter provided.\n\n Attributes:\n frame (np.ndarray): The image on which the line will be drawn.\n line_counter (LineCounter): The line counter\n that will be used to draw the line.\n\n Returns:\n np.ndarray: The image with the line drawn on it.\n\n \"\"\"\n cv2.line(\n frame,\n line_counter.vector.start.as_xy_int_tuple(),\n line_counter.vector.end.as_xy_int_tuple(),\n self.color.as_bgr(),\n self.thickness,\n lineType=cv2.LINE_AA,\n shift=0,\n )\n cv2.circle(\n frame,\n line_counter.vector.start.as_xy_int_tuple(),\n radius=5,\n color=self.text_color.as_bgr(),\n thickness=-1,\n lineType=cv2.LINE_AA,\n )\n cv2.circle(\n frame,\n line_counter.vector.end.as_xy_int_tuple(),\n radius=5,\n color=self.text_color.as_bgr(),\n thickness=-1,\n lineType=cv2.LINE_AA,\n )\n\n text_anchor = Vector(\n start=line_counter.vector.start, end=line_counter.vector.end\n )\n\n if self.display_in_count:\n in_text = (\n f\"{self.custom_in_text}: {line_counter.in_count}\"\n if self.custom_in_text is not None\n else f\"in: {line_counter.in_count}\"\n )\n self._annotate_count(\n frame=frame,\n center_text_anchor=text_anchor.center,\n text=in_text,\n is_in_count=True,\n )\n\n if self.display_out_count:\n out_text = (\n f\"{self.custom_out_text}: {line_counter.out_count}\"\n if self.custom_out_text is not None\n else f\"out: {line_counter.out_count}\"\n )\n self._annotate_count(\n frame=frame,\n center_text_anchor=text_anchor.center,\n text=out_text,\n is_in_count=False,\n )\n return frame\n", "path": "supervision/detection/line_counter.py" } ]
diff --git a/supervision/detection/line_counter.py b/supervision/detection/line_counter.py index 09efd49da..8f09f2197 100644 --- a/supervision/detection/line_counter.py +++ b/supervision/detection/line_counter.py @@ -137,7 +137,7 @@ def trigger(self, detections: Detections) -> Tuple[np.ndarray, np.ndarray]: continue triggers = [ - self.vector.cross_product(point=anchor) > 0 for anchor in box_anchors + self.vector.cross_product(point=anchor) < 0 for anchor in box_anchors ] if len(set(triggers)) == 2:
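The one-character fix above flips the sign test on a 2-D cross product, which is what decides on which side of the counting line an anchor sits. The sketch below is a minimal, self-contained illustration of that side-of-line test using plain tuples; it deliberately does not use supervision's `Vector`/`Point` classes, and its sign convention is only meant to show why changing `>` to `<` swaps the in/out directions.

```python
# Minimal side-of-line sketch (plain tuples, not supervision classes).
def cross_sign(start, end, point):
    # z-component of (end - start) x (point - start)
    return (end[0] - start[0]) * (point[1] - start[1]) \
        - (end[1] - start[1]) * (point[0] - start[0])


start, end = (0.0, 0.0), (10.0, 0.0)      # the counting line
above, below = (5.0, 3.0), (5.0, -3.0)    # anchors on opposite sides of it

print(cross_sign(start, end, above))  # 30.0  -> one side of the line
print(cross_sign(start, end, below))  # -30.0 -> the other side

# Using `cross > 0` as the trigger treats `above` as the tracked state;
# using `cross < 0` (the restored 0.17.0 convention) treats `below` instead,
# so every "in" crossing becomes an "out" crossing and vice versa.
```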
python-gitlab__python-gitlab-1058
Scope "bug" ## Description of the problem, including code/CLI snippet I am using the list method of project.issues to list issues with certain label. I am searching issue having "vulcheck" as label. In my project I have one issue with "vulcheck" label ```python def issue_by_project_label(self): print(self._project.issues.list(labels="vulcheck")) ``` ## Expected Behavior It should return me the issues with label "vulcheck". I have one issue with label "vulcheck". ## Actual Behavior It is returning empty list ## Specifications - python-gitlab version: 2.1.2 - API version you are using (v3/v4): v4 - Gitlab server version (or gitlab.com): 12.6.6-ee
[ { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2018 Gauvain Pocentek <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n\n\nclass GitlabAttribute(object):\n def __init__(self, value=None):\n self._value = value\n\n def get(self):\n return self._value\n\n def set_from_cli(self, cli_value):\n self._value = cli_value\n\n def get_for_api(self):\n return self._value\n\n\nclass ListAttribute(GitlabAttribute):\n def set_from_cli(self, cli_value):\n if not cli_value.strip():\n self._value = []\n else:\n self._value = [item.strip() for item in cli_value.split(\",\")]\n\n def get_for_api(self):\n return \",\".join(self._value)\n\n\nclass LowercaseStringAttribute(GitlabAttribute):\n def get_for_api(self):\n return str(self._value).lower()\n\n\nclass FileAttribute(GitlabAttribute):\n def get_file_name(self, attr_name=None):\n return attr_name\n\n\nclass ImageAttribute(FileAttribute):\n def get_file_name(self, attr_name=None):\n return \"%s.png\" % attr_name if attr_name else \"image.png\"\n", "path": "gitlab/types.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2018 Gauvain Pocentek <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n\n\nclass GitlabAttribute(object):\n def __init__(self, value=None):\n self._value = value\n\n def get(self):\n return self._value\n\n def set_from_cli(self, cli_value):\n self._value = cli_value\n\n def get_for_api(self):\n return self._value\n\n\nclass ListAttribute(GitlabAttribute):\n def set_from_cli(self, cli_value):\n if not cli_value.strip():\n self._value = []\n else:\n self._value = [item.strip() for item in cli_value.split(\",\")]\n\n def get_for_api(self):\n # Do not comma-split single value passed as string\n if isinstance(self._value, str):\n return self._value\n\n return \",\".join(self._value)\n\n\nclass LowercaseStringAttribute(GitlabAttribute):\n def get_for_api(self):\n return str(self._value).lower()\n\n\nclass FileAttribute(GitlabAttribute):\n def get_file_name(self, attr_name=None):\n return attr_name\n\n\nclass ImageAttribute(FileAttribute):\n def get_file_name(self, attr_name=None):\n return \"%s.png\" % attr_name if attr_name else \"image.png\"\n", "path": "gitlab/types.py" } ]
diff --git a/gitlab/tests/test_types.py b/gitlab/tests/test_types.py index 5b9f2caf8..3613383de 100644 --- a/gitlab/tests/test_types.py +++ b/gitlab/tests/test_types.py @@ -51,11 +51,19 @@ def test_empty_input(self): o.set_from_cli(" ") self.assertEqual([], o.get()) - def test_get_for_api(self): + def test_get_for_api_from_cli(self): o = types.ListAttribute() o.set_from_cli("foo,bar,baz") self.assertEqual("foo,bar,baz", o.get_for_api()) + def test_get_for_api_from_list(self): + o = types.ListAttribute(["foo", "bar", "baz"]) + self.assertEqual("foo,bar,baz", o.get_for_api()) + + def test_get_for_api_does_not_split_string(self): + o = types.ListAttribute("foo") + self.assertEqual("foo", o.get_for_api()) + class TestLowercaseStringAttribute(unittest.TestCase): def test_get_for_api(self): diff --git a/gitlab/types.py b/gitlab/types.py index 525dc3043..e07d078e1 100644 --- a/gitlab/types.py +++ b/gitlab/types.py @@ -38,6 +38,10 @@ def set_from_cli(self, cli_value): self._value = [item.strip() for item in cli_value.split(",")] def get_for_api(self): + # Do not comma-split single value passed as string + if isinstance(self._value, str): + return self._value + return ",".join(self._value) diff --git a/tools/python_test_v4.py b/tools/python_test_v4.py index 69b0d3181..e0cb3a609 100644 --- a/tools/python_test_v4.py +++ b/tools/python_test_v4.py @@ -677,10 +677,17 @@ assert type(issue1.closed_by()) == list assert type(issue1.related_merge_requests()) == list -# issues labels and events +# issue labels label2 = admin_project.labels.create({"name": "label2", "color": "#aabbcc"}) issue1.labels = ["label2"] issue1.save() + +assert issue1 in admin_project.issues.list(labels=["label2"]) +assert issue1 in admin_project.issues.list(labels="label2") +assert issue1 in admin_project.issues.list(labels="Any") +assert issue1 not in admin_project.issues.list(labels="None") + +# issue events events = issue1.resourcelabelevents.list() assert events event = issue1.resourcelabelevents.get(events[0].id)
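The root cause in this record can be reproduced without python-gitlab at all: `str.join` iterates over a string character by character, so the old `ListAttribute.get_for_api` turned `labels="vulcheck"` into a filter for eight one-letter labels. The snippet below only demonstrates that plain-Python behaviour; the label names are taken from the issue, and `"triaged"` is an illustrative extra value.

```python
# Why labels="vulcheck" matched nothing before the fix:
value_as_string = "vulcheck"
value_as_list = ["vulcheck", "triaged"]   # hypothetical second label, for illustration

print(",".join(value_as_string))  # v,u,l,c,h,e,c,k  -> nonsensical label filter
print(",".join(value_as_list))    # vulcheck,triaged -> the intended filter

# The patched get_for_api returns a plain string unchanged and only joins
# real lists, so both labels="vulcheck" and labels=["vulcheck"] now work.
```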
TileDB-Inc__TileDB-Py-214
Junk results using numpy structured arrays Hey, I am attempting to use a sparse multi attribute arrays but for some reason I seem to be getting junk results TileDB-Py version (0.33.4) Python version (3.6.9) Here is a reproducing example (apologies that it is not very minimal) ``` import numpy as np from numpy import array import tiledb import shutil from contextlib import suppress # define a domain and schema NOTES_DTYPE = [('swing', 'float32', 1), ('pitch', 'uint8', 1), ('duration', 'float32', 1), ('velocity', 'uint8', 1)] song_dim = tiledb.Dim(name="song_idx", domain=(0, np.iinfo(np.int32).max-1), tile=1, dtype=np.int32) track_dim = tiledb.Dim(name="track_idx", domain=(0, np.iinfo(np.int32).max-1), tile=1, dtype=np.int32) bar_dim = tiledb.Dim(name="bar_idx", domain=(0, np.iinfo(np.int32).max-1), tile=1, dtype=np.int32) beat_dim = tiledb.Dim(name="beat_no", domain=(0, 3), tile=1, dtype=np.int32) note_dim = tiledb.Dim(name="note_idx", domain=(0, np.iinfo(np.int32).max-1), tile=1, dtype=np.int32) notes_dom = tiledb.Domain( song_dim, track_dim, bar_dim, beat_dim, note_dim, ) notes_schema = tiledb.ArraySchema(domain=notes_dom, sparse=True, attrs=[ tiledb.Attr(name=prop_name, dtype=getattr(np, dtype)) for prop_name, dtype, _ in NOTES_DTYPE] ) # define some test data test_data = {'notes_coords': array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],[0, 0, 0, 1, 1, 1, 2, 2, 2, 2],[0, 1, 2, 0, 1, 2, 0, 1, 2, 3]]), 'notes_data': array([( 0. , 78, 0.25, 127), ( 0.25, 66, 0.25, 127),( 0.5 , 82, 0.25, 127), (-0.25, 66, 0.25, 127),( 0. , 80, 0.25, 127), ( 0.25, 66, 0.25, 127),(-0.5 , 72, 0.25, 127), (-0.25, 66, 0.25, 127),( 0. , 73, 0.25, 127), ( 0.25, 66, 0.25, 127)], dtype=[('swing', '<f4'), ('pitch', 'u1'), ('duration', '<f4'), ('velocity', 'u1')]) } # create array with suppress(FileNotFoundError): shutil.rmtree(f'test_tile') tiledb.SparseArray.create(f'test_tile', notes_schema) # write test data to array tile_array = lambda mode: tiledb.SparseArray('test_tile', mode=mode) stuctured_array_to_dict = lambda arr: {name: arr[name] for name in arr.dtype.names} with tile_array('w') as A: A[(*test_data['notes_coords'],)] = \ stuctured_array_to_dict(test_data['notes_data']) # read data back from array with tile_array('r') as A: from_tile = A[0] # none are matching assert any([(from_tile['velocity']==test_data['notes_data']['velocity']).all(), (from_tile['swing']==test_data['notes_data']['swing']).all(), (from_tile['pitch']==test_data['notes_data']['pitch']).all(), (from_tile['duration']==test_data['notes_data']['duration']).all()]) ``` upon inspecting these array it can be seen that the data in them looks to be uninitialized (floats are >1e40) Am I perhaps missing something important here?
[ { "content": "from __future__ import absolute_import, print_function\n\nimport multiprocessing\nimport os\nimport shutil\nimport subprocess\nimport zipfile\nimport platform\nfrom distutils.sysconfig import get_config_var\nfrom distutils.version import LooseVersion\n\n\ntry:\n # For Python 3\n from urllib.request import urlopen\n import io\n\n def get_zipfile(url):\n \"\"\"Returns a ZipFile constructed from the file at the given URL.\"\"\"\n r = urlopen(url)\n return zipfile.ZipFile(io.BytesIO(r.read()))\nexcept ImportError:\n # Python 2\n from urllib2 import urlopen\n import StringIO\n\n def get_zipfile(url):\n \"\"\"Returns a ZipFile constructed from the file at the given URL.\"\"\"\n r = urlopen(url)\n return zipfile.ZipFile(StringIO.StringIO(r.read()))\n\nfrom setuptools import setup, Extension, find_packages\nfrom pkg_resources import resource_filename\n\nimport sys\nfrom sys import version_info as ver\n\n# Target branch\nTILEDB_VERSION = \"dev\"\n# allow overriding w/ environment variable\nTILEDB_VERSION = os.environ.get(\"TILEDB_VERSION\") or TILEDB_VERSION\n\n# Use `setup.py [] --debug` for a debug build of libtiledb\nTILEDB_DEBUG_BUILD = False\n\n# Use `setup.py [] --modular` for a modular build of libtiledb_py\n# Each .pyx file will be built as a separate shared library for faster\n# compilation. This is disabled by default to avoid distributing multiple\n# shared libraries.\nTILEDBPY_MODULAR = False\n\n# Allow to override TILEDB_FORCE_ALL_DEPS with environment variable\nTILEDB_FORCE_ALL_DEPS = \"TILEDB_FORCE_ALL_DEPS\" in os.environ\nTILEDB_SERIALIZATION = \"TILEDB_SERIALIZATION\" in os.environ\nCMAKE_GENERATOR = os.environ.get(\"CMAKE_GENERATOR\", None)\n\n# Directory containing this file\nCONTAINING_DIR = os.path.abspath(os.path.dirname(__file__))\n\n# Build directory path\nBUILD_DIR = os.path.join(CONTAINING_DIR, \"build\")\n\n# TileDB package source directory\nTILEDB_PKG_DIR = os.path.join(CONTAINING_DIR, \"tiledb\")\n\n# Set deployment target for mac\n#\n# Need to ensure thatextensions are built for macos 10.9 when compiling on a\n# 10.9 system or above, overriding distutils behaviour which is to target\n# the version used to build the current python binary.\n#\n# TO OVERRIDE:\n# set MACOSX_DEPLOYMENT_TARGET before calling setup.py\n#\n# From https://github.com/pandas-dev/pandas/pull/24274\n# 3-Clause BSD License: https://github.com/pandas-dev/pandas/blob/master/LICENSE\nif sys.platform == 'darwin':\n if 'MACOSX_DEPLOYMENT_TARGET' not in os.environ:\n current_system = LooseVersion(platform.mac_ver()[0])\n python_target = LooseVersion(\n get_config_var('MACOSX_DEPLOYMENT_TARGET'))\n if python_target < '10.9' and current_system >= '10.9':\n os.environ['MACOSX_DEPLOYMENT_TARGET'] = '10.9'\n\ndef is_windows():\n return os.name == 'nt'\n\ndef _libtiledb_exists(library_dirs):\n \"\"\"\n Checks the given list of paths and returns true if any contain the TileDB library.\n :return: The path to the TileDB library, or None.\n \"\"\"\n\n print(\"libtiledb_exists checking 'library_dirs': {}\".format(library_dirs))\n\n if len(library_dirs) > 0:\n names = libtiledb_library_names()\n paths = [os.path.join(d, n) for d in library_dirs for n in names]\n for p in paths:\n if os.path.exists(p):\n return p\n raise RuntimeError(\"Could not find given --tiledb library path(s):\\n{}\"\n .format(\"\\n\".join(paths)))\n # If no explicit path is given check to see if TileDB is globally installed.\n import ctypes\n if os.name == \"posix\":\n if sys.platform == \"darwin\":\n lib_name = 
\"libtiledb.dylib\"\n else:\n lib_name = \"libtiledb.so\"\n elif os.name == \"nt\":\n lib_name = \"tiledb.dll\"\n try:\n # note: this is a relative path on linux\n # https://bugs.python.org/issue21042\n ctypes.CDLL(lib_name)\n return lib_name\n except:\n pass\n\n return None\n\ndef libtiledb_exists(library_dirs):\n lib = _libtiledb_exists(library_dirs)\n print(\"libtiledb_exists found: '{}'\".format(lib))\n return lib\n\n\ndef libtiledb_library_names():\n \"\"\"\n :return: List of TileDB shared library names.\n \"\"\"\n if os.name == \"posix\":\n if sys.platform == \"darwin\":\n return [\"libtiledb.dylib\"]\n else:\n return [\"libtiledb.so\"]\n elif os.name == \"nt\":\n return [\"tiledb.dll\"]\n else:\n raise RuntimeError(\"Unsupported OS name \" + os.name)\n\n\ndef download_libtiledb():\n \"\"\"\n Downloads the native TileDB source.\n :return: Path to extracted source directory.\n \"\"\"\n dest_name = \"TileDB-{}\".format(TILEDB_VERSION)\n dest = os.path.join(BUILD_DIR, dest_name)\n if not os.path.exists(dest):\n url = \"https://github.com/TileDB-Inc/TileDB/archive/{}.zip\".format(TILEDB_VERSION)\n print(\"Downloading TileDB package from {}...\".format(TILEDB_VERSION))\n with get_zipfile(url) as z:\n z.extractall(BUILD_DIR)\n return dest\n\n\ndef build_libtiledb(src_dir):\n \"\"\"\n Builds and installs the native TileDB library.\n :param src_dir: Path to libtiledb source directory.\n :return: Path to the directory where the library was installed.\n \"\"\"\n libtiledb_build_dir = os.path.join(src_dir, \"build\")\n libtiledb_install_dir = os.path.join(src_dir, \"dist\")\n if not os.path.exists(libtiledb_build_dir):\n os.makedirs(libtiledb_build_dir)\n\n print(\"Building libtiledb in directory {}...\".format(libtiledb_build_dir))\n cmake = os.environ.get(\"CMAKE\", \"cmake\")\n cmake_cmd = [cmake,\n \"-DCMAKE_INSTALL_PREFIX={}\".format(libtiledb_install_dir),\n \"-DTILEDB_TESTS=OFF\",\n \"-DTILEDB_S3=ON\",\n \"-DTILEDB_HDFS={}\".format(\"ON\" if os.name == \"posix\" else \"OFF\"),\n \"-DTILEDB_INSTALL_LIBDIR=lib\",\n \"-DTILEDB_CPP_API=OFF\",\n \"-DTILEDB_FORCE_ALL_DEPS:BOOL={}\".format(\"ON\" if TILEDB_FORCE_ALL_DEPS else \"OFF\"),\n \"-DTILEDB_SERIALIZATION:BOOL={}\".format(\"ON\" if TILEDB_SERIALIZATION else \"OFF\")\n ]\n\n extra_cmake_args = os.environ.get(\"CMAKE_ARGS\", [])\n if extra_cmake_args:\n cmake_cmd.extend(extra_cmake_args.split())\n\n if TILEDB_DEBUG_BUILD:\n build_type = \"Debug\"\n else:\n build_type = \"Release\"\n\n cmake_cmd.append(\"-DCMAKE_BUILD_TYPE={}\".format(build_type))\n\n if os.name == 'nt':\n cmake_cmd.extend(['-A', 'x64', \"-DMSVC_MP_FLAG=/MP4\"])\n\n if CMAKE_GENERATOR:\n cmake_cmd.extend(['-G', CMAKE_GENERATOR])\n\n # cmake target directory -- important\n cmake_cmd.append(src_dir)\n\n print(\"CMake configure command: {}\".format(cmake_cmd))\n\n have_make = True\n try:\n subprocess.check_call([\"make\", \"-v\"])\n except:\n have_make = False\n\n if have_make and not os.name == 'nt':\n njobs = multiprocessing.cpu_count() or 2\n build_cmd = [\"make\", \"-j{:d}\".format(njobs)]\n install_cmd = [\"make\", \"install-tiledb\"]\n else:\n build_cmd = [\"cmake\", \"--build\", \".\", \"--config\", build_type]\n install_cmd = [\"cmake\", \"--build\", \".\", \"--config\", build_type, \"--target\", \"install-tiledb\"]\n\n # Build and install libtiledb\n # - run cmake\n # - run build via 'cmake --build'\n # - run install-tiledb\n subprocess.check_call(cmake_cmd, cwd=libtiledb_build_dir)\n subprocess.check_call(build_cmd, cwd=libtiledb_build_dir)\n 
subprocess.check_call(install_cmd, cwd=libtiledb_build_dir)\n\n if not 'TILEDB_PATH' in os.environ:\n os.environ['TILEDB_PATH'] = libtiledb_install_dir\n return libtiledb_install_dir\n\n\ndef find_or_install_libtiledb(setuptools_cmd):\n \"\"\"\n Find the TileDB library required for building the Cython extension. If not found,\n download, build and install TileDB, copying the resulting shared libraries\n into a path where they will be found by package_data.\n\n :param setuptools_cmd: The setuptools command instance.\n \"\"\"\n tiledb_ext = None\n for ext in setuptools_cmd.distribution.ext_modules:\n if ext.name == \"tiledb.libtiledb\":\n tiledb_ext = ext\n break\n\n # Download, build and locally install TileDB if needed.\n if not libtiledb_exists(tiledb_ext.library_dirs):\n src_dir = download_libtiledb()\n install_dir = build_libtiledb(src_dir)\n lib_subdir = 'bin' if os.name=='nt' else 'lib'\n native_subdir = '' if is_windows() else 'native'\n # Copy libtiledb shared object(s) to the package directory so they can be found\n # with package_data.\n dest_dir = os.path.join(TILEDB_PKG_DIR, native_subdir)\n for libname in libtiledb_library_names():\n src = os.path.join(install_dir, lib_subdir, libname)\n if not os.path.exists(dest_dir):\n os.makedirs(dest_dir)\n dest = os.path.join(dest_dir, libname)\n print(\"Copying file {0} to {1}\".format(src, dest))\n shutil.copy(src, dest)\n\n # TODO hack\n # also copy the lib file for dependees\n # this needs to come before\n if is_windows():\n def do_copy(src, dest):\n print(\"Copying file {0} to {1}\".format(src, dest))\n shutil.copy(src, dest)\n\n # lib files for linking\n src = os.path.join(install_dir, \"lib\", \"tiledb.lib\")\n dest = os.path.join(dest_dir, \"tiledb.lib\")\n do_copy(src, dest)\n\n # tbb\n src = os.path.join(install_dir, \"bin\", \"tbb.dll\")\n dest = os.path.join(dest_dir, \"tbb.dll\")\n do_copy(src, dest)\n src = os.path.join(install_dir, \"lib\", \"tbb.lib\")\n dest = os.path.join(dest_dir, \"tbb.lib\")\n do_copy(src, dest)\n\n #\n tiledb_ext.library_dirs += [os.path.join(install_dir, \"lib\")]\n\n # Update the TileDB Extension instance with correct paths.\n tiledb_ext.library_dirs += [os.path.join(install_dir, lib_subdir)]\n tiledb_ext.include_dirs += [os.path.join(install_dir, \"include\")]\n # Update package_data so the shared object gets installed with the Python module.\n libtiledb_objects = [os.path.join(native_subdir, libname) for libname in libtiledb_library_names()]\n if is_windows():\n libtiledb_objects.extend(\n [os.path.join(native_subdir, libname) for libname in\n [\"tiledb.lib\", \"tbb.dll\", \"tbb.lib\"]])\n print(\"libtiledb_objects: \", libtiledb_objects)\n setuptools_cmd.distribution.package_data.update({\"tiledb\": libtiledb_objects})\n\n\nclass LazyCommandClass(dict):\n \"\"\"\n Lazy command class that defers operations requiring Cython and numpy until\n they've actually been downloaded and installed by setup_requires.\n \"\"\"\n\n def __contains__(self, key):\n return (\n key in ['build_ext', 'bdist_wheel', 'bdist_egg']\n or super(LazyCommandClass, self).__contains__(key)\n )\n\n def __setitem__(self, key, value):\n if key == 'build_ext':\n raise AssertionError(\"build_ext overridden!\")\n super(LazyCommandClass, self).__setitem__(key, value)\n\n def __getitem__(self, key):\n if key == 'build_ext':\n return self.make_build_ext_cmd()\n elif key == 'bdist_wheel':\n return self.make_bdist_wheel_cmd()\n elif key == 'bdist_egg':\n return self.make_bdist_egg_cmd()\n else:\n return super(LazyCommandClass, 
self).__getitem__(key)\n\n def make_build_ext_cmd(self):\n \"\"\"\n :return: A command class implementing 'build_ext'.\n \"\"\"\n from Cython.Distutils import build_ext as cython_build_ext\n\n class build_ext(cython_build_ext):\n \"\"\"\n Custom build_ext command that lazily adds numpy's include_dir to\n extensions.\n \"\"\"\n\n def build_extensions(self):\n \"\"\"\n Lazily append numpy's include directory to Extension includes.\n\n This is done here rather than at module scope because setup.py\n may be run before numpy has been installed, in which case\n importing numpy and calling `numpy.get_include()` will fail.\n \"\"\"\n numpy_incl = resource_filename('numpy', 'core/include')\n for ext in self.extensions:\n ext.include_dirs.append(numpy_incl)\n\n find_or_install_libtiledb(self)\n\n # This explicitly calls the superclass method rather than the\n # usual super() invocation because distutils' build_class, of\n # which Cython's build_ext is a subclass, is an old-style class\n # in Python 2, which doesn't support `super`.\n cython_build_ext.build_extensions(self)\n\n return build_ext\n\n def make_bdist_wheel_cmd(self):\n \"\"\"\n :return: A command class implementing 'bdist_wheel'.\n \"\"\"\n from wheel.bdist_wheel import bdist_wheel\n\n class bdist_wheel_cmd(bdist_wheel):\n def run(self):\n # This may modify package_data:\n find_or_install_libtiledb(self)\n bdist_wheel.run(self)\n\n return bdist_wheel_cmd\n\n def make_bdist_egg_cmd(self):\n \"\"\"\n :return: A command class implementing 'bdist_egg'.\n \"\"\"\n from setuptools.command.bdist_egg import bdist_egg\n\n class bdist_egg_cmd(bdist_egg):\n def run(self):\n # This may modify package_data:\n find_or_install_libtiledb(self)\n bdist_egg.run(self)\n\n return bdist_egg_cmd\n\n\ndef cmake_available():\n \"\"\"\n Checks whether CMake command is available and >= version 3.3.\n :return:\n \"\"\"\n try:\n output = subprocess.check_output(['cmake', '--version']).split()\n version = output[2].decode('utf-8').split('.')\n return int(version[0]) >= 3 and int(version[1]) >= 3\n except:\n return False\n\nnumpy_required_version = 'numpy<=1.16' if sys.hexversion <0x3050000 else 'numpy>=1.7'\ndef setup_requires():\n req = ['cython>=0.27',\n numpy_required_version,\n 'setuptools>=18.0',\n 'setuptools_scm>=1.5.4',\n 'wheel>=0.30']\n # Add cmake requirement if libtiledb is not found and cmake is not available.\n if not libtiledb_exists(LIB_DIRS) and not cmake_available():\n req.append('cmake>=3.11.0')\n return req\n\n\nTESTS_REQUIRE = []\nif ver < (3,):\n TESTS_REQUIRE.extend([\"unittest2\", \"mock\"])\n\n# Global variables\nCXXFLAGS = os.environ.get(\"CXXFLAGS\", \"\").split()\nif not is_windows():\n CXXFLAGS.append(\"-std=c++11\")\n if not TILEDB_DEBUG_BUILD:\n CXXFLAGS.append(\"-Wno-deprecated-declarations\")\n\nLFLAGS = os.environ.get(\"LFLAGS\", \"\").split()\n\n# Allow setting (lib) TileDB directory if it is installed on the system\nTILEDB_PATH = os.environ.get(\"TILEDB_PATH\", \"\")\n\n# Sources & libraries\nINC_DIRS = []\nLIB_DIRS = []\nLIBS = [\"tiledb\"]\nDEF_MACROS = []\n\n# Pass command line flags to setup.py script\n# handle --tiledb=[PATH] --lflags=[FLAGS] --cxxflags=[FLAGS]\nargs = sys.argv[:]\nfor arg in args:\n if arg.find('--tiledb=') == 0:\n TILEDB_PATH = os.path.expanduser(arg.split('=')[1])\n sys.argv.remove(arg)\n if arg.find('--lflags=') == 0:\n LFLAGS = arg.split('=')[1].split()\n sys.argv.remove(arg)\n if arg.find('--cxxflags=') == 0:\n CXXFLAGS = arg.split('=')[1].split()\n sys.argv.remove(arg)\n if arg.find('--debug') == 0:\n 
TILEDB_DEBUG_BUILD = True\n sys.argv.remove(arg)\n if arg.find('--modular') == 0:\n TILEDBPY_MODULAR = True\n sys.argv.remove(arg)\n\nif TILEDB_PATH != '':\n LIB_DIRS += [os.path.join(TILEDB_PATH, 'lib')]\n if sys.platform.startswith(\"linux\"):\n LIB_DIRS += [os.path.join(TILEDB_PATH, 'lib64'),\n os.path.join(TILEDB_PATH, 'lib', 'x86_64-linux-gnu')]\n elif os.name == 'nt':\n LIB_DIRS += [os.path.join(TILEDB_PATH, 'bin')]\n INC_DIRS += [os.path.join(TILEDB_PATH, 'include')]\n if sys.platform == 'darwin':\n LFLAGS += ['-Wl,-rpath,{}'.format(p) for p in LIB_DIRS]\n\nwith open('README.rst') as f:\n README_RST = f.read()\n\n# Source files for build\nMODULAR_SOURCES = [\n 'tiledb/np2buf.pyx',\n 'tiledb/indexing.pyx',\n ]\nMODULAR_HEADERS = [\n 'tiledb/libtiledb.pxd',\n 'tiledb/np2buf.pxd',\n 'tiledb/indexing.pxd'\n ]\n\n__extensions = [\n Extension(\n \"tiledb.libtiledb\",\n include_dirs=INC_DIRS,\n define_macros=DEF_MACROS,\n sources=[\"tiledb/libtiledb.pyx\"],\n depends=MODULAR_HEADERS,\n library_dirs=LIB_DIRS,\n libraries=LIBS,\n extra_link_args=LFLAGS,\n extra_compile_args=CXXFLAGS,\n language=\"c++\"\n )\n]\n\nif TILEDBPY_MODULAR:\n for source in MODULAR_SOURCES:\n module_name = os.path.splitext(os.path.split(source)[-1])[0]\n ext = Extension(\n \"tiledb.{}\".format(module_name),\n include_dirs=INC_DIRS,\n define_macros=DEF_MACROS,\n sources=[source],\n library_dirs=LIB_DIRS,\n libraries=LIBS,\n extra_link_args=LFLAGS,\n extra_compile_args=CXXFLAGS,\n language=\"c++\"\n )\n __extensions.append(ext)\nelse:\n __extensions[0].depends += MODULAR_SOURCES\n\n# Helper to set Extension attributes correctly based on python version\ndef ext_attr_update(attr, value):\n for x in __extensions:\n if sys.version_info < (3,0):\n x.__dict__[attr] = value\n else:\n x.__setattr__(attr, value)\n\n# Monkey patches to be forwarded to cythonize\n# some of these will error out if passed directly\n# to Extension(..) 
above\n\n# - build with `#line` directive annotations\n# (equivalent to `emit_linenums` command line directive)\next_attr_update('cython_line_directives', 1)\n\n# - generate XML debug mapping file (`cython_debug`)\nif TILEDB_DEBUG_BUILD:\n ext_attr_update('cython_gdb', True)\n# - set rt lib dirs to get correct RPATH on unixy platforms\n# note that we set rpath for darwin separately above.\nif not is_windows():\n ext_attr_update('runtime_library_dirs', LIB_DIRS)\n\n# This must always be set so the compile-time conditional has a value\next_attr_update('cython_compile_time_env', {'TILEDBPY_MODULAR': TILEDBPY_MODULAR})\n\nsetup(\n name='tiledb',\n description=\"Pythonic interface to the TileDB array storage manager\",\n long_description=README_RST,\n author='TileDB, Inc.',\n author_email='[email protected]',\n maintainer='TileDB, Inc.',\n maintainer_email='[email protected]',\n url='https://github.com/TileDB-Inc/TileDB-Py',\n license='MIT',\n platforms=['any'],\n use_scm_version={\n 'version_scheme': 'guess-next-dev',\n 'local_scheme': 'dirty-tag',\n 'write_to': 'tiledb/version.py'\n },\n ext_modules=__extensions,\n setup_requires=setup_requires(),\n install_requires=[\n numpy_required_version,\n 'wheel>=0.30'\n ],\n tests_require=TESTS_REQUIRE,\n packages=find_packages(),\n cmdclass=LazyCommandClass(),\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Information Technology',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Operating System :: Unix',\n 'Operating System :: POSIX :: Linux',\n 'Operating System :: MacOS :: MacOS X',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n ],\n)\n", "path": "setup.py" } ]
[ { "content": "from __future__ import absolute_import, print_function\n\nimport multiprocessing\nimport os\nimport shutil\nimport subprocess\nimport zipfile\nimport platform\nfrom distutils.sysconfig import get_config_var\nfrom distutils.version import LooseVersion\n\n\ntry:\n # For Python 3\n from urllib.request import urlopen\n import io\n\n def get_zipfile(url):\n \"\"\"Returns a ZipFile constructed from the file at the given URL.\"\"\"\n r = urlopen(url)\n return zipfile.ZipFile(io.BytesIO(r.read()))\nexcept ImportError:\n # Python 2\n from urllib2 import urlopen\n import StringIO\n\n def get_zipfile(url):\n \"\"\"Returns a ZipFile constructed from the file at the given URL.\"\"\"\n r = urlopen(url)\n return zipfile.ZipFile(StringIO.StringIO(r.read()))\n\nfrom setuptools import setup, Extension, find_packages\nfrom pkg_resources import resource_filename\n\nimport sys\nfrom sys import version_info as ver\n\n# Target branch\nTILEDB_VERSION = \"dev\"\n# allow overriding w/ environment variable\nTILEDB_VERSION = os.environ.get(\"TILEDB_VERSION\") or TILEDB_VERSION\n\n# Use `setup.py [] --debug` for a debug build of libtiledb\nTILEDB_DEBUG_BUILD = False\n\n# Use `setup.py [] --modular` for a modular build of libtiledb_py\n# Each .pyx file will be built as a separate shared library for faster\n# compilation. This is disabled by default to avoid distributing multiple\n# shared libraries.\nTILEDBPY_MODULAR = False\n\n# Allow to override TILEDB_FORCE_ALL_DEPS with environment variable\nTILEDB_FORCE_ALL_DEPS = \"TILEDB_FORCE_ALL_DEPS\" in os.environ\nTILEDB_SERIALIZATION = \"TILEDB_SERIALIZATION\" in os.environ\nCMAKE_GENERATOR = os.environ.get(\"CMAKE_GENERATOR\", None)\n\n# Directory containing this file\nCONTAINING_DIR = os.path.abspath(os.path.dirname(__file__))\n\n# Build directory path\nBUILD_DIR = os.path.join(CONTAINING_DIR, \"build\")\n\n# TileDB package source directory\nTILEDB_PKG_DIR = os.path.join(CONTAINING_DIR, \"tiledb\")\n\n# Set deployment target for mac\n#\n# Need to ensure thatextensions are built for macos 10.9 when compiling on a\n# 10.9 system or above, overriding distutils behaviour which is to target\n# the version used to build the current python binary.\n#\n# TO OVERRIDE:\n# set MACOSX_DEPLOYMENT_TARGET before calling setup.py\n#\n# From https://github.com/pandas-dev/pandas/pull/24274\n# 3-Clause BSD License: https://github.com/pandas-dev/pandas/blob/master/LICENSE\nif sys.platform == 'darwin':\n if 'MACOSX_DEPLOYMENT_TARGET' not in os.environ:\n current_system = LooseVersion(platform.mac_ver()[0])\n python_target = LooseVersion(\n get_config_var('MACOSX_DEPLOYMENT_TARGET'))\n if python_target < '10.9' and current_system >= '10.9':\n os.environ['MACOSX_DEPLOYMENT_TARGET'] = '10.9'\n\ndef is_windows():\n return os.name == 'nt'\n\ndef _libtiledb_exists(library_dirs):\n \"\"\"\n Checks the given list of paths and returns true if any contain the TileDB library.\n :return: The path to the TileDB library, or None.\n \"\"\"\n\n print(\"libtiledb_exists checking 'library_dirs': {}\".format(library_dirs))\n\n if len(library_dirs) > 0:\n names = libtiledb_library_names()\n paths = [os.path.join(d, n) for d in library_dirs for n in names]\n for p in paths:\n if os.path.exists(p):\n return p\n raise RuntimeError(\"Could not find given --tiledb library path(s):\\n{}\"\n .format(\"\\n\".join(paths)))\n # If no explicit path is given check to see if TileDB is globally installed.\n import ctypes\n if os.name == \"posix\":\n if sys.platform == \"darwin\":\n lib_name = 
\"libtiledb.dylib\"\n else:\n lib_name = \"libtiledb.so\"\n elif os.name == \"nt\":\n lib_name = \"tiledb.dll\"\n try:\n # note: this is a relative path on linux\n # https://bugs.python.org/issue21042\n ctypes.CDLL(lib_name)\n return lib_name\n except:\n pass\n\n return None\n\ndef libtiledb_exists(library_dirs):\n lib = _libtiledb_exists(library_dirs)\n print(\"libtiledb_exists found: '{}'\".format(lib))\n return lib\n\n\ndef libtiledb_library_names():\n \"\"\"\n :return: List of TileDB shared library names.\n \"\"\"\n if os.name == \"posix\":\n if sys.platform == \"darwin\":\n return [\"libtiledb.dylib\"]\n else:\n return [\"libtiledb.so\"]\n elif os.name == \"nt\":\n return [\"tiledb.dll\"]\n else:\n raise RuntimeError(\"Unsupported OS name \" + os.name)\n\n\ndef download_libtiledb():\n \"\"\"\n Downloads the native TileDB source.\n :return: Path to extracted source directory.\n \"\"\"\n dest_name = \"TileDB-{}\".format(TILEDB_VERSION)\n dest = os.path.join(BUILD_DIR, dest_name)\n if not os.path.exists(dest):\n url = \"https://github.com/TileDB-Inc/TileDB/archive/{}.zip\".format(TILEDB_VERSION)\n print(\"Downloading TileDB package from {}...\".format(TILEDB_VERSION))\n with get_zipfile(url) as z:\n z.extractall(BUILD_DIR)\n return dest\n\n\ndef build_libtiledb(src_dir):\n \"\"\"\n Builds and installs the native TileDB library.\n :param src_dir: Path to libtiledb source directory.\n :return: Path to the directory where the library was installed.\n \"\"\"\n libtiledb_build_dir = os.path.join(src_dir, \"build\")\n libtiledb_install_dir = os.path.join(src_dir, \"dist\")\n if not os.path.exists(libtiledb_build_dir):\n os.makedirs(libtiledb_build_dir)\n\n print(\"Building libtiledb in directory {}...\".format(libtiledb_build_dir))\n cmake = os.environ.get(\"CMAKE\", \"cmake\")\n cmake_cmd = [cmake,\n \"-DCMAKE_INSTALL_PREFIX={}\".format(libtiledb_install_dir),\n \"-DTILEDB_TESTS=OFF\",\n \"-DTILEDB_S3=ON\",\n \"-DTILEDB_HDFS={}\".format(\"ON\" if os.name == \"posix\" else \"OFF\"),\n \"-DTILEDB_INSTALL_LIBDIR=lib\",\n \"-DTILEDB_CPP_API=OFF\",\n \"-DTILEDB_FORCE_ALL_DEPS:BOOL={}\".format(\"ON\" if TILEDB_FORCE_ALL_DEPS else \"OFF\"),\n \"-DTILEDB_SERIALIZATION:BOOL={}\".format(\"ON\" if TILEDB_SERIALIZATION else \"OFF\")\n ]\n\n extra_cmake_args = os.environ.get(\"CMAKE_ARGS\", [])\n if extra_cmake_args:\n cmake_cmd.extend(extra_cmake_args.split())\n\n if TILEDB_DEBUG_BUILD:\n build_type = \"Debug\"\n else:\n build_type = \"Release\"\n\n cmake_cmd.append(\"-DCMAKE_BUILD_TYPE={}\".format(build_type))\n\n if os.name == 'nt':\n cmake_cmd.extend(['-A', 'x64', \"-DMSVC_MP_FLAG=/MP4\"])\n\n if CMAKE_GENERATOR:\n cmake_cmd.extend(['-G', CMAKE_GENERATOR])\n\n # cmake target directory -- important\n cmake_cmd.append(src_dir)\n\n print(\"CMake configure command: {}\".format(cmake_cmd))\n\n have_make = True\n try:\n subprocess.check_call([\"make\", \"-v\"])\n except:\n have_make = False\n\n if have_make and not os.name == 'nt':\n njobs = multiprocessing.cpu_count() or 2\n build_cmd = [\"make\", \"-j{:d}\".format(njobs)]\n install_cmd = [\"make\", \"install-tiledb\"]\n else:\n build_cmd = [\"cmake\", \"--build\", \".\", \"--config\", build_type]\n install_cmd = [\"cmake\", \"--build\", \".\", \"--config\", build_type, \"--target\", \"install-tiledb\"]\n\n # Build and install libtiledb\n # - run cmake\n # - run build via 'cmake --build'\n # - run install-tiledb\n subprocess.check_call(cmake_cmd, cwd=libtiledb_build_dir)\n subprocess.check_call(build_cmd, cwd=libtiledb_build_dir)\n 
subprocess.check_call(install_cmd, cwd=libtiledb_build_dir)\n\n if not 'TILEDB_PATH' in os.environ:\n os.environ['TILEDB_PATH'] = libtiledb_install_dir\n return libtiledb_install_dir\n\n\ndef find_or_install_libtiledb(setuptools_cmd):\n \"\"\"\n Find the TileDB library required for building the Cython extension. If not found,\n download, build and install TileDB, copying the resulting shared libraries\n into a path where they will be found by package_data.\n\n :param setuptools_cmd: The setuptools command instance.\n \"\"\"\n tiledb_ext = None\n for ext in setuptools_cmd.distribution.ext_modules:\n if ext.name == \"tiledb.libtiledb\":\n tiledb_ext = ext\n break\n\n # Download, build and locally install TileDB if needed.\n if not libtiledb_exists(tiledb_ext.library_dirs):\n src_dir = download_libtiledb()\n install_dir = build_libtiledb(src_dir)\n lib_subdir = 'bin' if os.name=='nt' else 'lib'\n native_subdir = '' if is_windows() else 'native'\n # Copy libtiledb shared object(s) to the package directory so they can be found\n # with package_data.\n dest_dir = os.path.join(TILEDB_PKG_DIR, native_subdir)\n for libname in libtiledb_library_names():\n src = os.path.join(install_dir, lib_subdir, libname)\n if not os.path.exists(dest_dir):\n os.makedirs(dest_dir)\n dest = os.path.join(dest_dir, libname)\n print(\"Copying file {0} to {1}\".format(src, dest))\n shutil.copy(src, dest)\n\n # TODO hack\n # also copy the lib file for dependees\n # this needs to come before\n if is_windows():\n def do_copy(src, dest):\n print(\"Copying file {0} to {1}\".format(src, dest))\n shutil.copy(src, dest)\n\n # lib files for linking\n src = os.path.join(install_dir, \"lib\", \"tiledb.lib\")\n dest = os.path.join(dest_dir, \"tiledb.lib\")\n do_copy(src, dest)\n\n # tbb\n src = os.path.join(install_dir, \"bin\", \"tbb.dll\")\n dest = os.path.join(dest_dir, \"tbb.dll\")\n do_copy(src, dest)\n src = os.path.join(install_dir, \"lib\", \"tbb.lib\")\n dest = os.path.join(dest_dir, \"tbb.lib\")\n do_copy(src, dest)\n\n #\n tiledb_ext.library_dirs += [os.path.join(install_dir, \"lib\")]\n\n # Update the TileDB Extension instance with correct paths.\n tiledb_ext.library_dirs += [os.path.join(install_dir, lib_subdir)]\n tiledb_ext.include_dirs += [os.path.join(install_dir, \"include\")]\n # Update package_data so the shared object gets installed with the Python module.\n libtiledb_objects = [os.path.join(native_subdir, libname) for libname in libtiledb_library_names()]\n if is_windows():\n libtiledb_objects.extend(\n [os.path.join(native_subdir, libname) for libname in\n [\"tiledb.lib\", \"tbb.dll\", \"tbb.lib\"]])\n print(\"libtiledb_objects: \", libtiledb_objects)\n setuptools_cmd.distribution.package_data.update({\"tiledb\": libtiledb_objects})\n\n\nclass LazyCommandClass(dict):\n \"\"\"\n Lazy command class that defers operations requiring Cython and numpy until\n they've actually been downloaded and installed by setup_requires.\n \"\"\"\n\n def __contains__(self, key):\n return (\n key in ['build_ext', 'bdist_wheel', 'bdist_egg']\n or super(LazyCommandClass, self).__contains__(key)\n )\n\n def __setitem__(self, key, value):\n if key == 'build_ext':\n raise AssertionError(\"build_ext overridden!\")\n super(LazyCommandClass, self).__setitem__(key, value)\n\n def __getitem__(self, key):\n if key == 'build_ext':\n return self.make_build_ext_cmd()\n elif key == 'bdist_wheel':\n return self.make_bdist_wheel_cmd()\n elif key == 'bdist_egg':\n return self.make_bdist_egg_cmd()\n else:\n return super(LazyCommandClass, 
self).__getitem__(key)\n\n def make_build_ext_cmd(self):\n \"\"\"\n :return: A command class implementing 'build_ext'.\n \"\"\"\n from Cython.Distutils import build_ext as cython_build_ext\n\n class build_ext(cython_build_ext):\n \"\"\"\n Custom build_ext command that lazily adds numpy's include_dir to\n extensions.\n \"\"\"\n\n def build_extensions(self):\n \"\"\"\n Lazily append numpy's include directory to Extension includes.\n\n This is done here rather than at module scope because setup.py\n may be run before numpy has been installed, in which case\n importing numpy and calling `numpy.get_include()` will fail.\n \"\"\"\n numpy_incl = resource_filename('numpy', 'core/include')\n for ext in self.extensions:\n ext.include_dirs.append(numpy_incl)\n\n find_or_install_libtiledb(self)\n\n # This explicitly calls the superclass method rather than the\n # usual super() invocation because distutils' build_class, of\n # which Cython's build_ext is a subclass, is an old-style class\n # in Python 2, which doesn't support `super`.\n cython_build_ext.build_extensions(self)\n\n return build_ext\n\n def make_bdist_wheel_cmd(self):\n \"\"\"\n :return: A command class implementing 'bdist_wheel'.\n \"\"\"\n from wheel.bdist_wheel import bdist_wheel\n\n class bdist_wheel_cmd(bdist_wheel):\n def run(self):\n # This may modify package_data:\n find_or_install_libtiledb(self)\n bdist_wheel.run(self)\n\n return bdist_wheel_cmd\n\n def make_bdist_egg_cmd(self):\n \"\"\"\n :return: A command class implementing 'bdist_egg'.\n \"\"\"\n from setuptools.command.bdist_egg import bdist_egg\n\n class bdist_egg_cmd(bdist_egg):\n def run(self):\n # This may modify package_data:\n find_or_install_libtiledb(self)\n bdist_egg.run(self)\n\n return bdist_egg_cmd\n\n\ndef cmake_available():\n \"\"\"\n Checks whether CMake command is available and >= version 3.3.\n :return:\n \"\"\"\n try:\n output = subprocess.check_output(['cmake', '--version']).split()\n version = output[2].decode('utf-8').split('.')\n return int(version[0]) >= 3 and int(version[1]) >= 3\n except:\n return False\n\nnumpy_required_version = 'numpy<=1.16' if sys.hexversion <0x3050000 else 'numpy>=1.7'\ndef setup_requires():\n req = ['cython>=0.27',\n numpy_required_version,\n 'setuptools>=18.0',\n 'setuptools_scm>=1.5.4',\n 'wheel>=0.30']\n # Add cmake requirement if libtiledb is not found and cmake is not available.\n if not libtiledb_exists(LIB_DIRS) and not cmake_available():\n req.append('cmake>=3.11.0')\n return req\n\n\nTESTS_REQUIRE = []\nif ver < (3,):\n TESTS_REQUIRE.extend([\"unittest2\", \"mock\"])\n\n# Global variables\nCXXFLAGS = os.environ.get(\"CXXFLAGS\", \"\").split()\nif not is_windows():\n CXXFLAGS.append(\"-std=c++11\")\n if not TILEDB_DEBUG_BUILD:\n CXXFLAGS.append(\"-Wno-deprecated-declarations\")\n\nLFLAGS = os.environ.get(\"LFLAGS\", \"\").split()\n\n# Allow setting (lib) TileDB directory if it is installed on the system\nTILEDB_PATH = os.environ.get(\"TILEDB_PATH\", \"\")\n\n# Sources & libraries\nINC_DIRS = []\nLIB_DIRS = []\nLIBS = [\"tiledb\"]\nDEF_MACROS = []\n\n# Pass command line flags to setup.py script\n# handle --tiledb=[PATH] --lflags=[FLAGS] --cxxflags=[FLAGS]\nargs = sys.argv[:]\nfor arg in args:\n if arg.find('--tiledb=') == 0:\n TILEDB_PATH = os.path.expanduser(arg.split('=')[1])\n sys.argv.remove(arg)\n if arg.find('--lflags=') == 0:\n LFLAGS = arg.split('=')[1].split()\n sys.argv.remove(arg)\n if arg.find('--cxxflags=') == 0:\n CXXFLAGS = arg.split('=')[1].split()\n sys.argv.remove(arg)\n if arg.find('--debug') == 0:\n 
TILEDB_DEBUG_BUILD = True\n sys.argv.remove(arg)\n if arg.find('--modular') == 0:\n TILEDBPY_MODULAR = True\n sys.argv.remove(arg)\n\nif TILEDB_PATH != '':\n LIB_DIRS += [os.path.join(TILEDB_PATH, 'lib')]\n if sys.platform.startswith(\"linux\"):\n LIB_DIRS += [os.path.join(TILEDB_PATH, 'lib64'),\n os.path.join(TILEDB_PATH, 'lib', 'x86_64-linux-gnu')]\n elif os.name == 'nt':\n LIB_DIRS += [os.path.join(TILEDB_PATH, 'bin')]\n INC_DIRS += [os.path.join(TILEDB_PATH, 'include')]\n if sys.platform == 'darwin':\n LFLAGS += ['-Wl,-rpath,{}'.format(p) for p in LIB_DIRS]\n\nwith open('README.rst') as f:\n README_RST = f.read()\n\n# Source files for build\nMODULAR_SOURCES = [\n 'tiledb/np2buf.pyx',\n 'tiledb/indexing.pyx',\n ]\nMODULAR_HEADERS = [\n 'tiledb/libtiledb.pxd',\n 'tiledb/np2buf.pxd',\n 'tiledb/indexing.pxd'\n ]\n\n__extensions = [\n Extension(\n \"tiledb.libtiledb\",\n include_dirs=INC_DIRS,\n define_macros=DEF_MACROS,\n sources=[\"tiledb/libtiledb.pyx\"],\n depends=MODULAR_HEADERS,\n library_dirs=LIB_DIRS,\n libraries=LIBS,\n extra_link_args=LFLAGS,\n extra_compile_args=CXXFLAGS,\n language=\"c++\"\n )\n]\n\nif TILEDBPY_MODULAR:\n for source in MODULAR_SOURCES:\n module_name = os.path.splitext(os.path.split(source)[-1])[0]\n ext = Extension(\n \"tiledb.{}\".format(module_name),\n include_dirs=INC_DIRS,\n define_macros=DEF_MACROS,\n sources=[source],\n library_dirs=LIB_DIRS,\n libraries=LIBS,\n extra_link_args=LFLAGS,\n extra_compile_args=CXXFLAGS,\n language=\"c++\"\n )\n __extensions.append(ext)\nelse:\n __extensions[0].depends += MODULAR_SOURCES\n\n# Helper to set Extension attributes correctly based on python version\ndef ext_attr_update(attr, value):\n for x in __extensions:\n if sys.version_info < (3,0):\n x.__dict__[attr] = value\n else:\n x.__setattr__(attr, value)\n\n# Monkey patches to be forwarded to cythonize\n# some of these will error out if passed directly\n# to Extension(..) 
above\n\n# - build with `#line` directive annotations\n# (equivalent to `emit_linenums` command line directive)\next_attr_update('cython_line_directives', 1)\n\n# - generate XML debug mapping file (`cython_debug`)\nif TILEDB_DEBUG_BUILD:\n ext_attr_update('cython_gdb', True)\n# - set rt lib dirs to get correct RPATH on unixy platforms\n# note that we set rpath for darwin separately above.\nif not is_windows():\n ext_attr_update('runtime_library_dirs', LIB_DIRS)\n\n# This must always be set so the compile-time conditional has a value\next_attr_update('cython_compile_time_env', {'TILEDBPY_MODULAR': TILEDBPY_MODULAR})\n\nsetup(\n name='tiledb',\n description=\"Pythonic interface to the TileDB array storage manager\",\n long_description=README_RST,\n author='TileDB, Inc.',\n author_email='[email protected]',\n maintainer='TileDB, Inc.',\n maintainer_email='[email protected]',\n url='https://github.com/TileDB-Inc/TileDB-Py',\n license='MIT',\n platforms=['any'],\n use_scm_version={\n 'version_scheme': 'guess-next-dev',\n 'local_scheme': 'dirty-tag',\n 'write_to': 'tiledb/version.py'\n },\n ext_modules=__extensions,\n setup_requires=setup_requires(),\n install_requires=[\n numpy_required_version,\n 'wheel>=0.30'\n ],\n tests_require=TESTS_REQUIRE,\n packages=find_packages(),\n cmdclass=LazyCommandClass(),\n zip_safe=False,\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Information Technology',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Operating System :: Unix',\n 'Operating System :: POSIX :: Linux',\n 'Operating System :: MacOS :: MacOS X',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n ],\n)\n", "path": "setup.py" } ]
diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 05b890b247..2b70d6602d 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -32,7 +32,7 @@ steps: displayName: 'Print env' - script: | - python -m pip install --upgrade pip setuptools wheel numpy tox setuptools-scm cython psutil dask + python -m pip install --upgrade setuptools wheel numpy tox setuptools-scm cython psutil dask displayName: 'Install dependencies' - script: | diff --git a/requirements.txt b/requirements.txt index 52835b1948..3379637831 100644 --- a/requirements.txt +++ b/requirements.txt @@ -3,5 +3,5 @@ cython>=0.27.3 numpy==1.16.* setuptools>=18.0.1 setuptools-scm>=1.5.4 -wheel>=0.30.0 +wheel>=0.30 psutil diff --git a/requirements_dev.txt b/requirements_dev.txt index ba32c50b95..a5b2db4fa6 100644 --- a/requirements_dev.txt +++ b/requirements_dev.txt @@ -5,6 +5,6 @@ cython==0.27.3 numpy==1.16.* setuptools==40.8.0 setuptools-scm==1.5.4 -wheel==0.30.0 +wheel>=0.30.0 tox==3.0.0 psutil diff --git a/setup.py b/setup.py index 778cb8386d..ef4bd7ca7a 100644 --- a/setup.py +++ b/setup.py @@ -564,6 +564,7 @@ def ext_attr_update(attr, value): tests_require=TESTS_REQUIRE, packages=find_packages(), cmdclass=LazyCommandClass(), + zip_safe=False, classifiers=[ 'Development Status :: 4 - Beta', 'Intended Audience :: Developers', diff --git a/tiledb/libtiledb.pyx b/tiledb/libtiledb.pyx index db7226edc4..29edd0c891 100644 --- a/tiledb/libtiledb.pyx +++ b/tiledb/libtiledb.pyx @@ -4757,7 +4757,7 @@ cdef class SparseArray(Array): if self.schema.nattr == 1 and not isinstance(val, dict): attr = self.attr(0) name = attr.name - value = np.asarray(val, dtype=attr.dtype) + value = np.ascontiguousarray(val, dtype=attr.dtype) if len(value) != ncells: raise ValueError("value length does not match coordinate length") sparse_attributes.append(name) @@ -4766,7 +4766,7 @@ cdef class SparseArray(Array): for (k, v) in dict(val).items(): attr = self.attr(k) name = attr.name - value = v if attr.dtype is 'O' else np.asarray(v, dtype=attr.dtype) + value = v if attr.dtype is 'O' else np.ascontiguousarray(v, dtype=attr.dtype) if len(value) != ncells: raise ValueError("value length does not match coordinate length") sparse_attributes.append(name) diff --git a/tiledb/np2buf.pyx b/tiledb/np2buf.pyx index 88de926599..c076b19ab3 100644 --- a/tiledb/np2buf.pyx +++ b/tiledb/np2buf.pyx @@ -64,7 +64,7 @@ def array_to_buffer(object val): assert((arr.dtype == np.dtype('O') or np.issubdtype(arr.dtype, np.bytes_) or np.issubdtype(arr.dtype, np.unicode_)), - "_pack_varlen_bytes: input array must be np.object or np.bytes!") + "array_to_buffer: input array must be np.object or np.bytes!") firstdtype = _varlen_cell_dtype(arr.flat[0]) # item size
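The libtiledb.pyx hunks in the diff above swap `np.asarray` for `np.ascontiguousarray` when packing attribute values. A minimal sketch of the difference, written independently of the TileDB codebase: a strided view such as a column slice passes through `np.asarray` unchanged, while `np.ascontiguousarray` forces a C-contiguous copy, which is what native code expecting a flat buffer needs.

```python
import numpy as np

# A column slice of a 2-D array is a strided view, not a contiguous buffer.
matrix = np.arange(12, dtype=np.int64).reshape(3, 4)
column = matrix[:, 1]

view = np.asarray(column, dtype=np.int64)               # no copy; keeps the original strides
packed = np.ascontiguousarray(column, dtype=np.int64)   # forces a C-contiguous copy

print(view.flags["C_CONTIGUOUS"])    # False
print(packed.flags["C_CONTIGUOUS"])  # True
```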
pennersr__django-allauth-967
TypeError: <function save at 0x7f9b4eab48c0> is not JSON serializable I'm trying to write Slack provider plugin but just can't get to log in. I'm constantly seeing the error below. Oddly, I was able to login twice out of 20-30 attempts I made and I didn't really make any changes to the code. This is also happening if I try to login using bundled LinkedIn oAuth2 provider. I've no clue what is going on. The same code and requirements work on my staging server just fine. I also reset my database to make sure it's is not corrupt data but that didn't fix it either. No user, social account or social token is created. I can share the preliminary code I wrote for slack provider but I doubt that has got anything to do with it since it happens with the linkedin provider also. I tried version 0.15, 0.20 and master. Same thing. Any pointers? ``` python Environment: Request Method: GET Request URL: http://localhost:8000/accounts/slack/login/callback/?code=xxxxxxxxxx&state=xxxxxxx Django Version: 1.7.7 Python Version: 2.7.3 Traceback: File "/usr/local/lib/python2.7/dist-packages/django/core/handlers/base.py" in get_response 111. response = wrapped_callback(request, *callback_args, **callback_kwargs) File "/usr/local/lib/python2.7/dist-packages/allauth/socialaccount/providers/oauth2/views.py" in view 55. return self.dispatch(request, *args, **kwargs) File "/usr/local/lib/python2.7/dist-packages/allauth/socialaccount/providers/oauth2/views.py" in dispatch 125. return complete_social_login(request, login) File "/usr/local/lib/python2.7/dist-packages/allauth/socialaccount/helpers.py" in complete_social_login 145. return _complete_social_login(request, sociallogin) File "/usr/local/lib/python2.7/dist-packages/allauth/socialaccount/helpers.py" in _complete_social_login 161. ret = _process_signup(request, sociallogin) File "/usr/local/lib/python2.7/dist-packages/allauth/socialaccount/helpers.py" in _process_signup 27. request.session['socialaccount_sociallogin'] = sociallogin.serialize() File "/usr/local/lib/python2.7/dist-packages/allauth/socialaccount/models.py" in serialize 187. user=serialize_instance(self.user), File "/usr/local/lib/python2.7/dist-packages/allauth/utils.py" in serialize_instance 162. return json.loads(json.dumps(ret, cls=DjangoJSONEncoder)) File "/usr/lib/python2.7/json/__init__.py" in dumps 238. **kw).encode(obj) File "/usr/lib/python2.7/json/encoder.py" in encode 201. chunks = self.iterencode(o, _one_shot=True) File "/usr/lib/python2.7/json/encoder.py" in iterencode 264. return _iterencode(o, 0) File "/usr/local/lib/python2.7/dist-packages/django/core/serializers/json.py" in default 109. return super(DjangoJSONEncoder, self).default(o) File "/usr/lib/python2.7/json/encoder.py" in default 178. raise TypeError(repr(o) + " is not JSON serializable") Exception Type: TypeError at /accounts/slack/login/callback/ Exception Value: <function save at 0x7f0ac718c8c0> is not JSON serializable ```
[ { "content": "import re\nimport unicodedata\nimport json\n\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.core.validators import validate_email, ValidationError\nfrom django.core import urlresolvers\nfrom django.contrib.sites.models import Site\nfrom django.db.models import FieldDoesNotExist\nfrom django.db.models.fields import (DateTimeField, DateField,\n EmailField, TimeField)\nfrom django.utils import six, dateparse\nfrom django.utils.datastructures import SortedDict\nfrom django.core.serializers.json import DjangoJSONEncoder\ntry:\n from django.utils.encoding import force_text\nexcept ImportError:\n from django.utils.encoding import force_unicode as force_text\n\ntry:\n import importlib\nexcept:\n from django.utils import importlib\n\n\ndef _generate_unique_username_base(txts, regex=None):\n username = None\n regex = regex or '[^\\w\\s@+.-]'\n for txt in txts:\n if not txt:\n continue\n username = unicodedata.normalize('NFKD', force_text(txt))\n username = username.encode('ascii', 'ignore').decode('ascii')\n username = force_text(re.sub(regex, '', username).lower())\n # Django allows for '@' in usernames in order to accomodate for\n # project wanting to use e-mail for username. In allauth we don't\n # use this, we already have a proper place for putting e-mail\n # addresses (EmailAddress), so let's not use the full e-mail\n # address and only take the part leading up to the '@'.\n username = username.split('@')[0]\n username = username.strip()\n username = re.sub('\\s+', '_', username)\n if username:\n break\n return username or 'user'\n\n\ndef get_username_max_length():\n from .account.app_settings import USER_MODEL_USERNAME_FIELD\n if USER_MODEL_USERNAME_FIELD is not None:\n User = get_user_model()\n max_length = User._meta.get_field(USER_MODEL_USERNAME_FIELD).max_length\n else:\n max_length = 0\n return max_length\n\n\ndef generate_unique_username(txts, regex=None):\n from .account.app_settings import USER_MODEL_USERNAME_FIELD\n username = _generate_unique_username_base(txts, regex)\n User = get_user_model()\n max_length = get_username_max_length()\n i = 0\n while True:\n try:\n if i:\n pfx = str(i + 1)\n else:\n pfx = ''\n ret = username[0:max_length - len(pfx)] + pfx\n query = {USER_MODEL_USERNAME_FIELD + '__iexact': ret}\n User.objects.get(**query)\n i += 1\n except User.DoesNotExist:\n return ret\n\n\ndef valid_email_or_none(email):\n ret = None\n try:\n if email:\n validate_email(email)\n if len(email) <= EmailField().max_length:\n ret = email\n except ValidationError:\n pass\n return ret\n\n\ndef email_address_exists(email, exclude_user=None):\n from .account import app_settings as account_settings\n from .account.models import EmailAddress\n\n emailaddresses = EmailAddress.objects\n if exclude_user:\n emailaddresses = emailaddresses.exclude(user=exclude_user)\n ret = emailaddresses.filter(email__iexact=email).exists()\n if not ret:\n email_field = account_settings.USER_MODEL_EMAIL_FIELD\n if email_field:\n users = get_user_model().objects\n if exclude_user:\n users = users.exclude(pk=exclude_user.pk)\n ret = users.filter(**{email_field+'__iexact': email}).exists()\n return ret\n\n\ndef import_attribute(path):\n assert isinstance(path, six.string_types)\n pkg, attr = path.rsplit('.', 1)\n ret = getattr(importlib.import_module(pkg), attr)\n return ret\n\n\ndef import_callable(path_or_callable):\n if not hasattr(path_or_callable, '__call__'):\n ret = import_attribute(path_or_callable)\n else:\n ret = path_or_callable\n return ret\n\ntry:\n from 
django.contrib.auth import get_user_model\nexcept ImportError:\n # To keep compatibility with Django 1.4\n def get_user_model():\n from . import app_settings\n from django.db.models import get_model\n\n try:\n app_label, model_name = app_settings.USER_MODEL.split('.')\n except ValueError:\n raise ImproperlyConfigured(\"AUTH_USER_MODEL must be of the\"\n \" form 'app_label.model_name'\")\n user_model = get_model(app_label, model_name)\n if user_model is None:\n raise ImproperlyConfigured(\"AUTH_USER_MODEL refers to model\"\n \" '%s' that has not been installed\"\n % app_settings.USER_MODEL)\n return user_model\n\n\ndef get_current_site(request=None):\n \"\"\"Wrapper around ``Site.objects.get_current`` to handle ``Site`` lookups\n by request in Django >= 1.8.\n\n :param request: optional request object\n :type request: :class:`django.http.HttpRequest`\n \"\"\"\n # >= django 1.8\n if request and hasattr(Site.objects, '_get_site_by_request'):\n site = Site.objects.get_current(request=request)\n else:\n site = Site.objects.get_current()\n\n return site\n\n\ndef resolve_url(to):\n \"\"\"\n Subset of django.shortcuts.resolve_url (that one is 1.5+)\n \"\"\"\n try:\n return urlresolvers.reverse(to)\n except urlresolvers.NoReverseMatch:\n # If this doesn't \"feel\" like a URL, re-raise.\n if '/' not in to and '.' not in to:\n raise\n # Finally, fall back and assume it's a URL\n return to\n\n\ndef serialize_instance(instance):\n \"\"\"\n Since Django 1.6 items added to the session are no longer pickled,\n but JSON encoded by default. We are storing partially complete models\n in the session (user, account, token, ...). We cannot use standard\n Django serialization, as these are models are not \"complete\" yet.\n Serialization will start complaining about missing relations et al.\n \"\"\"\n ret = dict([(k, v)\n for k, v in instance.__dict__.items()\n if not k.startswith('_')])\n return json.loads(json.dumps(ret, cls=DjangoJSONEncoder))\n\n\ndef deserialize_instance(model, data):\n ret = model()\n for k, v in data.items():\n if v is not None:\n try:\n f = model._meta.get_field(k)\n if isinstance(f, DateTimeField):\n v = dateparse.parse_datetime(v)\n elif isinstance(f, TimeField):\n v = dateparse.parse_time(v)\n elif isinstance(f, DateField):\n v = dateparse.parse_date(v)\n except FieldDoesNotExist:\n pass\n setattr(ret, k, v)\n return ret\n\n\ndef set_form_field_order(form, fields_order):\n if isinstance(form.fields, SortedDict):\n form.fields.keyOrder = fields_order\n else:\n # Python 2.7+\n from collections import OrderedDict\n assert isinstance(form.fields, OrderedDict)\n form.fields = OrderedDict((f, form.fields[f])\n for f in fields_order)\n\n\ndef build_absolute_uri(request, location, protocol=None):\n uri = request.build_absolute_uri(location)\n if protocol:\n uri = protocol + ':' + uri.partition(':')[2]\n return uri\n\n\ndef get_form_class(forms, form_id, default_form):\n form_class = forms.get(form_id, default_form)\n if isinstance(form_class, six.string_types):\n form_class = import_attribute(form_class)\n return form_class\n\n\ndef get_request_param(request, param, default=None):\n return request.POST.get(param) or request.GET.get(param, default)\n", "path": "allauth/utils.py" } ]
[ { "content": "import re\nimport unicodedata\nimport json\n\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.core.validators import validate_email, ValidationError\nfrom django.core import urlresolvers\nfrom django.contrib.sites.models import Site\nfrom django.db.models import FieldDoesNotExist\nfrom django.db.models.fields import (DateTimeField, DateField,\n EmailField, TimeField)\nfrom django.utils import six, dateparse\nfrom django.utils.datastructures import SortedDict\nfrom django.core.serializers.json import DjangoJSONEncoder\ntry:\n from django.utils.encoding import force_text\nexcept ImportError:\n from django.utils.encoding import force_unicode as force_text\n\ntry:\n import importlib\nexcept:\n from django.utils import importlib\n\n\ndef _generate_unique_username_base(txts, regex=None):\n username = None\n regex = regex or '[^\\w\\s@+.-]'\n for txt in txts:\n if not txt:\n continue\n username = unicodedata.normalize('NFKD', force_text(txt))\n username = username.encode('ascii', 'ignore').decode('ascii')\n username = force_text(re.sub(regex, '', username).lower())\n # Django allows for '@' in usernames in order to accomodate for\n # project wanting to use e-mail for username. In allauth we don't\n # use this, we already have a proper place for putting e-mail\n # addresses (EmailAddress), so let's not use the full e-mail\n # address and only take the part leading up to the '@'.\n username = username.split('@')[0]\n username = username.strip()\n username = re.sub('\\s+', '_', username)\n if username:\n break\n return username or 'user'\n\n\ndef get_username_max_length():\n from .account.app_settings import USER_MODEL_USERNAME_FIELD\n if USER_MODEL_USERNAME_FIELD is not None:\n User = get_user_model()\n max_length = User._meta.get_field(USER_MODEL_USERNAME_FIELD).max_length\n else:\n max_length = 0\n return max_length\n\n\ndef generate_unique_username(txts, regex=None):\n from .account.app_settings import USER_MODEL_USERNAME_FIELD\n username = _generate_unique_username_base(txts, regex)\n User = get_user_model()\n max_length = get_username_max_length()\n i = 0\n while True:\n try:\n if i:\n pfx = str(i + 1)\n else:\n pfx = ''\n ret = username[0:max_length - len(pfx)] + pfx\n query = {USER_MODEL_USERNAME_FIELD + '__iexact': ret}\n User.objects.get(**query)\n i += 1\n except User.DoesNotExist:\n return ret\n\n\ndef valid_email_or_none(email):\n ret = None\n try:\n if email:\n validate_email(email)\n if len(email) <= EmailField().max_length:\n ret = email\n except ValidationError:\n pass\n return ret\n\n\ndef email_address_exists(email, exclude_user=None):\n from .account import app_settings as account_settings\n from .account.models import EmailAddress\n\n emailaddresses = EmailAddress.objects\n if exclude_user:\n emailaddresses = emailaddresses.exclude(user=exclude_user)\n ret = emailaddresses.filter(email__iexact=email).exists()\n if not ret:\n email_field = account_settings.USER_MODEL_EMAIL_FIELD\n if email_field:\n users = get_user_model().objects\n if exclude_user:\n users = users.exclude(pk=exclude_user.pk)\n ret = users.filter(**{email_field+'__iexact': email}).exists()\n return ret\n\n\ndef import_attribute(path):\n assert isinstance(path, six.string_types)\n pkg, attr = path.rsplit('.', 1)\n ret = getattr(importlib.import_module(pkg), attr)\n return ret\n\n\ndef import_callable(path_or_callable):\n if not hasattr(path_or_callable, '__call__'):\n ret = import_attribute(path_or_callable)\n else:\n ret = path_or_callable\n return ret\n\ntry:\n from 
django.contrib.auth import get_user_model\nexcept ImportError:\n # To keep compatibility with Django 1.4\n def get_user_model():\n from . import app_settings\n from django.db.models import get_model\n\n try:\n app_label, model_name = app_settings.USER_MODEL.split('.')\n except ValueError:\n raise ImproperlyConfigured(\"AUTH_USER_MODEL must be of the\"\n \" form 'app_label.model_name'\")\n user_model = get_model(app_label, model_name)\n if user_model is None:\n raise ImproperlyConfigured(\"AUTH_USER_MODEL refers to model\"\n \" '%s' that has not been installed\"\n % app_settings.USER_MODEL)\n return user_model\n\n\ndef get_current_site(request=None):\n \"\"\"Wrapper around ``Site.objects.get_current`` to handle ``Site`` lookups\n by request in Django >= 1.8.\n\n :param request: optional request object\n :type request: :class:`django.http.HttpRequest`\n \"\"\"\n # >= django 1.8\n if request and hasattr(Site.objects, '_get_site_by_request'):\n site = Site.objects.get_current(request=request)\n else:\n site = Site.objects.get_current()\n\n return site\n\n\ndef resolve_url(to):\n \"\"\"\n Subset of django.shortcuts.resolve_url (that one is 1.5+)\n \"\"\"\n try:\n return urlresolvers.reverse(to)\n except urlresolvers.NoReverseMatch:\n # If this doesn't \"feel\" like a URL, re-raise.\n if '/' not in to and '.' not in to:\n raise\n # Finally, fall back and assume it's a URL\n return to\n\n\ndef serialize_instance(instance):\n \"\"\"\n Since Django 1.6 items added to the session are no longer pickled,\n but JSON encoded by default. We are storing partially complete models\n in the session (user, account, token, ...). We cannot use standard\n Django serialization, as these are models are not \"complete\" yet.\n Serialization will start complaining about missing relations et al.\n \"\"\"\n ret = dict([(k, v)\n for k, v in instance.__dict__.items()\n if not (k.startswith('_') or callable(v))])\n return json.loads(json.dumps(ret, cls=DjangoJSONEncoder))\n\n\ndef deserialize_instance(model, data):\n ret = model()\n for k, v in data.items():\n if v is not None:\n try:\n f = model._meta.get_field(k)\n if isinstance(f, DateTimeField):\n v = dateparse.parse_datetime(v)\n elif isinstance(f, TimeField):\n v = dateparse.parse_time(v)\n elif isinstance(f, DateField):\n v = dateparse.parse_date(v)\n except FieldDoesNotExist:\n pass\n setattr(ret, k, v)\n return ret\n\n\ndef set_form_field_order(form, fields_order):\n if isinstance(form.fields, SortedDict):\n form.fields.keyOrder = fields_order\n else:\n # Python 2.7+\n from collections import OrderedDict\n assert isinstance(form.fields, OrderedDict)\n form.fields = OrderedDict((f, form.fields[f])\n for f in fields_order)\n\n\ndef build_absolute_uri(request, location, protocol=None):\n uri = request.build_absolute_uri(location)\n if protocol:\n uri = protocol + ':' + uri.partition(':')[2]\n return uri\n\n\ndef get_form_class(forms, form_id, default_form):\n form_class = forms.get(form_id, default_form)\n if isinstance(form_class, six.string_types):\n form_class = import_attribute(form_class)\n return form_class\n\n\ndef get_request_param(request, param, default=None):\n return request.POST.get(param) or request.GET.get(param, default)\n", "path": "allauth/utils.py" } ]
diff --git a/allauth/tests.py b/allauth/tests.py index abee482765..9cabcb6071 100644 --- a/allauth/tests.py +++ b/allauth/tests.py @@ -85,12 +85,20 @@ class SomeModel(models.Model): dt = models.DateTimeField() t = models.TimeField() d = models.DateField() + + def method(self): + pass + instance = SomeModel(dt=datetime.now(), d=date.today(), t=datetime.now().time()) + # make sure serializer doesn't fail if a method is attached to the instance + instance.method = method instance.nonfield = 'hello' data = utils.serialize_instance(instance) instance2 = utils.deserialize_instance(SomeModel, data) + self.assertEqual(getattr(instance, 'method', None), method) + self.assertEqual(getattr(instance2, 'method', None), None) self.assertEqual(instance.nonfield, instance2.nonfield) self.assertEqual(instance.d, instance2.d) self.assertEqual(instance.dt.date(), instance2.dt.date()) diff --git a/allauth/utils.py b/allauth/utils.py index 2c76efd636..82cdfc2551 100644 --- a/allauth/utils.py +++ b/allauth/utils.py @@ -180,7 +180,7 @@ def serialize_instance(instance): """ ret = dict([(k, v) for k, v in instance.__dict__.items() - if not k.startswith('_')]) + if not (k.startswith('_') or callable(v))]) return json.loads(json.dumps(ret, cls=DjangoJSONEncoder))
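The one-line fix above adds `or callable(v)` to the comprehension in `serialize_instance`, so functions attached to an instance are skipped before JSON encoding. A minimal reproduction of the reported `TypeError`, using a made-up stand-in class rather than a real Django model:

```python
import json


class FakeUser:
    """Made-up stand-in for the partially built model instance kept in the session."""


obj = FakeUser()
obj.username = "alice"
obj.save = lambda: None  # a function attached to the instance, as in the report

naive = {k: v for k, v in obj.__dict__.items() if not k.startswith("_")}
try:
    json.dumps(naive)
except TypeError as exc:
    print("old filter fails:", exc)  # ... is not JSON serializable

fixed = {
    k: v for k, v in obj.__dict__.items() if not (k.startswith("_") or callable(v))
}
print(json.dumps(fixed))  # {"username": "alice"}
```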
iterative__dvc-5085
UTF-8 codec error while using dvc pull (surrogates not allowed) # Bug Report ## Description While using dvc pull, the task errors out with: `ERROR: unexpected error - 'utf-8' codec can't encode characters in position 103-110: surrogates not allowed ` ### Reproduce 1. Try to push files with special characters in it. Ex: sydney-australia-–-january-–.txt 2. Set dvc cache type to reflink. 3. Try running dvc pull on the files that were pushed. ### Environment information **Output of `dvc version`:** ```console $ dvc version DVC version: 1.10.2 (brew) --------------------------------- Platform: Python 3.9.0 on macOS-10.15.7-x86_64-i386-64bit Supports: azure, gdrive, gs, http, https, s3, ssh, oss, webdav, webdavs Cache types: reflink, hardlink, symlink Caches: local Remotes: s3 Repo: dvc, git ``` ### Fix By default on MacOS, the cache type used by DVC is reflink. By changing it to either symlink, hardlink or copy we can avoid the codec error. You can change the cache type this by using `dvc config cache.type <type>` More on the cache types: https://dvc.org/doc/user-guide/large-dataset-optimization ``` 2020-12-11 16:32:04,319 DEBUG: 'aug_set/annotations/sydney-australia-–-january-–-red-bull-energy-drink-mini-cooper-publicity-car-can-red-bull-drink-behind-used-123644712-0.json' doesn't exist. 2020-12-11 16:32:04,321 DEBUG: fetched: [(92173,)] 4:33 2020-12-11 16:32:04,325 ERROR: unexpected error - 'utf-8' codec can't encode characters in position 103-110: surrogates not allowed ------------------------------------------------------------ Traceback (most recent call last): File "/usr/local/Cellar/dvc/1.10.2/libexec/lib/python3.9/site-packages/dvc/main.py", line 90, in main ret = cmd.run() File "/usr/local/Cellar/dvc/1.10.2/libexec/lib/python3.9/site-packages/dvc/command/data_sync.py", line 26, in run stats = self.repo.pull( File "/usr/local/Cellar/dvc/1.10.2/libexec/lib/python3.9/site-packages/dvc/repo/__init__.py", line 60, in wrapper return f(repo, *args, **kwargs) File "/usr/local/Cellar/dvc/1.10.2/libexec/lib/python3.9/site-packages/dvc/repo/pull.py", line 36, in pull stats = self.checkout( File "/usr/local/Cellar/dvc/1.10.2/libexec/lib/python3.9/site-packages/dvc/repo/__init__.py", line 60, in wrapper return f(repo, *args, **kwargs) File "/usr/local/Cellar/dvc/1.10.2/libexec/lib/python3.9/site-packages/dvc/repo/checkout.py", line 96, in checkout result = stage.checkout( File "/usr/local/Cellar/dvc/1.10.2/libexec/lib/python3.9/site-packages/funcy/decorators.py", line 39, in wrapper return deco(call, *dargs, **dkwargs) File "/usr/local/Cellar/dvc/1.10.2/libexec/lib/python3.9/site-packages/dvc/stage/decorators.py", line 36, in rwlocked return call() File "/usr/local/Cellar/dvc/1.10.2/libexec/lib/python3.9/site-packages/funcy/decorators.py", line 60, in __call__ return self._func(*self._args, **self._kwargs) File "/usr/local/Cellar/dvc/1.10.2/libexec/lib/python3.9/site-packages/dvc/stage/__init__.py", line 502, in checkout key, outs = self._checkout(out, **kwargs) File "/usr/local/Cellar/dvc/1.10.2/libexec/lib/python3.9/site-packages/dvc/stage/__init__.py", line 510, in _checkout result = out.checkout(**kwargs) File "/usr/local/Cellar/dvc/1.10.2/libexec/lib/python3.9/site-packages/dvc/output/base.py", line 356, in checkout return self.cache.checkout( File "/usr/local/Cellar/dvc/1.10.2/libexec/lib/python3.9/site-packages/funcy/decorators.py", line 39, in wrapper return deco(call, *dargs, **dkwargs) File "/usr/local/Cellar/dvc/1.10.2/libexec/lib/python3.9/site-packages/dvc/cache/base.py", line 40, in 
use_state return call() File "/usr/local/Cellar/dvc/1.10.2/libexec/lib/python3.9/site-packages/funcy/decorators.py", line 60, in __call__ return self._func(*self._args, **self._kwargs) File "/usr/local/Cellar/dvc/1.10.2/libexec/lib/python3.9/site-packages/dvc/cache/base.py", line 555, in checkout return self._checkout( File "/usr/local/Cellar/dvc/1.10.2/libexec/lib/python3.9/site-packages/dvc/cache/base.py", line 578, in _checkout return self._checkout_dir( File "/usr/local/Cellar/dvc/1.10.2/libexec/lib/python3.9/site-packages/dvc/cache/base.py", line 472, in _checkout_dir self.link(entry_cache_info, entry_info) File "/usr/local/Cellar/dvc/1.10.2/libexec/lib/python3.9/site-packages/dvc/cache/base.py", line 141, in link self._link(from_info, to_info, self.cache_types) File "/usr/local/Cellar/dvc/1.10.2/libexec/lib/python3.9/site-packages/dvc/cache/base.py", line 148, in _link self._try_links(from_info, to_info, link_types) File "/usr/local/Cellar/dvc/1.10.2/libexec/lib/python3.9/site-packages/dvc/remote/slow_link_detection.py", line 38, in wrapper result = f(remote, *args, **kwargs) File "/usr/local/Cellar/dvc/1.10.2/libexec/lib/python3.9/site-packages/dvc/cache/base.py", line 166, in _try_links self._do_link(from_info, to_info, link_method) File "/usr/local/Cellar/dvc/1.10.2/libexec/lib/python3.9/site-packages/dvc/cache/base.py", line 182, in _do_link link_method(from_info, to_info) File "/usr/local/Cellar/dvc/1.10.2/libexec/lib/python3.9/site-packages/dvc/tree/local.py", line 240, in reflink System.reflink(from_info, tmp_info) File "/usr/local/Cellar/dvc/1.10.2/libexec/lib/python3.9/site-packages/dvc/system.py", line 108, in reflink ret = System._reflink_darwin(source, link_name) File "/usr/local/Cellar/dvc/1.10.2/libexec/lib/python3.9/site-packages/dvc/system.py", line 75, in _reflink_darwin ctypes.c_char_p(dst.encode("utf-8")), UnicodeEncodeError: 'utf-8' codec can't encode characters in position 103-110: surrogates not allowed ------------------------------------------------------------ 2020-12-11 16:32:04,710 DEBUG: Version info for developers: DVC version: 1.10.2 (brew) --------------------------------- Platform: Python 3.9.0 on macOS-10.15.7-x86_64-i386-64bit Supports: azure, gdrive, gs, http, https, s3, ssh, oss, webdav, webdavs Cache types: reflink, hardlink, symlink Caches: local Remotes: s3 Repo: dvc, git Having any troubles? Hit us up at https://dvc.org/support, we are always happy to help! 2020-12-11 16:32:04,712 DEBUG: Analytics is disabled. ```
[ { "content": "import errno\nimport logging\nimport os\nimport platform\nimport shutil\nimport sys\n\nfrom dvc.exceptions import DvcException\n\nlogger = logging.getLogger(__name__)\n\nif (\n platform.system() == \"Windows\"\n and sys.version_info < (3, 8)\n and sys.getwindowsversion() >= (6, 2)\n):\n try:\n import speedcopy\n\n speedcopy.patch_copyfile()\n except ImportError:\n pass\n\n\nclass System:\n @staticmethod\n def is_unix():\n return os.name != \"nt\"\n\n @staticmethod\n def copy(src, dest):\n return shutil.copyfile(src, dest)\n\n @staticmethod\n def hardlink(source, link_name):\n try:\n os.link(source, link_name)\n except OSError as exc:\n raise DvcException(\"failed to link\") from exc\n\n @staticmethod\n def symlink(source, link_name):\n try:\n os.symlink(source, link_name)\n except OSError as exc:\n raise DvcException(\"failed to symlink\") from exc\n\n @staticmethod\n def _reflink_darwin(src, dst):\n import ctypes\n\n LIBC = \"libc.dylib\"\n LIBC_FALLBACK = \"/usr/lib/libSystem.dylib\"\n try:\n clib = ctypes.CDLL(LIBC)\n except OSError as exc:\n logger.debug(\n \"unable to access '{}' (errno '{}'). \"\n \"Falling back to '{}'.\".format(LIBC, exc.errno, LIBC_FALLBACK)\n )\n if exc.errno != errno.ENOENT:\n raise\n # NOTE: trying to bypass System Integrity Protection (SIP)\n clib = ctypes.CDLL(LIBC_FALLBACK)\n\n if not hasattr(clib, \"clonefile\"):\n return -1\n\n clonefile = clib.clonefile\n clonefile.argtypes = [ctypes.c_char_p, ctypes.c_char_p, ctypes.c_int]\n clonefile.restype = ctypes.c_int\n\n return clonefile(\n ctypes.c_char_p(src.encode(\"utf-8\")),\n ctypes.c_char_p(dst.encode(\"utf-8\")),\n ctypes.c_int(0),\n )\n\n @staticmethod\n def _reflink_windows(_src, _dst):\n return -1\n\n @staticmethod\n def _reflink_linux(src, dst):\n import fcntl\n\n FICLONE = 0x40049409\n\n try:\n ret = 255\n with open(src) as s, open(dst, \"w+\") as d:\n ret = fcntl.ioctl(d.fileno(), FICLONE, s.fileno())\n finally:\n if ret != 0:\n os.unlink(dst)\n\n return ret\n\n @staticmethod\n def reflink(source, link_name):\n source, link_name = os.fspath(source), os.fspath(link_name)\n\n system = platform.system()\n try:\n if system == \"Windows\":\n ret = System._reflink_windows(source, link_name)\n elif system == \"Darwin\":\n ret = System._reflink_darwin(source, link_name)\n elif system == \"Linux\":\n ret = System._reflink_linux(source, link_name)\n else:\n ret = -1\n except OSError:\n ret = -1\n\n if ret != 0:\n raise DvcException(\"reflink is not supported\")\n\n @staticmethod\n def _getdirinfo(path):\n from collections import namedtuple\n\n from win32file import ( # pylint: disable=import-error\n FILE_FLAG_BACKUP_SEMANTICS,\n FILE_FLAG_OPEN_REPARSE_POINT,\n FILE_SHARE_READ,\n OPEN_EXISTING,\n CreateFileW,\n GetFileInformationByHandle,\n )\n\n # NOTE: use FILE_FLAG_OPEN_REPARSE_POINT to open symlink itself and not\n # the target See https://docs.microsoft.com/en-us/windows/desktop/api/\n # fileapi/nf-fileapi-createfilew#symbolic-link-behavior\n flags = FILE_FLAG_BACKUP_SEMANTICS | FILE_FLAG_OPEN_REPARSE_POINT\n\n hfile = CreateFileW(\n path, 0, FILE_SHARE_READ, None, OPEN_EXISTING, flags, None\n )\n\n # See BY_HANDLE_FILE_INFORMATION structure from fileapi.h\n Info = namedtuple(\n \"BY_HANDLE_FILE_INFORMATION\",\n [\n \"dwFileAttributes\",\n \"ftCreationTime\",\n \"ftLastAccessTime\",\n \"ftLastWriteTime\",\n \"dwVolumeSerialNumber\",\n \"nFileSizeHigh\",\n \"nFileSizeLow\",\n \"nNumberOfLinks\",\n \"nFileIndexHigh\",\n \"nFileIndexLow\",\n ],\n )\n\n return 
Info(*GetFileInformationByHandle(hfile))\n\n @staticmethod\n def inode(path):\n path = os.fspath(path)\n\n if System.is_unix():\n import ctypes\n\n inode = os.lstat(path).st_ino\n # NOTE: See https://bugs.python.org/issue29619 and\n # https://stackoverflow.com/questions/34643289/\n # pythons-os-stat-is-returning-wrong-inode-value\n inode = ctypes.c_ulong(inode).value\n else:\n # getdirinfo from ntfsutils works on both files and dirs\n info = System._getdirinfo(path)\n inode = abs(\n hash(\n (\n info.dwVolumeSerialNumber,\n info.nFileIndexHigh,\n info.nFileIndexLow,\n )\n )\n )\n assert inode >= 0\n assert inode < 2 ** 64\n return inode\n\n @staticmethod\n def is_symlink(path):\n path = os.fspath(path)\n\n if System.is_unix():\n return os.path.islink(path)\n\n # https://docs.microsoft.com/en-us/windows/desktop/fileio/\n # file-attribute-constants\n from winnt import ( # pylint: disable=import-error\n FILE_ATTRIBUTE_REPARSE_POINT,\n )\n\n if os.path.lexists(path):\n info = System._getdirinfo(path)\n return info.dwFileAttributes & FILE_ATTRIBUTE_REPARSE_POINT\n return False\n\n @staticmethod\n def is_hardlink(path):\n path = os.fspath(path)\n\n if System.is_unix():\n return os.stat(path).st_nlink > 1\n\n info = System._getdirinfo(path)\n return info.nNumberOfLinks > 1\n", "path": "dvc/system.py" } ]
[ { "content": "import errno\nimport logging\nimport os\nimport platform\nimport shutil\nimport sys\n\nfrom dvc.exceptions import DvcException\n\nlogger = logging.getLogger(__name__)\n\nif (\n platform.system() == \"Windows\"\n and sys.version_info < (3, 8)\n and sys.getwindowsversion() >= (6, 2)\n):\n try:\n import speedcopy\n\n speedcopy.patch_copyfile()\n except ImportError:\n pass\n\n\nclass System:\n @staticmethod\n def is_unix():\n return os.name != \"nt\"\n\n @staticmethod\n def copy(src, dest):\n return shutil.copyfile(src, dest)\n\n @staticmethod\n def hardlink(source, link_name):\n try:\n os.link(source, link_name)\n except OSError as exc:\n raise DvcException(\"failed to link\") from exc\n\n @staticmethod\n def symlink(source, link_name):\n try:\n os.symlink(source, link_name)\n except OSError as exc:\n raise DvcException(\"failed to symlink\") from exc\n\n @staticmethod\n def _reflink_darwin(src, dst):\n import ctypes\n\n LIBC = \"libc.dylib\"\n LIBC_FALLBACK = \"/usr/lib/libSystem.dylib\"\n try:\n clib = ctypes.CDLL(LIBC)\n except OSError as exc:\n logger.debug(\n \"unable to access '{}' (errno '{}'). \"\n \"Falling back to '{}'.\".format(LIBC, exc.errno, LIBC_FALLBACK)\n )\n if exc.errno != errno.ENOENT:\n raise\n # NOTE: trying to bypass System Integrity Protection (SIP)\n clib = ctypes.CDLL(LIBC_FALLBACK)\n\n if not hasattr(clib, \"clonefile\"):\n return -1\n\n clonefile = clib.clonefile\n clonefile.argtypes = [ctypes.c_char_p, ctypes.c_char_p, ctypes.c_int]\n clonefile.restype = ctypes.c_int\n\n return clonefile(\n ctypes.c_char_p(os.fsencode(src)),\n ctypes.c_char_p(os.fsencode(dst)),\n ctypes.c_int(0),\n )\n\n @staticmethod\n def _reflink_windows(_src, _dst):\n return -1\n\n @staticmethod\n def _reflink_linux(src, dst):\n import fcntl\n\n FICLONE = 0x40049409\n\n try:\n ret = 255\n with open(src) as s, open(dst, \"w+\") as d:\n ret = fcntl.ioctl(d.fileno(), FICLONE, s.fileno())\n finally:\n if ret != 0:\n os.unlink(dst)\n\n return ret\n\n @staticmethod\n def reflink(source, link_name):\n source, link_name = os.fspath(source), os.fspath(link_name)\n\n system = platform.system()\n try:\n if system == \"Windows\":\n ret = System._reflink_windows(source, link_name)\n elif system == \"Darwin\":\n ret = System._reflink_darwin(source, link_name)\n elif system == \"Linux\":\n ret = System._reflink_linux(source, link_name)\n else:\n ret = -1\n except OSError:\n ret = -1\n\n if ret != 0:\n raise DvcException(\"reflink is not supported\")\n\n @staticmethod\n def _getdirinfo(path):\n from collections import namedtuple\n\n from win32file import ( # pylint: disable=import-error\n FILE_FLAG_BACKUP_SEMANTICS,\n FILE_FLAG_OPEN_REPARSE_POINT,\n FILE_SHARE_READ,\n OPEN_EXISTING,\n CreateFileW,\n GetFileInformationByHandle,\n )\n\n # NOTE: use FILE_FLAG_OPEN_REPARSE_POINT to open symlink itself and not\n # the target See https://docs.microsoft.com/en-us/windows/desktop/api/\n # fileapi/nf-fileapi-createfilew#symbolic-link-behavior\n flags = FILE_FLAG_BACKUP_SEMANTICS | FILE_FLAG_OPEN_REPARSE_POINT\n\n hfile = CreateFileW(\n path, 0, FILE_SHARE_READ, None, OPEN_EXISTING, flags, None\n )\n\n # See BY_HANDLE_FILE_INFORMATION structure from fileapi.h\n Info = namedtuple(\n \"BY_HANDLE_FILE_INFORMATION\",\n [\n \"dwFileAttributes\",\n \"ftCreationTime\",\n \"ftLastAccessTime\",\n \"ftLastWriteTime\",\n \"dwVolumeSerialNumber\",\n \"nFileSizeHigh\",\n \"nFileSizeLow\",\n \"nNumberOfLinks\",\n \"nFileIndexHigh\",\n \"nFileIndexLow\",\n ],\n )\n\n return Info(*GetFileInformationByHandle(hfile))\n\n 
@staticmethod\n def inode(path):\n path = os.fspath(path)\n\n if System.is_unix():\n import ctypes\n\n inode = os.lstat(path).st_ino\n # NOTE: See https://bugs.python.org/issue29619 and\n # https://stackoverflow.com/questions/34643289/\n # pythons-os-stat-is-returning-wrong-inode-value\n inode = ctypes.c_ulong(inode).value\n else:\n # getdirinfo from ntfsutils works on both files and dirs\n info = System._getdirinfo(path)\n inode = abs(\n hash(\n (\n info.dwVolumeSerialNumber,\n info.nFileIndexHigh,\n info.nFileIndexLow,\n )\n )\n )\n assert inode >= 0\n assert inode < 2 ** 64\n return inode\n\n @staticmethod\n def is_symlink(path):\n path = os.fspath(path)\n\n if System.is_unix():\n return os.path.islink(path)\n\n # https://docs.microsoft.com/en-us/windows/desktop/fileio/\n # file-attribute-constants\n from winnt import ( # pylint: disable=import-error\n FILE_ATTRIBUTE_REPARSE_POINT,\n )\n\n if os.path.lexists(path):\n info = System._getdirinfo(path)\n return info.dwFileAttributes & FILE_ATTRIBUTE_REPARSE_POINT\n return False\n\n @staticmethod\n def is_hardlink(path):\n path = os.fspath(path)\n\n if System.is_unix():\n return os.stat(path).st_nlink > 1\n\n info = System._getdirinfo(path)\n return info.nNumberOfLinks > 1\n", "path": "dvc/system.py" } ]
diff --git a/dvc/system.py b/dvc/system.py index 1d7f0a9b5b..4b00bd8347 100644 --- a/dvc/system.py +++ b/dvc/system.py @@ -71,8 +71,8 @@ def _reflink_darwin(src, dst): clonefile.restype = ctypes.c_int return clonefile( - ctypes.c_char_p(src.encode("utf-8")), - ctypes.c_char_p(dst.encode("utf-8")), + ctypes.c_char_p(os.fsencode(src)), + ctypes.c_char_p(os.fsencode(dst)), ctypes.c_int(0), )
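The patch above swaps `src.encode("utf-8")` for `os.fsencode(src)` before the paths reach `clonefile`. A small sketch of why that matters, independent of DVC and with a made-up filename: a path that comes back from the filesystem with undecodable bytes carries lone surrogates, which a plain UTF-8 encode rejects with exactly the "surrogates not allowed" error from the report, while `os.fsencode` round-trips it through the filesystem encoding and its error handler.

```python
import os

# A directory entry with bytes that do not decode cleanly reaches Python as a
# str containing surrogate escapes ('\udcd0' here is a made-up example).
path = "sydney-australia-\udcd0.txt"

try:
    path.encode("utf-8")   # what the old _reflink_darwin code did
except UnicodeEncodeError as exc:
    print("plain utf-8 encode fails:", exc)

raw = os.fsencode(path)    # uses the filesystem encoding and its error handler
print(raw)                 # b'sydney-australia-\xd0.txt' on a utf-8/surrogateescape system
```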
liqd__a4-opin-1799
Changing the Organisation Details is not possible **URL:** https://opin.me/en/dashboard/organisations/liquid-democracy/settings/ **user:** Initiators who try to fill in the organisation details, and admins too. **expected behaviour:** I fill in the organisation details, save them, and they are stored. **behaviour:** I fill in the organisation details, press save, and the page reloads, but the details are not saved. **important screensize:** **device & browser:** Firefox 73.0.1 (64-Bit) **Comment/Question:** Screenshot?
[ { "content": "\nimport parler\nfrom django import forms\nfrom django.conf import settings\nfrom django.core.exceptions import ValidationError\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom euth.organisations.models import Organisation\n\n\nclass OrganisationForm(forms.ModelForm):\n translated_fields = [\n ('description_why', forms.CharField, {\n 'label': _('description why'),\n 'widget': forms.Textarea,\n }),\n ('description_how', forms.CharField, {\n 'widget': forms.Textarea,\n 'label': _('description how')\n }),\n ('description', forms.CharField, {\n 'label': _('description'),\n 'help_text': _(\n 'More info about the organisation / '\n 'Short text for organisation overview'),\n 'widget': forms.Textarea,\n })\n ]\n languages = [lang_code for lang_code, lang in settings.LANGUAGES]\n\n class Meta:\n model = Organisation\n fields = [\n 'name', 'image', 'logo', 'twitter_handle', 'facebook_handle',\n 'instagram_handle', 'webpage', 'country', 'place'\n ]\n help_texts = {\n 'name': _('The title of your organisation'),\n }\n\n def _get_identifier(self, language, fieldname):\n return '{}__{}'.format(language, fieldname)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n # inject additional form fields for translated model fields\n for lang_code in self.languages:\n for name, field_cls, kwargs in self.translated_fields:\n self.instance.set_current_language(lang_code)\n field = field_cls(**kwargs)\n identifier = self._get_identifier(\n lang_code, name)\n field.required = False\n\n try:\n translation = self.instance.get_translation(lang_code)\n initial = getattr(translation, name)\n except parler.models.TranslationDoesNotExist:\n initial = ''\n\n field.initial = initial\n self.fields[identifier] = field\n\n def translated(self):\n \"\"\"\n Return translated fields as list of tuples (language code, fields).\n \"\"\"\n\n from itertools import groupby\n fields = [(field.html_name.split('__')[0], field) for field in self\n if '__' in field.html_name]\n groups = groupby(fields, lambda x: x[0])\n values = [(lang, list(map(lambda x: x[1], group)))\n for lang, group in groups]\n return values\n\n def untranslated(self):\n \"\"\"\n Return untranslated fields as flat list.\n \"\"\"\n return [field for field in self if '__' not in field.html_name]\n\n def prefiled_languages(self):\n \"\"\"\n Return languages tabs that need to be displayed.\n \"\"\"\n languages = [lang for lang in self.languages\n if lang in self.data\n or self.instance.has_translation(lang)]\n # always provide english\n if 'en' not in languages:\n languages.insert(0, 'en')\n return languages\n\n def save(self, commit=True):\n instance = super().save(commit=commit)\n if commit is True:\n for lang_code in self.languages:\n if lang_code in self.data:\n instance.set_current_language(lang_code)\n for fieldname, _cls, _kwargs in self.translated_fields:\n identifier = '{}__{}'.format(lang_code, fieldname)\n setattr(instance, fieldname,\n self.cleaned_data.get(identifier))\n instance.save()\n elif instance.has_translation(lang_code):\n instance.delete_translation(lang_code)\n return instance\n\n def clean(self):\n for lang_code in self.languages:\n if lang_code in self.data:\n for fieldname in self.translated_fields:\n identifier = self._get_identifier(lang_code, fieldname[0])\n data = self.cleaned_data\n if identifier not in data or not data[identifier]:\n msg = 'This field is required'\n raise ValidationError((identifier, msg))\n\n return self.cleaned_data\n", "path": "euth/dashboard/forms.py" } ]
[ { "content": "\nimport parler\nfrom django import forms\nfrom django.conf import settings\nfrom django.core.exceptions import ValidationError\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom euth.organisations.models import Organisation\n\n\nclass OrganisationForm(forms.ModelForm):\n translated_fields = [\n ('description_why', forms.CharField, {\n 'label': _('description why'),\n 'widget': forms.Textarea,\n }),\n ('description_how', forms.CharField, {\n 'widget': forms.Textarea,\n 'label': _('description how')\n }),\n ('description', forms.CharField, {\n 'label': _('description'),\n 'help_text': _(\n 'More info about the organisation / '\n 'Short text for organisation overview'),\n 'widget': forms.Textarea,\n })\n ]\n languages = [lang_code for lang_code, lang in settings.LANGUAGES]\n\n class Meta:\n model = Organisation\n fields = [\n 'name', 'image', 'logo', 'twitter_handle', 'facebook_handle',\n 'instagram_handle', 'webpage', 'country', 'place'\n ]\n help_texts = {\n 'name': _('The title of your organisation'),\n }\n\n def _get_identifier(self, language, fieldname):\n return '{}__{}'.format(language, fieldname)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n # inject additional form fields for translated model fields\n for lang_code in self.languages:\n for name, field_cls, kwargs in self.translated_fields:\n self.instance.set_current_language(lang_code)\n field = field_cls(**kwargs)\n identifier = self._get_identifier(\n lang_code, name)\n field.required = False\n\n try:\n translation = self.instance.get_translation(lang_code)\n initial = getattr(translation, name)\n except parler.models.TranslationDoesNotExist:\n initial = ''\n\n field.initial = initial\n self.fields[identifier] = field\n\n def translated(self):\n \"\"\"\n Return translated fields as list of tuples (language code, fields).\n \"\"\"\n\n from itertools import groupby\n fields = [(field.html_name.split('__')[0], field) for field in self\n if '__' in field.html_name]\n groups = groupby(fields, lambda x: x[0])\n values = [(lang, list(map(lambda x: x[1], group)))\n for lang, group in groups]\n return values\n\n def untranslated(self):\n \"\"\"\n Return untranslated fields as flat list.\n \"\"\"\n return [field for field in self if '__' not in field.html_name]\n\n def prefilled_languages(self):\n \"\"\"\n Return languages tabs that need to be displayed.\n \"\"\"\n languages = [lang for lang in self.languages\n if lang in self.data\n or self.instance.has_translation(lang)]\n # always provide english\n if 'en' not in languages:\n languages.insert(0, 'en')\n return languages\n\n def save(self, commit=True):\n instance = super().save(commit=commit)\n if commit is True:\n for lang_code in self.languages:\n if lang_code in self.data:\n instance.set_current_language(lang_code)\n for fieldname, _cls, _kwargs in self.translated_fields:\n identifier = '{}__{}'.format(lang_code, fieldname)\n setattr(instance, fieldname,\n self.cleaned_data.get(identifier))\n instance.save()\n elif instance.has_translation(lang_code):\n instance.delete_translation(lang_code)\n return instance\n\n def clean(self):\n for lang_code in self.languages:\n if lang_code in self.data:\n for fieldname in self.translated_fields:\n identifier = self._get_identifier(lang_code, fieldname[0])\n data = self.cleaned_data\n if identifier not in data or not data[identifier]:\n msg = 'This field is required'\n raise ValidationError((identifier, msg))\n\n return self.cleaned_data\n", "path": "euth/dashboard/forms.py" } ]
diff --git a/euth/dashboard/forms.py b/euth/dashboard/forms.py index 9b54fb2d9..3ebcdb17f 100644 --- a/euth/dashboard/forms.py +++ b/euth/dashboard/forms.py @@ -81,7 +81,7 @@ def untranslated(self): """ return [field for field in self if '__' not in field.html_name] - def prefiled_languages(self): + def prefilled_languages(self): """ Return languages tabs that need to be displayed. """ diff --git a/euth/dashboard/static/language_switch/react_language_switch.jsx b/euth/dashboard/static/language_switch/react_language_switch.jsx index d777f4833..aea148c3b 100644 --- a/euth/dashboard/static/language_switch/react_language_switch.jsx +++ b/euth/dashboard/static/language_switch/react_language_switch.jsx @@ -1,7 +1,6 @@ var PropTypes = require('prop-types') var React = require('react') var ReactDOM = require('react-dom') -var $ = require('jquery') class LanguageSwitch extends React.Component { constructor (props) { @@ -11,33 +10,32 @@ class LanguageSwitch extends React.Component { } } - switchLanguage (e) { + addLanguage (e) { var languageCode = e.target.textContent var index = this.state.activeLanguages.indexOf(languageCode) var newActiveLanguages = this.state.activeLanguages.concat([]) if (index === -1) { // adding language newActiveLanguages.push(languageCode) - } else { - newActiveLanguages.splice(index, 1) } this.setState({ activeLanguages: newActiveLanguages - }, function () { - var $checkbox = $('#' + languageCode + '_language-switch') - // language was active - if (!$checkbox.is(':checked')) { - $(this.refs.checkboxList).find(':checked').first().next('a').tab('show') - } else { - $checkbox.next('a').tab('show') - } }) } - componentDidMount () { - $(this.refs.toggleButton).dropdown() - $(this.refs.checkboxList).find('.a').tab() + removeLanguage (e) { + var languageCode = e.target.textContent + var index = this.state.activeLanguages.indexOf(languageCode) + var newActiveLanguages = this.state.activeLanguages.concat([]) + if (index !== -1) { + // removing language + newActiveLanguages.splice(index, 1) + } + + this.setState({ + activeLanguages: newActiveLanguages + }) } render () { @@ -68,10 +66,29 @@ class LanguageSwitch extends React.Component { </button> <ul className="dropdown-menu"> { - this.props.languages.map(languageCode => { + this.props.languages.map((languageCode, i) => { + return ( + <span key={languageCode}> + {this.state.activeLanguages.indexOf(languageCode) === -1 && + <li key={languageCode}> + <button type="button" onClick={this.addLanguage.bind(this)}>{languageCode}</button> + </li>} + </span> + ) + }) + } + </ul> + </div> + <div className="dropdown"> + <button className="btn btn-default dropdown-toggle" type="button" data-toggle="dropdown" ref="toggleButton"> + <i className="fa fa-minus" /> + </button> + <ul className="dropdown-menu"> + { + this.state.activeLanguages.map(languageCode => { return ( <li key={languageCode}> - <button type="button" onClick={this.switchLanguage.bind(this)}>{languageCode}</button> + <button type="button" onClick={this.removeLanguage.bind(this)}>{languageCode}</button> </li> ) }) diff --git a/euth/organisations/templates/euth_organisations/organisation_form.html b/euth/organisations/templates/euth_organisations/organisation_form.html index 30d6f8afd..afdc04283 100644 --- a/euth/organisations/templates/euth_organisations/organisation_form.html +++ b/euth/organisations/templates/euth_organisations/organisation_form.html @@ -29,7 +29,7 @@ <h1 class="dashboard-content-heading">{% trans "Edit your organisation details" id="language-switch-list" 
data-euth-widget="language-switch" data-languages="{{ form.languages|join:' ' }}" - data-active-languages="{{ form.prefiled_languages|join:' ' }}" + data-active-languages="{{ form.prefilled_languages|join:' ' }}" ></div> {% for lang_code, fields in form.translated %} diff --git a/tests/dashboard/test_dashboard_views.py b/tests/dashboard/test_dashboard_views.py index b1d54e46e..987f7b1a1 100644 --- a/tests/dashboard/test_dashboard_views.py +++ b/tests/dashboard/test_dashboard_views.py @@ -244,7 +244,7 @@ def test_dashboard_update_organisation(client, organisation): response = client.get(url) form = response.context_data['form'] - assert form.prefiled_languages() == ['en'] + assert form.prefilled_languages() == ['en'] assert len(form.untranslated()) == 9 assert len(form.translated()) == 10 assert form.translated()[0][0] == 'en'
learningequality__kolibri-7761
Kolibri fails to start if the timezone is detected incorrectly ### Observed behavior In some environments, the time zone offset detected by tzlocal may not match the time zone offset of Python's `time.localtime()`. In that case, tzlocal's `get_localzone()` raises a `ValueError` exception: https://github.com/regebro/tzlocal/blob/c5282c6feded0d576937c0dcdf1f4fd00a95fbee/tzlocal/utils.py#L34-L46. ### Expected behavior It looks like Kolibri handles an `UnknownTimeZoneError` exception from pytz already. To solve this issue, we could handle the `ValueError` from tzlocal in the same way: https://github.com/learningequality/kolibri/blob/release-v0.14.x/kolibri/deployment/default/settings/base.py#L252-L257. (It is unfortunate that tzlocal throws such a generic exception here, but we probably need to put up with it). ### User-facing consequences At the moment, Kolibri is failing to start on Linux when using the `America/Sao_Paulo` timezone: ``` $ flatpak run org.learningequality.Kolibri WARNING:ifcfg:Neither `ifconfig` (`ifconfig -a`) nor `ip` (`ip address show`) commands are available, listing network interfaces is likely to fail Process KolibriServiceMainProcess-1: Traceback (most recent call last): File "/usr/lib/python3.7/multiprocessing/process.py", line 297, in _bootstrap self.run() File "/app/lib/python3.7/site-packages/kolibri_gnome/kolibri_service/kolibri_service_main.py", line 24, in run self.__run_kolibri_start() File "/app/lib/python3.7/site-packages/kolibri_gnome/kolibri_service/kolibri_service_main.py", line 49, in __run_kolibri_start initialize() File "/app/lib/python3.7/site-packages/kolibri/utils/cli.py", line 356, in initialize _setup_django() File "/app/lib/python3.7/site-packages/kolibri/utils/cli.py", line 293, in _setup_django django.setup() File "/app/lib/python3.7/site-packages/kolibri/dist/django/__init__.py", line 22, in setup configure_logging(settings.LOGGING_CONFIG, settings.LOGGING) File "/app/lib/python3.7/site-packages/kolibri/dist/django/conf/__init__.py", line 56, in __getattr__ self._setup(name) File "/app/lib/python3.7/site-packages/kolibri/dist/django/conf/__init__.py", line 41, in _setup self._wrapped = Settings(settings_module) File "/app/lib/python3.7/site-packages/kolibri/dist/django/conf/__init__.py", line 110, in __init__ mod = importlib.import_module(self.SETTINGS_MODULE) File "/usr/lib/python3.7/importlib/__init__.py", line 127, in import_module return _bootstrap._gcd_import(name[level:], package, level) File "<frozen importlib._bootstrap>", line 1006, in _gcd_import File "<frozen importlib._bootstrap>", line 983, in _find_and_load File "<frozen importlib._bootstrap>", line 967, in _find_and_load_unlocked File "<frozen importlib._bootstrap>", line 677, in _load_unlocked File "<frozen importlib._bootstrap_external>", line 728, in exec_module File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed File "/app/lib/python3.7/site-packages/kolibri_gnome/kolibri_settings.py", line 1, in <module> from kolibri.deployment.default.settings.base import * File "/app/lib/python3.7/site-packages/kolibri/deployment/default/settings/base.py", line 253, in <module> TIME_ZONE = get_localzone().zone File "/app/lib/python3.7/site-packages/kolibri/dist/tzlocal/unix.py", line 165, in get_localzone _cache_tz = _get_localzone() File "/app/lib/python3.7/site-packages/kolibri/dist/tzlocal/unix.py", line 90, in _get_localzone utils.assert_tz_offset(tz) File "/app/lib/python3.7/site-packages/kolibri/dist/tzlocal/utils.py", line 46, in assert_tz_offset raise 
ValueError(msg) ValueError: Timezone offset does not match system offset: -7200 != -10800. Please, check your config files. WARNING:kolibri_gnome.kolibri_service.kolibri_service_monitor:Kolibri service has died ``` (It appears Kolibri is including an old version of pytz which has incorrect information about DST for this timezone, but let's consider that tangential to this issue). ### Steps to reproduce Change your system timezone to "America/Sao_Paulo" and start Kolibri.
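A minimal sketch of the handling this issue proposes, assuming the UTC fallback already used for `UnknownTimeZoneError` is also acceptable when tzlocal raises its generic `ValueError`:

```python
import pytz
from tzlocal import get_localzone

try:
    TIME_ZONE = get_localzone().zone  # .zone is the attribute used by the bundled tzlocal
except (pytz.UnknownTimeZoneError, ValueError):
    # tzlocal raises a bare ValueError when the detected zone's offset does not
    # match time.localtime(); fall back to UTC instead of failing to start.
    TIME_ZONE = pytz.utc.zone
```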
[ { "content": "# -*- coding: utf-8 -*-\n\"\"\"\nDjango settings for kolibri project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.11/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.11/ref/settings/\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport os\nimport sys\n\nimport pytz\nfrom django.conf import locale\nfrom six.moves.urllib.parse import urljoin\nfrom tzlocal import get_localzone\n\nimport kolibri\nfrom kolibri.deployment.default.cache import CACHES\nfrom kolibri.plugins.utils.settings import apply_settings\nfrom kolibri.utils import conf\nfrom kolibri.utils import i18n\nfrom kolibri.utils.logger import get_logging_config\n\ntry:\n isolation_level = None\n import psycopg2 # noqa\n\n isolation_level = psycopg2.extensions.ISOLATION_LEVEL_SERIALIZABLE\nexcept ImportError:\n pass\n\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\n# import kolibri, so we can get the path to the module.\n# we load other utilities related to i18n\n# This is essential! We load the kolibri conf INSIDE the Django conf\n\nKOLIBRI_MODULE_PATH = os.path.dirname(kolibri.__file__)\n\nBASE_DIR = os.path.abspath(os.path.dirname(__name__))\n\nLOCALE_PATHS = [os.path.join(KOLIBRI_MODULE_PATH, \"locale\")]\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = \"f@ey3)y^03r9^@mou97apom*+c1m#b1!cwbm50^s4yk72xce27\"\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = conf.OPTIONS[\"Server\"][\"DEBUG\"]\n\nALLOWED_HOSTS = [\"*\"]\n\n# Application definition\n\nINSTALLED_APPS = [\n \"kolibri.core\",\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"django_filters\",\n \"kolibri.core.auth.apps.KolibriAuthConfig\",\n \"kolibri.core.content\",\n \"kolibri.core.logger\",\n \"kolibri.core.notifications.apps.KolibriNotificationsConfig\",\n \"kolibri.core.tasks.apps.KolibriTasksConfig\",\n \"kolibri.core.deviceadmin\",\n \"kolibri.core.webpack\",\n \"kolibri.core.exams\",\n \"kolibri.core.device\",\n \"kolibri.core.discovery\",\n \"kolibri.core.lessons\",\n \"kolibri.core.analytics\",\n \"rest_framework\",\n \"django_js_reverse\",\n \"jsonfield\",\n \"morango\",\n]\n\nMIDDLEWARE = [\n \"kolibri.core.analytics.middleware.cherrypy_access_log_middleware\",\n \"kolibri.core.device.middleware.ProvisioningErrorHandler\",\n \"django.middleware.cache.UpdateCacheMiddleware\",\n \"kolibri.core.analytics.middleware.MetricsMiddleware\",\n \"kolibri.core.auth.middleware.KolibriSessionMiddleware\",\n \"kolibri.core.device.middleware.KolibriLocaleMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"kolibri.core.auth.middleware.CustomAuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n \"django.middleware.security.SecurityMiddleware\",\n \"django.middleware.cache.FetchFromCacheMiddleware\",\n]\n\n# By default don't cache anything unless it explicitly requests it to!\nCACHE_MIDDLEWARE_SECONDS = 0\n\nCACHE_MIDDLEWARE_KEY_PREFIX = \"pages\"\n\nCACHES = 
CACHES\n\nROOT_URLCONF = \"kolibri.deployment.default.urls\"\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n \"kolibri.core.context_processors.custom_context_processor.developer_mode\",\n ]\n },\n }\n]\n\nWSGI_APPLICATION = \"kolibri.deployment.default.wsgi.application\"\n\n\n# Database\n# https://docs.djangoproject.com/en/1.11/ref/settings/#databases\n\nif conf.OPTIONS[\"Database\"][\"DATABASE_ENGINE\"] == \"sqlite\":\n DATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.sqlite3\",\n \"NAME\": os.path.join(\n conf.KOLIBRI_HOME,\n conf.OPTIONS[\"Database\"][\"DATABASE_NAME\"] or \"db.sqlite3\",\n ),\n \"OPTIONS\": {\"timeout\": 100},\n },\n \"notifications_db\": {\n \"ENGINE\": \"django.db.backends.sqlite3\",\n \"NAME\": os.path.join(conf.KOLIBRI_HOME, \"notifications.sqlite3\"),\n \"OPTIONS\": {\"timeout\": 100},\n },\n }\n DATABASE_ROUTERS = (\"kolibri.core.notifications.models.NotificationsRouter\",)\n\nelif conf.OPTIONS[\"Database\"][\"DATABASE_ENGINE\"] == \"postgres\":\n DATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.postgresql\",\n \"NAME\": conf.OPTIONS[\"Database\"][\"DATABASE_NAME\"],\n \"PASSWORD\": conf.OPTIONS[\"Database\"][\"DATABASE_PASSWORD\"],\n \"USER\": conf.OPTIONS[\"Database\"][\"DATABASE_USER\"],\n \"HOST\": conf.OPTIONS[\"Database\"][\"DATABASE_HOST\"],\n \"PORT\": conf.OPTIONS[\"Database\"][\"DATABASE_PORT\"],\n },\n \"default-serializable\": {\n \"ENGINE\": \"django.db.backends.postgresql\",\n \"NAME\": conf.OPTIONS[\"Database\"][\"DATABASE_NAME\"],\n \"PASSWORD\": conf.OPTIONS[\"Database\"][\"DATABASE_PASSWORD\"],\n \"USER\": conf.OPTIONS[\"Database\"][\"DATABASE_USER\"],\n \"HOST\": conf.OPTIONS[\"Database\"][\"DATABASE_HOST\"],\n \"PORT\": conf.OPTIONS[\"Database\"][\"DATABASE_PORT\"],\n \"OPTIONS\": {\"isolation_level\": isolation_level},\n },\n }\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.11/topics/i18n/\n\n# For language names, see:\n# https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes\n# http://helpsharepointvision.nevron.com/Culture_Table.html\n\n# django-specific format, e.g.: [ ('bn-bd', 'বাংলা'), ('en', 'English'), ...]\nLANGUAGES = [\n (\n i18n.KOLIBRI_LANGUAGE_INFO[lang_code][\"intl_code\"],\n i18n.KOLIBRI_LANGUAGE_INFO[lang_code][\"language_name\"],\n )\n for lang_code in conf.OPTIONS[\"Deployment\"][\"LANGUAGES\"]\n if lang_code in i18n.KOLIBRI_LANGUAGE_INFO\n]\n\n# Some languages are not supported out-of-the-box by Django\n# Here, we use the language code in Intl.js\nEXTRA_LANG_INFO = {\n \"ff-cm\": {\n \"bidi\": False,\n \"code\": \"ff-cm\",\n \"name\": \"Fulfulde (Cameroon)\",\n \"name_local\": \"Fulfulde Mbororoore\",\n },\n \"es-419\": {\n \"bidi\": False,\n \"code\": \"es-419\",\n \"name\": \"Spanish (Latin America)\",\n \"name_local\": \"Español\",\n },\n \"es-es\": {\n \"bidi\": False,\n \"code\": \"es-es\",\n \"name\": \"Spanish (Spain)\",\n \"name_local\": \"Español (España)\",\n },\n \"fr-ht\": {\n \"bidi\": False,\n \"code\": \"fr-ht\",\n \"name\": \"Haitian Creole\",\n \"name_local\": \"Kreyòl ayisyen\",\n },\n \"gu-in\": {\n \"bidi\": False,\n \"code\": \"gu-in\",\n \"name\": \"Gujarati\",\n \"name_local\": \"ગુજરાતી\",\n },\n \"km\": {\"bidi\": 
False, \"code\": \"km\", \"name\": \"Khmer\", \"name_local\": \"ភាសាខ្មែរ\"},\n \"nyn\": {\n \"bidi\": False,\n \"code\": \"nyn\",\n \"name\": \"Chichewa, Chewa, Nyanja\",\n \"name_local\": \"Chinyanja\",\n },\n \"zh\": {\n \"bidi\": False,\n \"code\": \"zh-hans\",\n \"name\": \"Simplified Chinese\",\n \"name_local\": \"简体中文\",\n },\n \"yo\": {\"bidi\": False, \"code\": \"yo\", \"name\": \"Yoruba\", \"name_local\": \"Yorùbá\"},\n \"zu\": {\"bidi\": False, \"code\": \"zu\", \"name\": \"Zulu\", \"name_local\": \"isiZulu\"},\n}\nlocale.LANG_INFO.update(EXTRA_LANG_INFO)\n\nLANGUAGE_CODE = (\n \"en\"\n if \"en\" in conf.OPTIONS[\"Deployment\"][\"LANGUAGES\"]\n else conf.OPTIONS[\"Deployment\"][\"LANGUAGES\"][0]\n)\n\ntry:\n TIME_ZONE = get_localzone().zone\nexcept pytz.UnknownTimeZoneError:\n # Do not fail at this point because a timezone was not\n # detected.\n TIME_ZONE = pytz.utc.zone\n\n# Fixes https://github.com/regebro/tzlocal/issues/44\n# tzlocal 1.4 returns 'local' if unable to detect the timezone,\n# and this TZ id is invalid\nif TIME_ZONE == \"local\":\n TIME_ZONE = pytz.utc.zone\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.11/howto/static-files/\n\npath_prefix = conf.OPTIONS[\"Deployment\"][\"URL_PATH_PREFIX\"]\n\nif path_prefix != \"/\":\n path_prefix = \"/\" + path_prefix\n\nSTATIC_URL = urljoin(path_prefix, \"static/\")\nSTATIC_ROOT = os.path.join(conf.KOLIBRI_HOME, \"static\")\nMEDIA_URL = urljoin(path_prefix, \"media/\")\nMEDIA_ROOT = os.path.join(conf.KOLIBRI_HOME, \"media\")\n\n# https://docs.djangoproject.com/en/1.11/ref/settings/#csrf-cookie-path\n# Ensure that our CSRF cookie does not collide with other CSRF cookies\n# set by other Django apps served from the same domain.\nCSRF_COOKIE_PATH = path_prefix\nCSRF_COOKIE_NAME = \"kolibri_csrftoken\"\n\n# https://docs.djangoproject.com/en/1.11/ref/settings/#session-cookie-path\n# Ensure that our session cookie does not collidge with other session cookies\n# set by other Django apps served from the same domain.\nSESSION_COOKIE_PATH = path_prefix\n\n# https://docs.djangoproject.com/en/1.11/ref/settings/#std:setting-LOGGING\n# https://docs.djangoproject.com/en/1.11/topics/logging/\n\nLOGGING = get_logging_config(\n conf.LOG_ROOT,\n debug=DEBUG,\n debug_database=conf.OPTIONS[\"Server\"][\"DEBUG_LOG_DATABASE\"],\n)\n\n\n# Customizing Django auth system\n# https://docs.djangoproject.com/en/1.11/topics/auth/customizing/\n\nAUTH_USER_MODEL = \"kolibriauth.FacilityUser\"\n\n# Our own custom setting to override the anonymous user model\n\nAUTH_ANONYMOUS_USER_MODEL = \"kolibriauth.KolibriAnonymousUser\"\n\nAUTHENTICATION_BACKENDS = [\"kolibri.core.auth.backends.FacilityUserBackend\"]\n\n\n# Django REST Framework\n# http://www.django-rest-framework.org/api-guide/settings/\n\nREST_FRAMEWORK = {\n \"UNAUTHENTICATED_USER\": \"kolibri.core.auth.models.KolibriAnonymousUser\",\n \"DEFAULT_AUTHENTICATION_CLASSES\": [\n \"rest_framework.authentication.SessionAuthentication\"\n ],\n \"DEFAULT_CONTENT_NEGOTIATION_CLASS\": \"kolibri.core.negotiation.LimitContentNegotiation\",\n \"EXCEPTION_HANDLER\": \"kolibri.core.utils.exception_handler.custom_exception_handler\",\n}\n\n# System warnings to disable\n# see https://docs.djangoproject.com/en/1.11/ref/settings/#silenced-system-checks\nSILENCED_SYSTEM_CHECKS = [\"auth.W004\"]\n\n# Configuration for Django JS Reverse\n# https://github.com/ierror/django-js-reverse#options\n\nJS_REVERSE_EXCLUDE_NAMESPACES = 
[\"admin\"]\n\nENABLE_DATA_BOOTSTRAPPING = True\n\n# Session configuration\n\nSESSION_ENGINE = \"django.contrib.sessions.backends.file\"\n\nSESSION_FILE_PATH = os.path.join(conf.KOLIBRI_HOME, \"sessions\")\n\nif not os.path.exists(SESSION_FILE_PATH):\n if not os.path.exists(conf.KOLIBRI_HOME):\n raise RuntimeError(\"The KOLIBRI_HOME dir does not exist\")\n os.mkdir(SESSION_FILE_PATH)\n\nSESSION_COOKIE_NAME = \"kolibri\"\n\nSESSION_EXPIRE_AT_BROWSER_CLOSE = True\n\nSESSION_COOKIE_AGE = 1200\n\napply_settings(sys.modules[__name__])\n", "path": "kolibri/deployment/default/settings/base.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\n\"\"\"\nDjango settings for kolibri project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.11/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.11/ref/settings/\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport os\nimport sys\n\nimport pytz\nfrom django.conf import locale\nfrom six.moves.urllib.parse import urljoin\nfrom tzlocal import get_localzone\n\nimport kolibri\nfrom kolibri.deployment.default.cache import CACHES\nfrom kolibri.plugins.utils.settings import apply_settings\nfrom kolibri.utils import conf\nfrom kolibri.utils import i18n\nfrom kolibri.utils.logger import get_logging_config\n\ntry:\n isolation_level = None\n import psycopg2 # noqa\n\n isolation_level = psycopg2.extensions.ISOLATION_LEVEL_SERIALIZABLE\nexcept ImportError:\n pass\n\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\n# import kolibri, so we can get the path to the module.\n# we load other utilities related to i18n\n# This is essential! We load the kolibri conf INSIDE the Django conf\n\nKOLIBRI_MODULE_PATH = os.path.dirname(kolibri.__file__)\n\nBASE_DIR = os.path.abspath(os.path.dirname(__name__))\n\nLOCALE_PATHS = [os.path.join(KOLIBRI_MODULE_PATH, \"locale\")]\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = \"f@ey3)y^03r9^@mou97apom*+c1m#b1!cwbm50^s4yk72xce27\"\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = conf.OPTIONS[\"Server\"][\"DEBUG\"]\n\nALLOWED_HOSTS = [\"*\"]\n\n# Application definition\n\nINSTALLED_APPS = [\n \"kolibri.core\",\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"django_filters\",\n \"kolibri.core.auth.apps.KolibriAuthConfig\",\n \"kolibri.core.content\",\n \"kolibri.core.logger\",\n \"kolibri.core.notifications.apps.KolibriNotificationsConfig\",\n \"kolibri.core.tasks.apps.KolibriTasksConfig\",\n \"kolibri.core.deviceadmin\",\n \"kolibri.core.webpack\",\n \"kolibri.core.exams\",\n \"kolibri.core.device\",\n \"kolibri.core.discovery\",\n \"kolibri.core.lessons\",\n \"kolibri.core.analytics\",\n \"rest_framework\",\n \"django_js_reverse\",\n \"jsonfield\",\n \"morango\",\n]\n\nMIDDLEWARE = [\n \"kolibri.core.analytics.middleware.cherrypy_access_log_middleware\",\n \"kolibri.core.device.middleware.ProvisioningErrorHandler\",\n \"django.middleware.cache.UpdateCacheMiddleware\",\n \"kolibri.core.analytics.middleware.MetricsMiddleware\",\n \"kolibri.core.auth.middleware.KolibriSessionMiddleware\",\n \"kolibri.core.device.middleware.KolibriLocaleMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"kolibri.core.auth.middleware.CustomAuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n \"django.middleware.security.SecurityMiddleware\",\n \"django.middleware.cache.FetchFromCacheMiddleware\",\n]\n\n# By default don't cache anything unless it explicitly requests it to!\nCACHE_MIDDLEWARE_SECONDS = 0\n\nCACHE_MIDDLEWARE_KEY_PREFIX = \"pages\"\n\nCACHES = 
CACHES\n\nROOT_URLCONF = \"kolibri.deployment.default.urls\"\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n \"kolibri.core.context_processors.custom_context_processor.developer_mode\",\n ]\n },\n }\n]\n\nWSGI_APPLICATION = \"kolibri.deployment.default.wsgi.application\"\n\n\n# Database\n# https://docs.djangoproject.com/en/1.11/ref/settings/#databases\n\nif conf.OPTIONS[\"Database\"][\"DATABASE_ENGINE\"] == \"sqlite\":\n DATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.sqlite3\",\n \"NAME\": os.path.join(\n conf.KOLIBRI_HOME,\n conf.OPTIONS[\"Database\"][\"DATABASE_NAME\"] or \"db.sqlite3\",\n ),\n \"OPTIONS\": {\"timeout\": 100},\n },\n \"notifications_db\": {\n \"ENGINE\": \"django.db.backends.sqlite3\",\n \"NAME\": os.path.join(conf.KOLIBRI_HOME, \"notifications.sqlite3\"),\n \"OPTIONS\": {\"timeout\": 100},\n },\n }\n DATABASE_ROUTERS = (\"kolibri.core.notifications.models.NotificationsRouter\",)\n\nelif conf.OPTIONS[\"Database\"][\"DATABASE_ENGINE\"] == \"postgres\":\n DATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.postgresql\",\n \"NAME\": conf.OPTIONS[\"Database\"][\"DATABASE_NAME\"],\n \"PASSWORD\": conf.OPTIONS[\"Database\"][\"DATABASE_PASSWORD\"],\n \"USER\": conf.OPTIONS[\"Database\"][\"DATABASE_USER\"],\n \"HOST\": conf.OPTIONS[\"Database\"][\"DATABASE_HOST\"],\n \"PORT\": conf.OPTIONS[\"Database\"][\"DATABASE_PORT\"],\n },\n \"default-serializable\": {\n \"ENGINE\": \"django.db.backends.postgresql\",\n \"NAME\": conf.OPTIONS[\"Database\"][\"DATABASE_NAME\"],\n \"PASSWORD\": conf.OPTIONS[\"Database\"][\"DATABASE_PASSWORD\"],\n \"USER\": conf.OPTIONS[\"Database\"][\"DATABASE_USER\"],\n \"HOST\": conf.OPTIONS[\"Database\"][\"DATABASE_HOST\"],\n \"PORT\": conf.OPTIONS[\"Database\"][\"DATABASE_PORT\"],\n \"OPTIONS\": {\"isolation_level\": isolation_level},\n },\n }\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.11/topics/i18n/\n\n# For language names, see:\n# https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes\n# http://helpsharepointvision.nevron.com/Culture_Table.html\n\n# django-specific format, e.g.: [ ('bn-bd', 'বাংলা'), ('en', 'English'), ...]\nLANGUAGES = [\n (\n i18n.KOLIBRI_LANGUAGE_INFO[lang_code][\"intl_code\"],\n i18n.KOLIBRI_LANGUAGE_INFO[lang_code][\"language_name\"],\n )\n for lang_code in conf.OPTIONS[\"Deployment\"][\"LANGUAGES\"]\n if lang_code in i18n.KOLIBRI_LANGUAGE_INFO\n]\n\n# Some languages are not supported out-of-the-box by Django\n# Here, we use the language code in Intl.js\nEXTRA_LANG_INFO = {\n \"ff-cm\": {\n \"bidi\": False,\n \"code\": \"ff-cm\",\n \"name\": \"Fulfulde (Cameroon)\",\n \"name_local\": \"Fulfulde Mbororoore\",\n },\n \"es-419\": {\n \"bidi\": False,\n \"code\": \"es-419\",\n \"name\": \"Spanish (Latin America)\",\n \"name_local\": \"Español\",\n },\n \"es-es\": {\n \"bidi\": False,\n \"code\": \"es-es\",\n \"name\": \"Spanish (Spain)\",\n \"name_local\": \"Español (España)\",\n },\n \"fr-ht\": {\n \"bidi\": False,\n \"code\": \"fr-ht\",\n \"name\": \"Haitian Creole\",\n \"name_local\": \"Kreyòl ayisyen\",\n },\n \"gu-in\": {\n \"bidi\": False,\n \"code\": \"gu-in\",\n \"name\": \"Gujarati\",\n \"name_local\": \"ગુજરાતી\",\n },\n \"km\": {\"bidi\": 
False, \"code\": \"km\", \"name\": \"Khmer\", \"name_local\": \"ភាសាខ្មែរ\"},\n \"nyn\": {\n \"bidi\": False,\n \"code\": \"nyn\",\n \"name\": \"Chichewa, Chewa, Nyanja\",\n \"name_local\": \"Chinyanja\",\n },\n \"zh\": {\n \"bidi\": False,\n \"code\": \"zh-hans\",\n \"name\": \"Simplified Chinese\",\n \"name_local\": \"简体中文\",\n },\n \"yo\": {\"bidi\": False, \"code\": \"yo\", \"name\": \"Yoruba\", \"name_local\": \"Yorùbá\"},\n \"zu\": {\"bidi\": False, \"code\": \"zu\", \"name\": \"Zulu\", \"name_local\": \"isiZulu\"},\n}\nlocale.LANG_INFO.update(EXTRA_LANG_INFO)\n\nLANGUAGE_CODE = (\n \"en\"\n if \"en\" in conf.OPTIONS[\"Deployment\"][\"LANGUAGES\"]\n else conf.OPTIONS[\"Deployment\"][\"LANGUAGES\"][0]\n)\n\ntry:\n TIME_ZONE = get_localzone().zone\nexcept (pytz.UnknownTimeZoneError, ValueError):\n # Do not fail at this point because a timezone was not\n # detected.\n TIME_ZONE = pytz.utc.zone\n\n# Fixes https://github.com/regebro/tzlocal/issues/44\n# tzlocal 1.4 returns 'local' if unable to detect the timezone,\n# and this TZ id is invalid\nif TIME_ZONE == \"local\":\n TIME_ZONE = pytz.utc.zone\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.11/howto/static-files/\n\npath_prefix = conf.OPTIONS[\"Deployment\"][\"URL_PATH_PREFIX\"]\n\nif path_prefix != \"/\":\n path_prefix = \"/\" + path_prefix\n\nSTATIC_URL = urljoin(path_prefix, \"static/\")\nSTATIC_ROOT = os.path.join(conf.KOLIBRI_HOME, \"static\")\nMEDIA_URL = urljoin(path_prefix, \"media/\")\nMEDIA_ROOT = os.path.join(conf.KOLIBRI_HOME, \"media\")\n\n# https://docs.djangoproject.com/en/1.11/ref/settings/#csrf-cookie-path\n# Ensure that our CSRF cookie does not collide with other CSRF cookies\n# set by other Django apps served from the same domain.\nCSRF_COOKIE_PATH = path_prefix\nCSRF_COOKIE_NAME = \"kolibri_csrftoken\"\n\n# https://docs.djangoproject.com/en/1.11/ref/settings/#session-cookie-path\n# Ensure that our session cookie does not collidge with other session cookies\n# set by other Django apps served from the same domain.\nSESSION_COOKIE_PATH = path_prefix\n\n# https://docs.djangoproject.com/en/1.11/ref/settings/#std:setting-LOGGING\n# https://docs.djangoproject.com/en/1.11/topics/logging/\n\nLOGGING = get_logging_config(\n conf.LOG_ROOT,\n debug=DEBUG,\n debug_database=conf.OPTIONS[\"Server\"][\"DEBUG_LOG_DATABASE\"],\n)\n\n\n# Customizing Django auth system\n# https://docs.djangoproject.com/en/1.11/topics/auth/customizing/\n\nAUTH_USER_MODEL = \"kolibriauth.FacilityUser\"\n\n# Our own custom setting to override the anonymous user model\n\nAUTH_ANONYMOUS_USER_MODEL = \"kolibriauth.KolibriAnonymousUser\"\n\nAUTHENTICATION_BACKENDS = [\"kolibri.core.auth.backends.FacilityUserBackend\"]\n\n\n# Django REST Framework\n# http://www.django-rest-framework.org/api-guide/settings/\n\nREST_FRAMEWORK = {\n \"UNAUTHENTICATED_USER\": \"kolibri.core.auth.models.KolibriAnonymousUser\",\n \"DEFAULT_AUTHENTICATION_CLASSES\": [\n \"rest_framework.authentication.SessionAuthentication\"\n ],\n \"DEFAULT_CONTENT_NEGOTIATION_CLASS\": \"kolibri.core.negotiation.LimitContentNegotiation\",\n \"EXCEPTION_HANDLER\": \"kolibri.core.utils.exception_handler.custom_exception_handler\",\n}\n\n# System warnings to disable\n# see https://docs.djangoproject.com/en/1.11/ref/settings/#silenced-system-checks\nSILENCED_SYSTEM_CHECKS = [\"auth.W004\"]\n\n# Configuration for Django JS Reverse\n# 
https://github.com/ierror/django-js-reverse#options\n\nJS_REVERSE_EXCLUDE_NAMESPACES = [\"admin\"]\n\nENABLE_DATA_BOOTSTRAPPING = True\n\n# Session configuration\n\nSESSION_ENGINE = \"django.contrib.sessions.backends.file\"\n\nSESSION_FILE_PATH = os.path.join(conf.KOLIBRI_HOME, \"sessions\")\n\nif not os.path.exists(SESSION_FILE_PATH):\n if not os.path.exists(conf.KOLIBRI_HOME):\n raise RuntimeError(\"The KOLIBRI_HOME dir does not exist\")\n os.mkdir(SESSION_FILE_PATH)\n\nSESSION_COOKIE_NAME = \"kolibri\"\n\nSESSION_EXPIRE_AT_BROWSER_CLOSE = True\n\nSESSION_COOKIE_AGE = 1200\n\napply_settings(sys.modules[__name__])\n", "path": "kolibri/deployment/default/settings/base.py" } ]
diff --git a/kolibri/deployment/default/settings/base.py b/kolibri/deployment/default/settings/base.py index 167a1dad4bb..627e66e8e0c 100644 --- a/kolibri/deployment/default/settings/base.py +++ b/kolibri/deployment/default/settings/base.py @@ -251,7 +251,7 @@ try: TIME_ZONE = get_localzone().zone -except pytz.UnknownTimeZoneError: +except (pytz.UnknownTimeZoneError, ValueError): # Do not fail at this point because a timezone was not # detected. TIME_ZONE = pytz.utc.zone
fedora-infra__bodhi-1042
GET on /masher/ errors with a 404 response GET on `/masher` works, but GET on `/masher/` returns an HTTP 404 error code. We should add a route to allow the trailing slash on this URL.
[ { "content": "# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\nfrom collections import defaultdict\nfrom dogpile.cache import make_region\nfrom munch import munchify\nfrom sqlalchemy import engine_from_config\nfrom sqlalchemy.orm import scoped_session, sessionmaker\nfrom zope.sqlalchemy import ZopeTransactionExtension\n\nfrom pyramid.settings import asbool\nfrom pyramid.security import unauthenticated_userid\nfrom pyramid.config import Configurator\nfrom pyramid.authentication import AuthTktAuthenticationPolicy\nfrom pyramid.authorization import ACLAuthorizationPolicy\nfrom pyramid.renderers import JSONP\nfrom pyramid.exceptions import HTTPForbidden\n\nfrom . import buildsys\n\nimport logging\n\nlog = logging.getLogger(__name__)\n\n\n# TODO -- someday move this externally to \"fedora_flavored_markdown\"\nfrom bodhi.server import ffmarkdown\nffmarkdown.inject()\n\n#\n# Request methods\n#\n\ndef get_dbsession(request):\n engine = engine_from_config(request.registry.settings, 'sqlalchemy.')\n Sess = scoped_session(sessionmaker(extension=ZopeTransactionExtension()))\n Sess.configure(bind=engine)\n session = Sess()\n\n def cleanup(request):\n ## No need to do rollback/commit ourselves. the zope transaction\n ## manager takes care of that for us...\n #if request.exception is not None:\n # session.rollback()\n #else:\n # session.commit()\n ## However, we may still want to explicitly close the session we opened\n #log.debug(\"Closing session at the end of a request.\")\n session.close()\n\n request.add_finished_callback(cleanup)\n\n return session\n\n\ndef get_cacheregion(request):\n region = make_region()\n region.configure_from_config(request.registry.settings, \"dogpile.cache.\")\n return region\n\n\ndef get_user(request):\n from bodhi.server.models import User\n userid = unauthenticated_userid(request)\n if userid is not None:\n user = request.db.query(User).filter_by(name=unicode(userid)).first()\n # Why munch? 
https://github.com/fedora-infra/bodhi/issues/473\n return munchify(user.__json__(request=request))\n\n\ndef groupfinder(userid, request):\n from bodhi.server.models import User\n if request.user:\n user = User.get(request.user.name, request.db)\n return ['group:' + group.name for group in user.groups]\n\n\ndef get_koji(request):\n return buildsys.get_session()\n\n\ndef get_buildinfo(request):\n \"\"\"\n A per-request cache populated by the validators and shared with the views\n to store frequently used package-specific data, like build tags and ACLs.\n \"\"\"\n return defaultdict(dict)\n\n\ndef get_releases(request):\n from bodhi.server.models import Release\n return Release.all_releases(request.db)\n\n#\n# Cornice filters\n#\n\ndef exception_filter(response, request):\n \"\"\"Log exceptions that get thrown up to cornice\"\"\"\n if isinstance(response, Exception):\n log.exception('Unhandled exception raised: %r' % response)\n return response\n\nfrom cornice.validators import DEFAULT_FILTERS\nDEFAULT_FILTERS.insert(0, exception_filter)\n\n\n#\n# Bodhi initialization\n#\n\ndef main(global_config, testing=None, session=None, **settings):\n \"\"\" This function returns a WSGI application \"\"\"\n # Setup our buildsystem\n buildsys.setup_buildsystem(settings)\n\n # Sessions & Caching\n from pyramid.session import SignedCookieSessionFactory\n session_factory = SignedCookieSessionFactory(settings['session.secret'])\n\n # Construct a list of all groups we're interested in\n default = ' '.join([settings.get(key, '') for key in [\n 'important_groups',\n 'admin_packager_groups',\n 'mandatory_packager_groups',\n 'admin_groups',\n ]])\n # pyramid_fas_openid looks for this setting\n settings['openid.groups'] = settings.get('openid.groups', default).split()\n\n config = Configurator(settings=settings, session_factory=session_factory)\n\n # Plugins\n config.include('pyramid_mako')\n config.include('cornice')\n\n # Lazy-loaded memoized request properties\n if session:\n config.add_request_method(lambda _: session, 'db', reify=True)\n else:\n config.add_request_method(get_dbsession, 'db', reify=True)\n\n config.add_request_method(get_user, 'user', reify=True)\n config.add_request_method(get_koji, 'koji', reify=True)\n config.add_request_method(get_cacheregion, 'cache', reify=True)\n config.add_request_method(get_buildinfo, 'buildinfo', reify=True)\n config.add_request_method(get_releases, 'releases', reify=True)\n\n # Templating\n config.add_mako_renderer('.html', settings_prefix='mako.')\n config.add_static_view('static', 'bodhi:server/static')\n\n from bodhi.server.renderers import rss, jpeg\n config.add_renderer('rss', rss)\n config.add_renderer('jpeg', jpeg)\n config.add_renderer('jsonp', JSONP(param_name='callback'))\n\n # i18n\n config.add_translation_dirs('bodhi:server/locale/')\n\n # Authentication & Authorization\n if testing:\n # use a permissive security policy while running unit tests\n config.testing_securitypolicy(userid=testing, permissive=True)\n else:\n config.set_authentication_policy(AuthTktAuthenticationPolicy(\n settings['authtkt.secret'],\n callback=groupfinder,\n secure=asbool(settings['authtkt.secure']),\n hashalg='sha512'))\n config.set_authorization_policy(ACLAuthorizationPolicy())\n\n # Frontpage\n config.add_route('home', '/')\n\n # Views for creating new objects\n config.add_route('new_update', '/updates/new')\n config.add_route('new_override', '/overrides/new')\n config.add_route('new_stack', '/stacks/new')\n\n # Metrics\n config.add_route('metrics', '/metrics')\n 
config.add_route('masher_status', '/masher')\n\n # Auto-completion search\n config.add_route('search_packages', '/search/packages')\n config.add_route('latest_candidates', '/latest_candidates')\n config.add_route('latest_builds', '/latest_builds')\n\n config.add_route('captcha_image', '/captcha/{cipherkey}/')\n\n # pyramid.openid\n config.add_route('login', '/login')\n config.add_view('bodhi.server.security.login', route_name='login')\n config.add_view('bodhi.server.security.login', context=HTTPForbidden)\n config.add_route('logout', '/logout')\n config.add_view('bodhi.server.security.logout', route_name='logout')\n config.add_route('verify_openid', pattern='/dologin.html')\n config.add_view('pyramid_fas_openid.verify_openid', route_name='verify_openid')\n\n config.add_route('api_version', '/api_version')\n\n # The only user preference we have.\n config.add_route('popup_toggle', '/popup_toggle')\n\n config.scan('bodhi.server.views')\n config.scan('bodhi.server.services')\n config.scan('bodhi.server.captcha')\n config.scan('bodhi.server.events')\n\n return config.make_wsgi_app()\n", "path": "bodhi/server/__init__.py" } ]
[ { "content": "# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\nfrom collections import defaultdict\nfrom dogpile.cache import make_region\nfrom munch import munchify\nfrom sqlalchemy import engine_from_config\nfrom sqlalchemy.orm import scoped_session, sessionmaker\nfrom zope.sqlalchemy import ZopeTransactionExtension\n\nfrom pyramid.settings import asbool\nfrom pyramid.security import unauthenticated_userid\nfrom pyramid.config import Configurator\nfrom pyramid.authentication import AuthTktAuthenticationPolicy\nfrom pyramid.authorization import ACLAuthorizationPolicy\nfrom pyramid.renderers import JSONP\nfrom pyramid.exceptions import HTTPForbidden\n\nfrom . import buildsys\n\nimport logging\n\nlog = logging.getLogger(__name__)\n\n\n# TODO -- someday move this externally to \"fedora_flavored_markdown\"\nfrom bodhi.server import ffmarkdown\nffmarkdown.inject()\n\n#\n# Request methods\n#\n\ndef get_dbsession(request):\n engine = engine_from_config(request.registry.settings, 'sqlalchemy.')\n Sess = scoped_session(sessionmaker(extension=ZopeTransactionExtension()))\n Sess.configure(bind=engine)\n session = Sess()\n\n def cleanup(request):\n ## No need to do rollback/commit ourselves. the zope transaction\n ## manager takes care of that for us...\n #if request.exception is not None:\n # session.rollback()\n #else:\n # session.commit()\n ## However, we may still want to explicitly close the session we opened\n #log.debug(\"Closing session at the end of a request.\")\n session.close()\n\n request.add_finished_callback(cleanup)\n\n return session\n\n\ndef get_cacheregion(request):\n region = make_region()\n region.configure_from_config(request.registry.settings, \"dogpile.cache.\")\n return region\n\n\ndef get_user(request):\n from bodhi.server.models import User\n userid = unauthenticated_userid(request)\n if userid is not None:\n user = request.db.query(User).filter_by(name=unicode(userid)).first()\n # Why munch? 
https://github.com/fedora-infra/bodhi/issues/473\n return munchify(user.__json__(request=request))\n\n\ndef groupfinder(userid, request):\n from bodhi.server.models import User\n if request.user:\n user = User.get(request.user.name, request.db)\n return ['group:' + group.name for group in user.groups]\n\n\ndef get_koji(request):\n return buildsys.get_session()\n\n\ndef get_buildinfo(request):\n \"\"\"\n A per-request cache populated by the validators and shared with the views\n to store frequently used package-specific data, like build tags and ACLs.\n \"\"\"\n return defaultdict(dict)\n\n\ndef get_releases(request):\n from bodhi.server.models import Release\n return Release.all_releases(request.db)\n\n#\n# Cornice filters\n#\n\ndef exception_filter(response, request):\n \"\"\"Log exceptions that get thrown up to cornice\"\"\"\n if isinstance(response, Exception):\n log.exception('Unhandled exception raised: %r' % response)\n return response\n\nfrom cornice.validators import DEFAULT_FILTERS\nDEFAULT_FILTERS.insert(0, exception_filter)\n\n\n#\n# Bodhi initialization\n#\n\ndef main(global_config, testing=None, session=None, **settings):\n \"\"\" This function returns a WSGI application \"\"\"\n # Setup our buildsystem\n buildsys.setup_buildsystem(settings)\n\n # Sessions & Caching\n from pyramid.session import SignedCookieSessionFactory\n session_factory = SignedCookieSessionFactory(settings['session.secret'])\n\n # Construct a list of all groups we're interested in\n default = ' '.join([settings.get(key, '') for key in [\n 'important_groups',\n 'admin_packager_groups',\n 'mandatory_packager_groups',\n 'admin_groups',\n ]])\n # pyramid_fas_openid looks for this setting\n settings['openid.groups'] = settings.get('openid.groups', default).split()\n\n config = Configurator(settings=settings, session_factory=session_factory)\n\n # Plugins\n config.include('pyramid_mako')\n config.include('cornice')\n\n # Lazy-loaded memoized request properties\n if session:\n config.add_request_method(lambda _: session, 'db', reify=True)\n else:\n config.add_request_method(get_dbsession, 'db', reify=True)\n\n config.add_request_method(get_user, 'user', reify=True)\n config.add_request_method(get_koji, 'koji', reify=True)\n config.add_request_method(get_cacheregion, 'cache', reify=True)\n config.add_request_method(get_buildinfo, 'buildinfo', reify=True)\n config.add_request_method(get_releases, 'releases', reify=True)\n\n # Templating\n config.add_mako_renderer('.html', settings_prefix='mako.')\n config.add_static_view('static', 'bodhi:server/static')\n\n from bodhi.server.renderers import rss, jpeg\n config.add_renderer('rss', rss)\n config.add_renderer('jpeg', jpeg)\n config.add_renderer('jsonp', JSONP(param_name='callback'))\n\n # i18n\n config.add_translation_dirs('bodhi:server/locale/')\n\n # Authentication & Authorization\n if testing:\n # use a permissive security policy while running unit tests\n config.testing_securitypolicy(userid=testing, permissive=True)\n else:\n config.set_authentication_policy(AuthTktAuthenticationPolicy(\n settings['authtkt.secret'],\n callback=groupfinder,\n secure=asbool(settings['authtkt.secure']),\n hashalg='sha512'))\n config.set_authorization_policy(ACLAuthorizationPolicy())\n\n # Frontpage\n config.add_route('home', '/')\n\n # Views for creating new objects\n config.add_route('new_update', '/updates/new')\n config.add_route('new_override', '/overrides/new')\n config.add_route('new_stack', '/stacks/new')\n\n # Metrics\n config.add_route('metrics', '/metrics')\n 
config.add_route('masher_status', '/masher/')\n\n # Auto-completion search\n config.add_route('search_packages', '/search/packages')\n config.add_route('latest_candidates', '/latest_candidates')\n config.add_route('latest_builds', '/latest_builds')\n\n config.add_route('captcha_image', '/captcha/{cipherkey}/')\n\n # pyramid.openid\n config.add_route('login', '/login')\n config.add_view('bodhi.server.security.login', route_name='login')\n config.add_view('bodhi.server.security.login', context=HTTPForbidden)\n config.add_route('logout', '/logout')\n config.add_view('bodhi.server.security.logout', route_name='logout')\n config.add_route('verify_openid', pattern='/dologin.html')\n config.add_view('pyramid_fas_openid.verify_openid', route_name='verify_openid')\n\n config.add_route('api_version', '/api_version')\n\n # The only user preference we have.\n config.add_route('popup_toggle', '/popup_toggle')\n\n config.scan('bodhi.server.views')\n config.scan('bodhi.server.services')\n config.scan('bodhi.server.captcha')\n config.scan('bodhi.server.events')\n\n return config.make_wsgi_app()\n", "path": "bodhi/server/__init__.py" } ]
diff --git a/bodhi/server/__init__.py b/bodhi/server/__init__.py index 8cd02bcf9c..35befc34ef 100644 --- a/bodhi/server/__init__.py +++ b/bodhi/server/__init__.py @@ -191,7 +191,7 @@ def main(global_config, testing=None, session=None, **settings): # Metrics config.add_route('metrics', '/metrics') - config.add_route('masher_status', '/masher') + config.add_route('masher_status', '/masher/') # Auto-completion search config.add_route('search_packages', '/search/packages')
networkx__networkx-2647
Readthedocs pain Readthedocs (RTD) is a pain to work with and keeps having timeout errors. I started to look into whether we can build the docs on our own and push them to RTD instead of having it built on the site. It would also make more sense to have the doc build process as part of our CI process, rather than only checked after the fact. Has there been any discussion about moving away from RTD before (at least the build process)? If so, was there a reason not to move? I assume it is too late to move back to hosting the docs on github, but I thought I'd check since it might be easier to do.
[ { "content": "# -*- coding: utf-8 -*-\n#\n# Sphinx documentation build configuration file, created by\n# sphinx-quickstart.py on Sat Mar 8 21:47:50 2008.\n#\n# This file is execfile()d with the current directory set to its containing dir.\n#\n# The contents of this file are pickled, so don't put values in the namespace\n# that aren't pickleable (module imports are okay, they're removed automatically).\n#\n# All configuration values have a default value; values that are commented out\n# serve to show the default value.\nfrom __future__ import print_function\n\nimport sys\nimport os\nfrom datetime import date\n\nfrom sphinx_gallery.sorting import ExplicitOrder\n\n# Check Sphinx version\nimport sphinx\nif sphinx.__version__ < \"1.3\":\n raise RuntimeError(\"Sphinx 1.3 or newer required\")\n\n# Environment variable to know if the docs are being built on rtd.\non_rtd = os.environ.get('READTHEDOCS', None) == 'True'\n#print\n#print(\"Building on ReadTheDocs: {}\".format(on_rtd))\n#print\n#print(\"Current working directory: {}\".format(os.path.abspath(os.curdir)))\n#print(\"Python: {}\".format(sys.executable))\n\n# If your extensions are in another directory, add it here.\n# These locations are relative to conf.py\n\n# General configuration\n# ---------------------\n\n# Add any Sphinx extension module names here, as strings. They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = [\n 'sphinx.ext.autosummary',\n 'sphinx.ext.autodoc',\n 'sphinx.ext.coverage',\n 'sphinx.ext.doctest',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.mathjax',\n 'sphinx.ext.napoleon',\n 'sphinx.ext.todo',\n 'sphinx.ext.viewcode',\n 'sphinx_gallery.gen_gallery',\n 'nb2plots',\n 'texext',\n]\n\n# https://github.com/sphinx-gallery/sphinx-gallery\nsphinx_gallery_conf = {\n # path to your examples scripts\n 'examples_dirs': '../examples',\n 'subsection_order': ExplicitOrder(['../examples/basic',\n '../examples/drawing',\n '../examples/graph',\n '../examples/algorithms',\n '../examples/advanced',\n '../examples/3d_drawing',\n '../examples/pygraphviz',\n '../examples/javascript',\n '../examples/jit',\n '../examples/subclass']),\n # path where to save gallery generated examples\n 'gallery_dirs': 'auto_examples',\n 'backreferences_dir': 'modules/generated',\n 'expected_failing_examples': ['../examples/advanced/plot_parallel_betweenness.py']\n}\n\n# generate autosummary pages\nautosummary_generate = True\n\n# Add any paths that contain templates here, relative to this directory.\n#templates_path = ['']\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\nsource_encoding = 'utf-8'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General substitutions.\nproject = 'NetworkX'\ncopyright = '2004-{}, NetworkX Developers'.format(date.today().year)\n\n# The default replacements for |version| and |release|, also used in various\n# other places throughout the built documents.\n#\n# The short X.Y version.\nimport networkx\nversion = networkx.__version__\n# The full version, including dev info\nrelease = networkx.__version__.replace('_', '')\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of documents that shouldn't be included in the build.\n# unused_docs = ['']\n\n# If true, '()' will be appended to :func: etc. 
cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\nadd_module_names = False\n\n# show_authors = True\n\n# The name of the Pygments (syntax highlighting) style to use.\n#pygments_style = 'friendly'\npygments_style = 'sphinx'\n\n# A list of prefixs that are ignored when creating the module index. (new in Sphinx 0.6)\nmodindex_common_prefix = ['networkx.']\n\ndoctest_global_setup = \"import networkx as nx\"\n\n# treat ``x, y : type`` as vars x and y instead of default ``y(x,) : type``\nnapoleon_use_param = False\n\n# Options for HTML output\n# -----------------------\n\nif not on_rtd:\n import sphinx_rtd_theme\n html_theme = 'sphinx_rtd_theme'\n html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\n# html_theme_options = {\n# \"rightsidebar\": \"true\",\n# \"relbarbgcolor: \"black\"\n#}\n\n# The style sheet to use for HTML and HTML Help pages. A file of that name\n# must exist either in Sphinx' static/ path, or in one of the custom paths\n# given in html_static_path.\n#html_style = ''\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\nhtml_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#html_use_smartypants = True\n\n# Content template for the index page.\n#html_index = 'index.html'\n\n# Custom sidebar templates, maps page names to templates.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# templates.\n#html_additional_pages = {'': ''}\n\n# If true, the reST sources are included in the HTML build as _sources/<name>.\nhtml_copy_source = False\n\nhtml_use_opensearch = 'http://networkx.github.io'\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'NetworkX'\n\n# Options for LaTeX output\n# ------------------------\n\n# The paper size ('letter' or 'a4').\nlatex_paper_size = 'letter'\n\n# The font size ('10pt', '11pt' or '12pt').\n#latex_font_size = '10pt'\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title, author, document class [howto/manual]).\nlatex_documents = [('reference/index', 'networkx_reference.tex',\n 'NetworkX Reference',\n 'Aric Hagberg, Dan Schult, Pieter Swart', 'manual', 1)]\n\nlatex_appendices = ['tutorial']\n\n# Intersphinx mapping\nintersphinx_mapping = {'https://docs.python.org/': None,\n 'https://docs.scipy.org/doc/numpy/': None,\n }\n\n# The reST default role (used for this markup: `text`) to use for all\n# documents.\ndefault_role = 'obj'\n\nnumpydoc_show_class_members = False\n\n# Add the 'copybutton' javascript, to hide/show the prompt in code\n# examples\ndef setup(app):\n app.add_javascript('copybutton.js')\n", "path": "doc/conf.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\n#\n# Sphinx documentation build configuration file, created by\n# sphinx-quickstart.py on Sat Mar 8 21:47:50 2008.\n#\n# This file is execfile()d with the current directory set to its containing dir.\n#\n# The contents of this file are pickled, so don't put values in the namespace\n# that aren't pickleable (module imports are okay, they're removed automatically).\n#\n# All configuration values have a default value; values that are commented out\n# serve to show the default value.\nfrom __future__ import print_function\n\nimport sys\nimport os\nfrom datetime import date\n\nfrom sphinx_gallery.sorting import ExplicitOrder\n\n# Check Sphinx version\nimport sphinx\nif sphinx.__version__ < \"1.3\":\n raise RuntimeError(\"Sphinx 1.3 or newer required\")\n\n# Environment variable to know if the docs are being built on rtd.\non_rtd = os.environ.get('READTHEDOCS', None) == 'True'\n#print\n#print(\"Building on ReadTheDocs: {}\".format(on_rtd))\n#print\n#print(\"Current working directory: {}\".format(os.path.abspath(os.curdir)))\n#print(\"Python: {}\".format(sys.executable))\n\n# If your extensions are in another directory, add it here.\n# These locations are relative to conf.py\n\n# General configuration\n# ---------------------\n\n# Add any Sphinx extension module names here, as strings. They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = [\n 'sphinx.ext.autosummary',\n 'sphinx.ext.autodoc',\n 'sphinx.ext.coverage',\n 'sphinx.ext.doctest',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.mathjax',\n 'sphinx.ext.napoleon',\n 'sphinx.ext.todo',\n 'sphinx.ext.viewcode',\n 'sphinx_gallery.gen_gallery',\n 'nb2plots',\n 'texext',\n]\n\n# https://github.com/sphinx-gallery/sphinx-gallery\nsphinx_gallery_conf = {\n # path to your examples scripts\n 'examples_dirs': '../examples',\n 'subsection_order': ExplicitOrder(['../examples/basic',\n '../examples/drawing',\n '../examples/graph',\n '../examples/algorithms',\n '../examples/advanced',\n '../examples/3d_drawing',\n '../examples/pygraphviz',\n '../examples/javascript',\n '../examples/jit',\n '../examples/subclass']),\n # path where to save gallery generated examples\n 'gallery_dirs': 'auto_examples',\n 'backreferences_dir': 'modules/generated',\n 'expected_failing_examples': ['../examples/advanced/plot_parallel_betweenness.py']\n}\n\n# generate autosummary pages\nautosummary_generate = True\n\n# Add any paths that contain templates here, relative to this directory.\n#templates_path = ['']\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\nsource_encoding = 'utf-8'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General substitutions.\nproject = 'NetworkX'\ncopyright = '2004-{}, NetworkX Developers'.format(date.today().year)\n\n# The default replacements for |version| and |release|, also used in various\n# other places throughout the built documents.\n#\n# The short X.Y version.\nimport networkx\nversion = networkx.__version__\n# The full version, including dev info\nrelease = networkx.__version__.replace('_', '')\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of documents that shouldn't be included in the build.\n# unused_docs = ['']\n\n# If true, '()' will be appended to :func: etc. 
cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\nadd_module_names = False\n\n# show_authors = True\n\n# The name of the Pygments (syntax highlighting) style to use.\n#pygments_style = 'friendly'\npygments_style = 'sphinx'\n\n# A list of prefixs that are ignored when creating the module index. (new in Sphinx 0.6)\nmodindex_common_prefix = ['networkx.']\n\ndoctest_global_setup = \"import networkx as nx\"\n\n# treat ``x, y : type`` as vars x and y instead of default ``y(x,) : type``\nnapoleon_use_param = False\n\n# Options for HTML output\n# -----------------------\n\nif not on_rtd:\n import sphinx_rtd_theme\n html_theme = 'sphinx_rtd_theme'\n html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\n# html_theme_options = {\n# \"rightsidebar\": \"true\",\n# \"relbarbgcolor: \"black\"\n#}\n\n# The style sheet to use for HTML and HTML Help pages. A file of that name\n# must exist either in Sphinx' static/ path, or in one of the custom paths\n# given in html_static_path.\n#html_style = ''\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\nhtml_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#html_use_smartypants = True\n\n# Content template for the index page.\n#html_index = 'index.html'\n\n# Custom sidebar templates, maps page names to templates.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# templates.\n#html_additional_pages = {'': ''}\n\n# If true, the reST sources are included in the HTML build as _sources/<name>.\nhtml_copy_source = False\n\nhtml_use_opensearch = 'http://networkx.github.io'\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'NetworkX'\n\n# Options for LaTeX output\n# ------------------------\n\n# The paper size ('letter' or 'a4').\nlatex_paper_size = 'letter'\n\n# The font size ('10pt', '11pt' or '12pt').\n#latex_font_size = '10pt'\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title, author, document class [howto/manual]).\nlatex_documents = [('reference/index', 'networkx_reference.tex',\n 'NetworkX Reference',\n 'Aric Hagberg, Dan Schult, Pieter Swart', 'manual', 1)]\n\nlatex_appendices = ['tutorial']\n\n# Intersphinx mapping\nintersphinx_mapping = {'https://docs.python.org/2/': None,\n 'https://docs.scipy.org/doc/numpy/': None,\n }\n\n# The reST default role (used for this markup: `text`) to use for all\n# documents.\ndefault_role = 'obj'\n\nnumpydoc_show_class_members = False\n\n# Add the 'copybutton' javascript, to hide/show the prompt in code\n# examples\ndef setup(app):\n app.add_javascript('copybutton.js')\n", "path": "doc/conf.py" } ]
diff --git a/.gitignore b/.gitignore index 66bacd3f735..c97869cbe22 100644 --- a/.gitignore +++ b/.gitignore @@ -10,6 +10,7 @@ doc/networkx-documentation.zip doc/networkx_reference.pdf doc/networkx_tutorial.pdf doc/build +doc/doc_build .coverage *.class diff --git a/.travis.yml b/.travis.yml index 34d88b799b7..398ebf72b28 100644 --- a/.travis.yml +++ b/.travis.yml @@ -5,42 +5,75 @@ sudo: false language: python -python: - - "2.7" - - "3.4" - - "3.5" - - "3.6" - cache: directories: - $HOME/.cache/pip -addons: - apt: - packages: - - libgdal-dev - - graphviz - -env: - matrix: - - OPTIONAL_DEPS=pip - - OPTIONAL_DEPS=no - matrix: include: + - os: linux + python: 2.7 + env: + - OPTIONAL_DEPS=1 + - MINIMUM_REQUIREMENTS=1 + - REPORT_COVERAGE=1 + addons: + apt: + packages: + - libgdal-dev + - graphviz + - os: linux + python: 2.7 + env: + - OPTIONAL_DEPS=1 + - BUILD_DOCS=1 + - DEPLOY_DOCS=1 + addons: + apt: + packages: + - libgdal-dev + - graphviz + - texlive + - texlive-latex-extra + - latexmk + - os: linux + python: 3.6 + env: OPTIONAL_DEPS=1 + addons: + apt: + packages: + - libgdal-dev + - graphviz + - os: linux + python: 3.6 + env: + - OPTIONAL_DEPS=1 + - MINIMUM_REQUIREMENTS=1 + addons: + apt: + packages: + - libgdal-dev + - graphviz - os: osx language: generic - env: TRAVIS_PYTHON_VERSION=3.6 + env: + - TRAVIS_PYTHON_VERSION=3.6.0 + - OPTIONAL_DEPS=1 + - OSX_PKG_ENV=miniconda - os: osx language: generic - env: TRAVIS_PYTHON_VERSION=3.6.0 OPTIONAL_DEPS=pip OSX_PKG_ENV=miniconda + env: TRAVIS_PYTHON_VERSION=3.6 + - python: 2.7 + - python: 3.4 + - python: 3.5 + - python: 3.6 before_install: # prepare the system to install prerequisites or dependencies - source tools/travis/before_install.sh - uname -a - printenv - - if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then + - if [[ "${TRAVIS_OS_NAME}" == "osx" ]]; then source tools/travis/osx_install.sh; else source tools/travis/linux_install.sh; @@ -50,7 +83,7 @@ install: # install required packages - pip install --upgrade pip - pip install --retries 3 -r requirements.txt - - if [[ "${OPTIONAL_DEPS}" == pip ]]; then + - if [[ "${OPTIONAL_DEPS}" == 1 ]]; then pip install --retries 3 -r requirements/extras.txt; fi # install networkx @@ -61,13 +94,18 @@ install: - pip list script: + - if [[ "${BUILD_DOCS}" == 1 ]]; then + source tools/travis/build_docs.sh; + fi - source tools/travis/script.sh after_success: - # Report coverage for 2.7 miniconda runs only. - - if [[ "${TRAVIS_PYTHON_VERSION}${OPTIONAL_DEPS}" == 2\.7pip ]]; then + - if [[ "${REPORT_COVERAGE}" == 1 ]]; then codecov; fi + - if [[ "${BUILD_DOCS}" == 1 && "${DEPLOY_DOCS}" == 1 ]]; then + source tools/travis/deploy_docs.sh; + fi notifications: email: false diff --git a/doc/Makefile b/doc/Makefile index 7edc5a13236..48273cf1abd 100644 --- a/doc/Makefile +++ b/doc/Makefile @@ -105,6 +105,11 @@ doctest: @echo "Testing of doctests in the sources finished, look at the " \ "results in build/doctest/output.txt." +latexpdf: latex + @echo "Running LaTeX files through latexmk..." + $(MAKE) -C build/latex all-pdf + @echo "latexmk finished; the PDF files are in build/latex." 
+ gitwash-update: python ../tools/gitwash_dumper.py developer networkx \ --project-url=http://networkx.github.io \ diff --git a/doc/conf.py b/doc/conf.py index d599efbb6ae..504259ca930 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -199,7 +199,7 @@ latex_appendices = ['tutorial'] # Intersphinx mapping -intersphinx_mapping = {'https://docs.python.org/': None, +intersphinx_mapping = {'https://docs.python.org/2/': None, 'https://docs.scipy.org/doc/numpy/': None, } diff --git a/requirements/doc.txt b/requirements/doc.txt new file mode 100644 index 00000000000..47518e7eac2 --- /dev/null +++ b/requirements/doc.txt @@ -0,0 +1,6 @@ +sphinx>=1.6.3 +sphinx_rtd_theme>=0.2.4 +sphinx-gallery>=0.1.12 +pillow>=4.2.1 +nb2plots>=0.5.2 +texext>=0.5 diff --git a/requirements/extras.txt b/requirements/extras.txt index a0b1fceafd3..35e83bb69d3 100644 --- a/requirements/extras.txt +++ b/requirements/extras.txt @@ -1,6 +1,6 @@ numpy>=1.12.0 scipy>=0.19.0 -pandas>=0.20.0 +pandas>=0.20.1 matplotlib>=2.0.2 pygraphviz>=1.3.1 pydot>=1.2.3 diff --git a/tools/travis/before_install.sh b/tools/travis/before_install.sh index af1b147ac91..a03aba8e499 100755 --- a/tools/travis/before_install.sh +++ b/tools/travis/before_install.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -set -ex +set -e section () { echo -en "travis_fold:start:$1\r" @@ -12,4 +12,11 @@ section_end () { export -f section export -f section_end -set +ex +if [[ "${MINIMUM_REQUIREMENTS}" == 1 ]]; then + sed -i 's/>=/==/g' requirements/default.txt + sed -i 's/>=/==/g' requirements/extras.txt + sed -i 's/>=/==/g' requirements/test.txt + sed -i 's/>=/==/g' requirements/doc.txt +fi + +set +e diff --git a/tools/travis/build_docs.sh b/tools/travis/build_docs.sh new file mode 100755 index 00000000000..294a348d5ea --- /dev/null +++ b/tools/travis/build_docs.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash + +set -e + +pip install --retries 3 -q -r requirements/doc.txt +export SPHINXCACHE=$HOME/.cache/sphinx +cd doc +make html +make doctest +make latexpdf +cd .. + +set +e diff --git a/tools/travis/deploy-key.enc b/tools/travis/deploy-key.enc new file mode 100644 index 00000000000..6b6d64d6641 --- /dev/null +++ b/tools/travis/deploy-key.enc @@ -0,0 +1,4 @@ +_'NXN�g� ƅՄ�}�L�b��˶���=���>����ɵ��f\�<X�O6��Z�>Z�c����{� � ǪN�ұ�$R����Kcä��5�#�c#0�`K�.���QqD'��U�ڈ���" _r�Q�����IRnhM ��׀��������}���0�Q���s�\���:i�)���&�)�$���f(o)�.�#�� �C���� +���4�� �I����>ݗ����ڭ$��V�2u���o��D��0`$��*�� +�� �F�s���!�}_D,�X�0���VH��,��� +�EH8�����0���.�Y���<�~m�&�ba �>]�gDŽ�l���U��X�����Z�I؈\��nH�^�s&�H���, ��: f@��h�R���������2h6���3� \ No newline at end of file diff --git a/tools/travis/deploy_docs.sh b/tools/travis/deploy_docs.sh new file mode 100755 index 00000000000..6adae88747a --- /dev/null +++ b/tools/travis/deploy_docs.sh @@ -0,0 +1,59 @@ +#!/usr/bin/env bash + +set -e + +section "Deploy docs" +if [[ $TRAVIS_PULL_REQUEST == false && $TRAVIS_BRANCH == "master" && $BUILD_DOCS == 1 && $DEPLOY_DOCS == 1 ]] +then + # "A deploy key is an SSH key that is stored on your server and grants access to a single GitHub repository. + # This key is attached directly to the repository instead of to a personal user account." + # -- https://developer.github.com/v3/guides/managing-deploy-keys/#deploy-keys + # + # $ ssh-keygen -t ed25519 -C "Networkx Travis Bot" -f deploy-key + # Your identification has been saved in deploy-key. + # Your public key has been saved in deploy-key.pub. + # + # Add the deploy-key.pub contents to your repo's settings under Settings -> Deploy Keys. 
+ # Encrypt the private deploy-key for Travis-CI and commit it to the repo + # + # $ gem install travis + # $ travis login + # $ travis encrypt-file deploy-key + # storing result as deploy-key.enc + # + # The ``travis encrypt-file deploy-key`` command provides the ``openssl`` command below. + + # Decrypt the deploy-key with the Travis-CI key + openssl aes-256-cbc -K $encrypted_64abb7a9cf51_key -iv $encrypted_64abb7a9cf51_iv -in tools/travis/deploy-key.enc -out deploy-key -d + chmod 600 deploy-key + eval `ssh-agent -s` + ssh-add deploy-key + + # Push the docs to the gh-pages branch of the networkx/dev-docs repo + [email protected]:networkx/dev-docs.git + echo "-- pushing docs --" + ( + git config --global user.email "[email protected]" + git config --global user.name "NetworkX Travis Bot" + + cd doc + git clone --quiet --branch=gh-pages ${GH_REF} doc_build + cd doc_build + + # Overwrite previous commit + git rm -rf . + cp -a ../build/html/* . + cp -a ../build/latex/networkx_reference.pdf _downloads/. + touch .nojekyll + git add -A + git commit --amend --no-edit + + git push --force --quiet "${GH_REF}" gh-pages > /dev/null 2>&1 + cd ../.. + ) +else + echo "-- will only push docs from master --" +fi +section_end "Deploy docs" + +set +e diff --git a/tools/travis/linux_install.sh b/tools/travis/linux_install.sh index ca08f0171af..f3e41fe9d53 100755 --- a/tools/travis/linux_install.sh +++ b/tools/travis/linux_install.sh @@ -1,11 +1,11 @@ #!/usr/bin/env bash -set -ex +set -e # create new empty venv virtualenv -p python ~/venv source ~/venv/bin/activate -if [[ "${OPTIONAL_DEPS}" == pip ]]; then +if [[ "${OPTIONAL_DEPS}" == 1 ]]; then # needed to build Python binding for GDAL export CPLUS_INCLUDE_PATH=/usr/include/gdal @@ -29,4 +29,4 @@ EOF fi -set +ex +set +e diff --git a/tools/travis/osx_install.sh b/tools/travis/osx_install.sh index 5e597d5cfa6..0fd0879030e 100755 --- a/tools/travis/osx_install.sh +++ b/tools/travis/osx_install.sh @@ -21,7 +21,7 @@ else get_macpython_environment $TRAVIS_PYTHON_VERSION venv fi -if [[ "${OPTIONAL_DEPS}" == pip ]]; then +if [[ "${OPTIONAL_DEPS}" == 1 ]]; then if [[ "${OSX_PKG_ENV}" == miniconda ]]; then conda install graphviz export PKG_CONFIG_PATH=/Users/travis/miniconda/envs/testenv/lib/pkgconfig diff --git a/tools/travis/script.sh b/tools/travis/script.sh index 3f65b94a131..108ffe892c3 100755 --- a/tools/travis/script.sh +++ b/tools/travis/script.sh @@ -1,15 +1,17 @@ #!/usr/bin/env bash +set -e + section "Script section" -set -ex -export NX_INSTALL=`pip show networkx | grep Location | awk '{print $2"/networkx"}'`; +export NX_SOURCE=$PWD +export NX_INSTALL=$(pip show networkx | grep Location | awk '{print $2"/networkx"}') # nose 1.3.0 does not tell coverage to only cover the requested # package (except during the report). So to restrict coverage, we must # inform coverage through the .coveragerc file. -cp .coveragerc $NX_INSTALL; -cp setup.cfg $NX_INSTALL; +cp .coveragerc $NX_INSTALL +cp setup.cfg $NX_INSTALL # Move to new directory so that networkx is not imported from repository. # Why? Because we want the tests to make sure that NetworkX was installed @@ -17,11 +19,19 @@ cp setup.cfg $NX_INSTALL; # Testing from the git repository cannot catch a mistake like that. # # Export current directory for logs. -cd $NX_INSTALL; -printenv PWD; +cd $NX_INSTALL +printenv PWD # Run nosetests. 
-nosetests --verbosity=2 --with-ignore-docstrings --with-coverage --cover-package=networkx; +if [[ "${REPORT_COVERAGE}" == 1 ]]; then + nosetests --verbosity=2 --with-ignore-docstrings --with-coverage --cover-package=networkx + cp -a .coverage $NX_SOURCE +else + nosetests --verbosity=2 --with-ignore-docstrings +fi + +cd $NX_SOURCE -set +ex section_end "Script section" + +set +e
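The `MINIMUM_REQUIREMENTS=1` jobs introduced in this diff pin every `>=` specifier to `==` with `sed` so CI installs the oldest dependency versions the project claims to support. A minimal Python sketch of the same substitution, assuming the four requirements files named in `tools/travis/before_install.sh` (the helper name is illustrative, not part of the original change):

```python
from pathlib import Path

def pin_minimum_requirements(req_file: str) -> None:
    # Rewrite '>=' specifiers to '==' so pip resolves the oldest allowed versions.
    path = Path(req_file)
    path.write_text(path.read_text().replace(">=", "=="))

for fname in ("requirements/default.txt", "requirements/extras.txt",
              "requirements/test.txt", "requirements/doc.txt"):
    pin_minimum_requirements(fname)
```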
PrefectHQ__prefect-2467
Feature/#2439 prefect server telemetry **Thanks for contributing to Prefect!** Please describe your work and make sure your PR: - [x] adds new tests (if appropriate) - [x] updates `CHANGELOG.md` (if appropriate) - [x] updates docstrings for any new functions or function arguments, including `docs/outline.toml` for API reference docs (if appropriate) Note that your PR will not be reviewed unless all three boxes are checked. ## What does this PR change? This PR closes #2467 and adds some minimal telemetry to Prefect Server. ## Why is this PR important? This is the first step into collecting usage information that can help the Prefect team understand how Prefect Server is being used and how we can make it better.
[ { "content": "import os\nimport shutil\nimport subprocess\nimport tempfile\nimport time\nfrom pathlib import Path\n\nimport click\nimport yaml\n\nimport prefect\nfrom prefect import config\nfrom prefect.utilities.configuration import set_temporary_config\nfrom prefect.utilities.docker_util import platform_is_linux, get_docker_ip\n\n\ndef make_env(fname=None):\n # replace localhost with postgres to use docker-compose dns\n PREFECT_ENV = dict(\n DB_CONNECTION_URL=config.server.database.connection_url.replace(\n \"localhost\", \"postgres\"\n ),\n GRAPHQL_HOST_PORT=config.server.graphql.host_port,\n UI_HOST_PORT=config.server.ui.host_port,\n )\n\n APOLLO_ENV = dict(\n HASURA_API_URL=\"http://hasura:{}/v1alpha1/graphql\".format(\n config.server.hasura.port\n ),\n HASURA_WS_URL=\"ws://hasura:{}/v1alpha1/graphql\".format(\n config.server.hasura.port\n ),\n PREFECT_API_URL=\"http://graphql:{port}{path}\".format(\n port=config.server.graphql.port, path=config.server.graphql.path\n ),\n PREFECT_API_HEALTH_URL=\"http://graphql:{port}/health\".format(\n port=config.server.graphql.port\n ),\n APOLLO_HOST_PORT=config.server.host_port,\n )\n\n POSTGRES_ENV = dict(\n POSTGRES_HOST_PORT=config.server.database.host_port,\n POSTGRES_USER=config.server.database.username,\n POSTGRES_PASSWORD=config.server.database.password,\n POSTGRES_DB=config.server.database.name,\n )\n\n UI_ENV = dict(GRAPHQL_URL=config.server.ui.graphql_url)\n\n HASURA_ENV = dict(HASURA_HOST_PORT=config.server.hasura.host_port)\n\n ENV = os.environ.copy()\n ENV.update(**PREFECT_ENV, **APOLLO_ENV, **POSTGRES_ENV, **UI_ENV, **HASURA_ENV)\n\n if fname is not None:\n list_of_pairs = [\n \"{k}={repr(v)}\".format(k=k, v=v)\n if \"\\n\" in v\n else \"{k}={v}\".format(k=k, v=v)\n for k, v in ENV.items()\n ]\n with open(fname, \"w\") as f:\n f.write(\"\\n\".join(list_of_pairs))\n return ENV.copy()\n\n\[email protected](hidden=True)\ndef server():\n \"\"\"\n Commands for interacting with the Prefect Core server\n\n \\b\n Usage:\n $ prefect server ...\n\n \\b\n Arguments:\n start ...\n\n \\b\n Examples:\n $ prefect server start\n ...\n \"\"\"\n\n\[email protected](hidden=True)\[email protected](\n \"--version\",\n \"-v\",\n help=\"The server image versions to use (for example, '0.10.0' or 'master')\",\n hidden=True,\n)\[email protected](\n \"--skip-pull\",\n help=\"Pass this flag to skip pulling new images (if available)\",\n is_flag=True,\n hidden=True,\n)\[email protected](\n \"--no-upgrade\",\n \"-n\",\n help=\"Pass this flag to avoid running a database upgrade when the database spins up\",\n is_flag=True,\n hidden=True,\n)\[email protected](\n \"--no-ui\",\n \"-u\",\n help=\"Pass this flag to avoid starting the UI\",\n is_flag=True,\n hidden=True,\n)\[email protected](\n \"--postgres-port\",\n help=\"The port used to serve Postgres\",\n default=config.server.database.host_port,\n type=str,\n hidden=True,\n)\[email protected](\n \"--hasura-port\",\n help=\"The port used to serve Hasura\",\n default=config.server.hasura.host_port,\n type=str,\n hidden=True,\n)\[email protected](\n \"--graphql-port\",\n help=\"The port used to serve the GraphQL API\",\n default=config.server.graphql.host_port,\n type=str,\n hidden=True,\n)\[email protected](\n \"--ui-port\",\n help=\"The port used to serve the UI\",\n default=config.server.ui.host_port,\n type=str,\n hidden=True,\n)\[email protected](\n \"--server-port\",\n help=\"The port used to serve the Core server\",\n default=config.server.host_port,\n type=str,\n hidden=True,\n)\[email protected](\n 
\"--no-postgres-port\",\n help=\"Disable port map of Postgres to host\",\n is_flag=True,\n hidden=True,\n)\[email protected](\n \"--no-hasura-port\",\n help=\"Disable port map of Hasura to host\",\n is_flag=True,\n hidden=True,\n)\[email protected](\n \"--no-graphql-port\",\n help=\"Disable port map of the GraphqlAPI to host\",\n is_flag=True,\n hidden=True,\n)\[email protected](\n \"--no-ui-port\", help=\"Disable port map of the UI to host\", is_flag=True, hidden=True\n)\[email protected](\n \"--no-server-port\",\n help=\"Disable port map of the Core server to host\",\n is_flag=True,\n hidden=True,\n)\ndef start(\n version,\n skip_pull,\n no_upgrade,\n no_ui,\n postgres_port,\n hasura_port,\n graphql_port,\n ui_port,\n server_port,\n no_postgres_port,\n no_hasura_port,\n no_graphql_port,\n no_ui_port,\n no_server_port,\n):\n \"\"\"\n This command spins up all infrastructure and services for the Prefect Core server\n\n \\b\n Options:\n --version, -v TEXT The server image versions to use (for example, '0.10.0' or 'master')\n Defaults to the current installed Prefect version.\n --skip-pull Flag to skip pulling new images (if available)\n --no-upgrade, -n Flag to avoid running a database upgrade when the database spins up\n --no-ui, -u Flag to avoid starting the UI\n\n \\b\n --postgres-port TEXT Port used to serve Postgres, defaults to '5432'\n --hasura-port TEXT Port used to serve Hasura, defaults to '3001'\n --graphql-port TEXT Port used to serve the GraphQL API, defaults to '4001'\n --ui-port TEXT Port used to serve the UI, defaults to '8080'\n --server-port TEXT Port used to serve the Core server, defaults to '4200'\n\n \\b\n --no-postgres-port Disable port map of Postgres to host\n --no-hasura-port Disable port map of Hasura to host\n --no-graphql-port Disable port map of the GraphQL API to host\n --no-ui-port Disable port map of the UI to host\n --no-server-port Disable port map of the Core server to host\n \"\"\"\n\n docker_dir = Path(__file__).parents[0]\n compose_dir_path = docker_dir\n\n # Remove port mappings if specified\n if (\n no_postgres_port\n or no_hasura_port\n or no_graphql_port\n or no_ui_port\n or no_server_port\n or platform_is_linux()\n ):\n temp_dir = tempfile.gettempdir()\n temp_path = os.path.join(temp_dir, \"docker-compose.yml\")\n shutil.copy2(os.path.join(docker_dir, \"docker-compose.yml\"), temp_path)\n\n with open(temp_path, \"r\") as file:\n y = yaml.safe_load(file)\n\n if no_postgres_port:\n del y[\"services\"][\"postgres\"][\"ports\"]\n\n if no_hasura_port:\n del y[\"services\"][\"hasura\"][\"ports\"]\n\n if no_graphql_port:\n del y[\"services\"][\"graphql\"][\"ports\"]\n\n if no_ui_port:\n del y[\"services\"][\"ui\"][\"ports\"]\n\n if no_server_port:\n del y[\"services\"][\"apollo\"][\"ports\"]\n\n if platform_is_linux():\n docker_internal_ip = get_docker_ip()\n for service in list(y[\"services\"]):\n y[\"services\"][service][\"extra_hosts\"] = [\n \"host.docker.internal:{}\".format(docker_internal_ip)\n ]\n\n with open(temp_path, \"w\") as f:\n y = yaml.safe_dump(y, f)\n\n compose_dir_path = temp_dir\n\n # Temporary config set for port allocation\n with set_temporary_config(\n {\n \"server.database.host_port\": postgres_port,\n \"server.hasura.host_port\": hasura_port,\n \"server.graphql.host_port\": graphql_port,\n \"server.ui.host_port\": ui_port,\n \"server.host_port\": server_port,\n }\n ):\n env = make_env()\n\n if \"PREFECT_SERVER_TAG\" not in env:\n env.update(\n PREFECT_SERVER_TAG=version\n or (\n \"master\"\n if 
len(prefect.__version__.split(\"+\")) > 1\n else prefect.__version__\n )\n )\n if \"PREFECT_SERVER_DB_CMD\" not in env:\n cmd = (\n \"prefect-server database upgrade -y\"\n if not no_upgrade\n else \"echo 'DATABASE MIGRATIONS SKIPPED'\"\n )\n env.update(PREFECT_SERVER_DB_CMD=cmd)\n\n proc = None\n try:\n if not skip_pull:\n subprocess.check_call(\n [\"docker-compose\", \"pull\"], cwd=compose_dir_path, env=env\n )\n\n cmd = [\"docker-compose\", \"up\"]\n if no_ui:\n cmd += [\"--scale\", \"ui=0\"]\n proc = subprocess.Popen(cmd, cwd=compose_dir_path, env=env)\n while True:\n time.sleep(0.5)\n except:\n click.secho(\n \"Exception caught; killing services (press ctrl-C to force)\",\n fg=\"white\",\n bg=\"red\",\n )\n subprocess.check_output(\n [\"docker-compose\", \"down\"], cwd=compose_dir_path, env=env\n )\n if proc:\n proc.kill()\n raise\n", "path": "src/prefect/cli/server.py" } ]
[ { "content": "import os\nimport shutil\nimport subprocess\nimport tempfile\nimport time\nfrom pathlib import Path\n\nimport click\nimport yaml\n\nimport prefect\nfrom prefect import config\nfrom prefect.utilities.configuration import set_temporary_config\nfrom prefect.utilities.docker_util import platform_is_linux, get_docker_ip\n\n\ndef make_env(fname=None):\n # replace localhost with postgres to use docker-compose dns\n PREFECT_ENV = dict(\n DB_CONNECTION_URL=config.server.database.connection_url.replace(\n \"localhost\", \"postgres\"\n ),\n GRAPHQL_HOST_PORT=config.server.graphql.host_port,\n UI_HOST_PORT=config.server.ui.host_port,\n )\n\n APOLLO_ENV = dict(\n HASURA_API_URL=\"http://hasura:{}/v1alpha1/graphql\".format(\n config.server.hasura.port\n ),\n HASURA_WS_URL=\"ws://hasura:{}/v1alpha1/graphql\".format(\n config.server.hasura.port\n ),\n PREFECT_API_URL=\"http://graphql:{port}{path}\".format(\n port=config.server.graphql.port, path=config.server.graphql.path\n ),\n PREFECT_API_HEALTH_URL=\"http://graphql:{port}/health\".format(\n port=config.server.graphql.port\n ),\n APOLLO_HOST_PORT=config.server.host_port,\n PREFECT_SERVER__TELEMETRY__ENABLED=(\n \"true\" if config.server.telemetry.enabled is True else \"false\"\n ),\n )\n\n POSTGRES_ENV = dict(\n POSTGRES_HOST_PORT=config.server.database.host_port,\n POSTGRES_USER=config.server.database.username,\n POSTGRES_PASSWORD=config.server.database.password,\n POSTGRES_DB=config.server.database.name,\n )\n\n UI_ENV = dict(GRAPHQL_URL=config.server.ui.graphql_url)\n\n HASURA_ENV = dict(HASURA_HOST_PORT=config.server.hasura.host_port)\n\n ENV = os.environ.copy()\n ENV.update(**PREFECT_ENV, **APOLLO_ENV, **POSTGRES_ENV, **UI_ENV, **HASURA_ENV)\n\n if fname is not None:\n list_of_pairs = [\n \"{k}={repr(v)}\".format(k=k, v=v)\n if \"\\n\" in v\n else \"{k}={v}\".format(k=k, v=v)\n for k, v in ENV.items()\n ]\n with open(fname, \"w\") as f:\n f.write(\"\\n\".join(list_of_pairs))\n return ENV.copy()\n\n\[email protected](hidden=True)\ndef server():\n \"\"\"\n Commands for interacting with the Prefect Core server\n\n \\b\n Usage:\n $ prefect server ...\n\n \\b\n Arguments:\n start ...\n\n \\b\n Examples:\n $ prefect server start\n ...\n \"\"\"\n\n\[email protected](hidden=True)\[email protected](\n \"--version\",\n \"-v\",\n help=\"The server image versions to use (for example, '0.10.0' or 'master')\",\n hidden=True,\n)\[email protected](\n \"--skip-pull\",\n help=\"Pass this flag to skip pulling new images (if available)\",\n is_flag=True,\n hidden=True,\n)\[email protected](\n \"--no-upgrade\",\n \"-n\",\n help=\"Pass this flag to avoid running a database upgrade when the database spins up\",\n is_flag=True,\n hidden=True,\n)\[email protected](\n \"--no-ui\",\n \"-u\",\n help=\"Pass this flag to avoid starting the UI\",\n is_flag=True,\n hidden=True,\n)\[email protected](\n \"--postgres-port\",\n help=\"The port used to serve Postgres\",\n default=config.server.database.host_port,\n type=str,\n hidden=True,\n)\[email protected](\n \"--hasura-port\",\n help=\"The port used to serve Hasura\",\n default=config.server.hasura.host_port,\n type=str,\n hidden=True,\n)\[email protected](\n \"--graphql-port\",\n help=\"The port used to serve the GraphQL API\",\n default=config.server.graphql.host_port,\n type=str,\n hidden=True,\n)\[email protected](\n \"--ui-port\",\n help=\"The port used to serve the UI\",\n default=config.server.ui.host_port,\n type=str,\n hidden=True,\n)\[email protected](\n \"--server-port\",\n help=\"The port used to serve 
the Core server\",\n default=config.server.host_port,\n type=str,\n hidden=True,\n)\[email protected](\n \"--no-postgres-port\",\n help=\"Disable port map of Postgres to host\",\n is_flag=True,\n hidden=True,\n)\[email protected](\n \"--no-hasura-port\",\n help=\"Disable port map of Hasura to host\",\n is_flag=True,\n hidden=True,\n)\[email protected](\n \"--no-graphql-port\",\n help=\"Disable port map of the GraphqlAPI to host\",\n is_flag=True,\n hidden=True,\n)\[email protected](\n \"--no-ui-port\", help=\"Disable port map of the UI to host\", is_flag=True, hidden=True\n)\[email protected](\n \"--no-server-port\",\n help=\"Disable port map of the Core server to host\",\n is_flag=True,\n hidden=True,\n)\ndef start(\n version,\n skip_pull,\n no_upgrade,\n no_ui,\n postgres_port,\n hasura_port,\n graphql_port,\n ui_port,\n server_port,\n no_postgres_port,\n no_hasura_port,\n no_graphql_port,\n no_ui_port,\n no_server_port,\n):\n \"\"\"\n This command spins up all infrastructure and services for the Prefect Core server\n\n \\b\n Options:\n --version, -v TEXT The server image versions to use (for example, '0.10.0' or 'master')\n Defaults to the current installed Prefect version.\n --skip-pull Flag to skip pulling new images (if available)\n --no-upgrade, -n Flag to avoid running a database upgrade when the database spins up\n --no-ui, -u Flag to avoid starting the UI\n\n \\b\n --postgres-port TEXT Port used to serve Postgres, defaults to '5432'\n --hasura-port TEXT Port used to serve Hasura, defaults to '3001'\n --graphql-port TEXT Port used to serve the GraphQL API, defaults to '4001'\n --ui-port TEXT Port used to serve the UI, defaults to '8080'\n --server-port TEXT Port used to serve the Core server, defaults to '4200'\n\n \\b\n --no-postgres-port Disable port map of Postgres to host\n --no-hasura-port Disable port map of Hasura to host\n --no-graphql-port Disable port map of the GraphQL API to host\n --no-ui-port Disable port map of the UI to host\n --no-server-port Disable port map of the Core server to host\n \"\"\"\n\n docker_dir = Path(__file__).parents[0]\n compose_dir_path = docker_dir\n\n # Remove port mappings if specified\n if (\n no_postgres_port\n or no_hasura_port\n or no_graphql_port\n or no_ui_port\n or no_server_port\n or platform_is_linux()\n ):\n temp_dir = tempfile.gettempdir()\n temp_path = os.path.join(temp_dir, \"docker-compose.yml\")\n shutil.copy2(os.path.join(docker_dir, \"docker-compose.yml\"), temp_path)\n\n with open(temp_path, \"r\") as file:\n y = yaml.safe_load(file)\n\n if no_postgres_port:\n del y[\"services\"][\"postgres\"][\"ports\"]\n\n if no_hasura_port:\n del y[\"services\"][\"hasura\"][\"ports\"]\n\n if no_graphql_port:\n del y[\"services\"][\"graphql\"][\"ports\"]\n\n if no_ui_port:\n del y[\"services\"][\"ui\"][\"ports\"]\n\n if no_server_port:\n del y[\"services\"][\"apollo\"][\"ports\"]\n\n if platform_is_linux():\n docker_internal_ip = get_docker_ip()\n for service in list(y[\"services\"]):\n y[\"services\"][service][\"extra_hosts\"] = [\n \"host.docker.internal:{}\".format(docker_internal_ip)\n ]\n\n with open(temp_path, \"w\") as f:\n y = yaml.safe_dump(y, f)\n\n compose_dir_path = temp_dir\n\n # Temporary config set for port allocation\n with set_temporary_config(\n {\n \"server.database.host_port\": postgres_port,\n \"server.hasura.host_port\": hasura_port,\n \"server.graphql.host_port\": graphql_port,\n \"server.ui.host_port\": ui_port,\n \"server.host_port\": server_port,\n }\n ):\n env = make_env()\n\n if \"PREFECT_SERVER_TAG\" not in 
env:\n env.update(\n PREFECT_SERVER_TAG=version\n or (\n \"master\"\n if len(prefect.__version__.split(\"+\")) > 1\n else prefect.__version__\n )\n )\n if \"PREFECT_SERVER_DB_CMD\" not in env:\n cmd = (\n \"prefect-server database upgrade -y\"\n if not no_upgrade\n else \"echo 'DATABASE MIGRATIONS SKIPPED'\"\n )\n env.update(PREFECT_SERVER_DB_CMD=cmd)\n\n proc = None\n try:\n if not skip_pull:\n subprocess.check_call(\n [\"docker-compose\", \"pull\"], cwd=compose_dir_path, env=env\n )\n\n cmd = [\"docker-compose\", \"up\"]\n if no_ui:\n cmd += [\"--scale\", \"ui=0\"]\n proc = subprocess.Popen(cmd, cwd=compose_dir_path, env=env)\n while True:\n time.sleep(0.5)\n except:\n click.secho(\n \"Exception caught; killing services (press ctrl-C to force)\",\n fg=\"white\",\n bg=\"red\",\n )\n subprocess.check_output(\n [\"docker-compose\", \"down\"], cwd=compose_dir_path, env=env\n )\n if proc:\n proc.kill()\n raise\n", "path": "src/prefect/cli/server.py" } ]
diff --git a/CHANGELOG.md b/CHANGELOG.md index 6fec2af82e13..5fa3c51ba80c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,6 +15,7 @@ These changes are available in the [release/0.11.0 branch](https://github.com/Pr ### Server - Add "cancellation-lite" semantic by preventing task runs from running if the flow run isn't running - [#2535](https://github.com/PrefectHQ/prefect/pull/2535) +- Add minimal telemetry to Prefect Server [#2467](https://github.com/PrefectHQ/prefect/pull/2467) ### Task Library @@ -22,7 +23,7 @@ These changes are available in the [release/0.11.0 branch](https://github.com/Pr ### Fixes -- Fix bug in Kubernetes agent ``deployment.yaml`` with a misconfigured liveness probe - [#2519](https://github.com/PrefectHQ/prefect/pull/2519) +- Fix bug in Kubernetes agent `deployment.yaml` with a misconfigured liveness probe - [#2519](https://github.com/PrefectHQ/prefect/pull/2519) ### Deprecations diff --git a/docs/.vuepress/config.js b/docs/.vuepress/config.js index 1ef46f6db3de..54b7c571007b 100644 --- a/docs/.vuepress/config.js +++ b/docs/.vuepress/config.js @@ -3,10 +3,10 @@ const sidebar98 = require('../api/0.9.8/sidebar') const glob = require('glob') // function for loading all MD files in a directory -const getChildren = function (parent_path, dir) { +const getChildren = function(parent_path, dir) { return glob .sync(parent_path + '/' + dir + '/**/*.md') - .map((path) => { + .map(path => { // remove "parent_path" and ".md" path = path.slice(parent_path.length + 1, -3) // remove README @@ -224,6 +224,11 @@ module.exports = { 'recipes/k8s_docker_sidecar' ] }, + { + title: 'Server', + collapsable: true, + children: ['server/telemetry'] + }, { title: 'FAQ', collapsable: true, diff --git a/docs/orchestration/server/telemetry.md b/docs/orchestration/server/telemetry.md new file mode 100644 index 000000000000..77827074a5bc --- /dev/null +++ b/docs/orchestration/server/telemetry.md @@ -0,0 +1,18 @@ +# Telemetry + +Prefect Server sends usage telemetry and statistics to Prefect Technologies, Inc. All information collected is anonymous. We use this information to better understand how Prefect Server is used and to ensure that we're supporting active versions. + +To opt-out of telemetry, add the following to your user configuration file (see [user configuration](../../core/concepts/configuration.md#user-configuration)). + +```toml +[server.telemetry] +enabled = false +``` + +As an environment variable this would be: + +```bash +export PREFECT_SERVER__TELEMETRY__ENABLED=false +``` + +See [configuration](../../core/concepts/configuration.md) for more details. 
diff --git a/server/docker/docker-compose.yml b/server/docker/docker-compose.yml index d751c9674384..5dcbde17e2c4 100644 --- a/server/docker/docker-compose.yml +++ b/server/docker/docker-compose.yml @@ -73,6 +73,7 @@ services: HASURA_API_URL: ${HASURA_API_URL:-http://hasura:3000/v1alpha1/graphql} PREFECT_API_URL: ${PREFECT_API_URL:-http://graphql:4201/graphql/} PREFECT_API_HEALTH_URL: ${PREFECT_API_HEALTH_URL:-http://graphql:4201/health} + PREFECT_SERVER__TELEMETRY__ENABLED: ${PREFECT_SERVER__TELEMETRY__ENABLED:-true} networks: - prefect-server restart: "always" diff --git a/server/services/apollo/package-lock.json b/server/services/apollo/package-lock.json index 840f8352bee7..e9d8910ac2b8 100644 --- a/server/services/apollo/package-lock.json +++ b/server/services/apollo/package-lock.json @@ -1565,6 +1565,13 @@ "deprecated-decorator": "^0.1.6", "iterall": "^1.1.3", "uuid": "^3.1.0" + }, + "dependencies": { + "uuid": { + "version": "3.4.0", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-3.4.0.tgz", + "integrity": "sha512-HjSDRw6gZE5JMggctHBcjVak08+KEVhSIiDzFnT9S9aegmp85S/bReBVTb4QTFaRNptJ9kuYaNhnbNEOkbKb/A==" + } } } } @@ -1615,6 +1622,13 @@ "deprecated-decorator": "^0.1.6", "iterall": "^1.1.3", "uuid": "^3.1.0" + }, + "dependencies": { + "uuid": { + "version": "3.4.0", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-3.4.0.tgz", + "integrity": "sha512-HjSDRw6gZE5JMggctHBcjVak08+KEVhSIiDzFnT9S9aegmp85S/bReBVTb4QTFaRNptJ9kuYaNhnbNEOkbKb/A==" + } } } } @@ -1676,6 +1690,13 @@ "deprecated-decorator": "^0.1.6", "iterall": "^1.1.3", "uuid": "^3.1.0" + }, + "dependencies": { + "uuid": { + "version": "3.4.0", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-3.4.0.tgz", + "integrity": "sha512-HjSDRw6gZE5JMggctHBcjVak08+KEVhSIiDzFnT9S9aegmp85S/bReBVTb4QTFaRNptJ9kuYaNhnbNEOkbKb/A==" + } } } } @@ -4852,6 +4873,13 @@ "deprecated-decorator": "^0.1.6", "iterall": "^1.1.3", "uuid": "^3.1.0" + }, + "dependencies": { + "uuid": { + "version": "3.4.0", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-3.4.0.tgz", + "integrity": "sha512-HjSDRw6gZE5JMggctHBcjVak08+KEVhSIiDzFnT9S9aegmp85S/bReBVTb4QTFaRNptJ9kuYaNhnbNEOkbKb/A==" + } } }, "graphql-upload": { @@ -8306,6 +8334,12 @@ "resolved": "https://registry.npmjs.org/qs/-/qs-6.5.2.tgz", "integrity": "sha512-N5ZAX4/LxJmF+7wN74pUD6qAh9/wnvdQcjq9TZjevvXzSUo7bfmw91saqMjzGS2xq91/odN2dW/WOl7qQHNDGA==", "dev": true + }, + "uuid": { + "version": "3.4.0", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-3.4.0.tgz", + "integrity": "sha512-HjSDRw6gZE5JMggctHBcjVak08+KEVhSIiDzFnT9S9aegmp85S/bReBVTb4QTFaRNptJ9kuYaNhnbNEOkbKb/A==", + "dev": true } } }, @@ -9602,9 +9636,9 @@ "integrity": "sha1-n5VxD1CiZ5R7LMwSR0HBAoQn5xM=" }, "uuid": { - "version": "3.4.0", - "resolved": "https://registry.npmjs.org/uuid/-/uuid-3.4.0.tgz", - "integrity": "sha512-HjSDRw6gZE5JMggctHBcjVak08+KEVhSIiDzFnT9S9aegmp85S/bReBVTb4QTFaRNptJ9kuYaNhnbNEOkbKb/A==" + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-8.0.0.tgz", + "integrity": "sha512-jOXGuXZAWdsTH7eZLtyXMqUb9EcWMGZNbL9YcGBJl4MH4nrxHmZJhEHvyLFrkxo+28uLb/NYRcStH48fnD0Vzw==" }, "v8flags": { "version": "3.1.3", diff --git a/server/services/apollo/package.json b/server/services/apollo/package.json index 286d82d04a58..f78debda4cc7 100644 --- a/server/services/apollo/package.json +++ b/server/services/apollo/package.json @@ -25,7 +25,8 @@ "graphql-depth-limit": "^1.1.0", "graphql-tools": "^3.0.4", "jsonwebtoken": "^8.5.1", - "node-fetch": "^2.3.0" + "node-fetch": "^2.3.0", + "uuid": "^8.0.0" }, 
"devDependencies": { "@babel/cli": "^7.2.3", diff --git a/server/services/apollo/src/index.js b/server/services/apollo/src/index.js index 1a08a1b69c55..ba3b39451fb1 100644 --- a/server/services/apollo/src/index.js +++ b/server/services/apollo/src/index.js @@ -10,6 +10,7 @@ import { FilterRootFields } from 'apollo-server' import { HttpLink } from 'apollo-link-http' +import { v4 as uuidv4 } from 'uuid' const APOLLO_API_PORT = process.env.APOLLO_API_PORT || '4200' const APOLLO_API_BIND_ADDRESS = process.env.APOLLO_API_BIND_ADDRESS || '0.0.0.0' @@ -23,6 +24,12 @@ const PREFECT_API_URL = const PREFECT_API_HEALTH_URL = process.env.PREFECT_API_HEALTH_URL || 'http://localhost:4201/health' +const PREFECT_SERVER__TELEMETRY__ENABLED = + process.env.PREFECT_SERVER__TELEMETRY__ENABLED || 'false' +// Convert from a TOML boolean to a JavaScript boolean +const TELEMETRY_ENABLED = + PREFECT_SERVER__TELEMETRY__ENABLED == 'true' ? true : false +const TELEMETRY_ID = uuidv4() // -------------------------------------------------------------------- // Server const depthLimit = require('graphql-depth-limit') @@ -162,6 +169,12 @@ function sleep(ms) { async function runServerForever() { try { await runServer() + send_telemetry_event('startup') + if (TELEMETRY_ENABLED) { + setInterval(() => { + send_telemetry_event('heartbeat') + }, 600000) // send heartbeat every 10 minutes + } } catch (e) { log(e, e.message, e.stack) log('\nTrying again in 3 seconds...\n') @@ -170,4 +183,29 @@ async function runServerForever() { } } +async function send_telemetry_event(event) { + if (TELEMETRY_ENABLED) { + try { + // TODO add timeout + const body = JSON.stringify({ + source: 'prefect_server', + type: event, + payload: { id: TELEMETRY_ID } + }) + log(`Sending telemetry to Prefect Technnologies, Inc: ${body}`) + + fetch('https://sens-o-matic.prefect.io/', { + method: 'post', + body, + headers: { + 'Content-Type': 'application/json', + 'X-Prefect-Event': 'prefect_server-0.0.1' + } + }) + } catch (error) { + log(`Error sending telemetry event: ${error.message}`) + } + } +} + runServerForever() diff --git a/src/prefect/cli/docker-compose.yml b/src/prefect/cli/docker-compose.yml index c07c4f4db715..d7f8c9f80951 100644 --- a/src/prefect/cli/docker-compose.yml +++ b/src/prefect/cli/docker-compose.yml @@ -67,6 +67,7 @@ services: HASURA_API_URL: ${HASURA_API_URL:-http://hasura:3000/v1alpha1/graphql} PREFECT_API_URL: ${PREFECT_API_URL:-http://graphql:4201/graphql/} PREFECT_API_HEALTH_URL: ${PREFECT_API_HEALTH_URL:-http://graphql:4201/health} + PREFECT_SERVER__TELEMETRY__ENABLED: ${PREFECT_SERVER__TELEMETRY__ENABLED:-true} networks: - prefect-server restart: "always" diff --git a/src/prefect/cli/server.py b/src/prefect/cli/server.py index 361bb6723aaf..a848651129c8 100644 --- a/src/prefect/cli/server.py +++ b/src/prefect/cli/server.py @@ -38,6 +38,9 @@ def make_env(fname=None): port=config.server.graphql.port ), APOLLO_HOST_PORT=config.server.host_port, + PREFECT_SERVER__TELEMETRY__ENABLED=( + "true" if config.server.telemetry.enabled is True else "false" + ), ) POSTGRES_ENV = dict( diff --git a/src/prefect/config.toml b/src/prefect/config.toml index 68382483d966..0d115257280b 100644 --- a/src/prefect/config.toml +++ b/src/prefect/config.toml @@ -46,6 +46,9 @@ endpoint = "${server.host}:${server.port}" endpoint = "${server.ui.host}:${server.ui.port}" graphql_url = "http://localhost:4200/graphql" + [server.telemetry] + enabled = true + [cloud] api = "${${backend}.endpoint}" endpoint = "https://api.prefect.io"
imAsparky__django-cookiecutter-202
[BUG]: Selecting django-allauth=n does not remove django-allauth HTML templates. **Describe the bug** A clear and concise description of what the bug is. **To Reproduce** Steps to reproduce the behavior: 1. Go to '...' 2. Click on '....' 3. Scroll down to '....' 4. See error **Expected behavior** A clear and concise description of what you expected to happen. **Screenshots** If applicable, add screenshots to help explain your problem. **Desktop (please complete the following information):** - imAsparky cookiecutter-pypackage version: - Python version: - OS: [e.g. iOS] - Browser [e.g. chrome, safari] - Version [e.g. 22] **Smartphone (please complete the following information):** - Device: [e.g. iPhone6] - OS: [e.g. iOS8.1] - Browser [e.g. stock browser, safari] - Version [e.g. 22] **Additional context** Add any other context about the problem here.
[ { "content": "#!/usr/bin/env python\n\"\"\"django-cookiecutter post project generation jobs.\"\"\"\nimport os\nimport subprocess # nosec\nfrom shutil import rmtree\n\nPROJECT_DIRECTORY = os.path.realpath(os.path.curdir)\n\nREMOTE_REPO = \"[email protected]:{{cookiecutter.github_username}}/\\\n{{cookiecutter.git_project_name}}.git\"\n\n\nGIT_USER = \"{{cookiecutter.author_name}}\"\nGIT_EMAIL = \"{{cookiecutter.github_user_email}}\"\n\n\nREMOVE_FILES = [\n '{% if cookiecutter.use_pyup_io == \"n\" %} \\\n .pyup.yml {% endif %}',\n '{% if cookiecutter.include_sphinx_docs == \"n\" %} \\\n docs {% endif %}',\n '{% if cookiecutter.use_readthedocs == \"n\" %} \\\n .readthedocs.yaml {% endif %}',\n '{% if cookiecutter.include_contributor_covenant_code_of_conduct == \"n\" %} \\\n docs/source/code-of-conduct.rst {% endif %}',\n '{% if cookiecutter.include_documentation_templates == \"n\" %} \\\n docs/source/doc-templates {% endif %}',\n '{% if cookiecutter.include_how_to_contribute_template == \"n\" %} \\\n docs/source/how-tos/how-to-contribute.rst {% endif %}',\n '{% if cookiecutter.open_source_license == \"Not open source\" %} \\\n LICENSE.rst {% endif %}',\n '{% if cookiecutter.create_conventional_commits_edit_message == \"n\" %} \\\n .github/.git-commit-template.txt {% endif %}',\n '{% if cookiecutter.use_pre_commit == \"n\" %} \\\n .pre-commit-config.yaml {% endif %}',\n '{% if cookiecutter.use_GH_action_semantic_version == \"n\" %} \\\n CHANGELOG.md {% endif %}',\n '{% if cookiecutter.use_GH_action_semantic_version == \"n\" %} \\\n .github/semantic.yaml {% endif %}',\n '{% if cookiecutter.use_GH_action_semantic_version == \"n\" %} \\\n .github/workflows/semantic_release.yaml {% endif %}',\n '{% if cookiecutter.create_repo_auto_test_workflow == \"n\" %} \\\n .github/workflows/test_contribution.yaml {% endif %}',\n '{% if cookiecutter.use_GH_custom_issue_templates == \"n\" %} \\\n .github/ISSUE_TEMPLATE {% endif %}',\n '{% if cookiecutter.use_GH_custom_issue_templates == \"y\" %} \\\n .github/ISSUE_TEMPLATE.md {% endif %}',\n '{% if cookiecutter.deploy_with_docker == \"n\" %} \\\n Dockerfile {% endif %}',\n '{% if cookiecutter.deploy_with_docker == \"n\" %} \\\n .dockerignore {% endif %}',\n '{% if cookiecutter.deploy_with_docker == \"n\" %} \\\n compose {% endif %}',\n '{% if cookiecutter.deploy_with_docker == \"n\" %} \\\n docker-entrypoint.sh {% endif %}',\n]\n\n# Helper functions\n\n\ndef post_gen_setup(*args, supress_exception=False, cwd=None):\n \"\"\"Helper to set up the Django project with the chosen options.\"\"\"\n cur_dir = os.getcwd()\n\n try:\n if cwd:\n os.chdir(cwd)\n\n with subprocess.Popen( # nosec\n args, stdout=subprocess.PIPE, stderr=subprocess.PIPE\n ) as proc:\n\n out, err = proc.communicate()\n out = out.decode(\"utf-8\")\n err = err.decode(\"utf-8\")\n if err and not supress_exception:\n raise Exception(err)\n if err and supress_exception:\n return out\n\n return out\n\n finally:\n os.chdir(cur_dir)\n\n\ndef remove_files(filepath):\n \"\"\"Remove files not required for this generated Django project.\"\"\"\n\n for path in filepath:\n path = path.strip()\n if path and os.path.exists(path):\n if os.path.isdir(path):\n rmtree(path)\n else:\n os.unlink(path)\n\n\n# Git functions\n\n\ndef init_git():\n \"\"\"Initialise git repository and set the remote.\"\"\"\n if not os.path.exists(os.path.join(PROJECT_DIRECTORY, \".git\")):\n post_gen_setup(\n \"git\",\n \"init\",\n supress_exception=True,\n cwd=PROJECT_DIRECTORY,\n )\n\n post_gen_setup(\n \"git\",\n \"branch\",\n \"-M\",\n 
\"main\",\n cwd=PROJECT_DIRECTORY,\n )\n\n post_gen_setup(\n \"git\",\n \"remote\",\n \"add\",\n \"origin\",\n REMOTE_REPO,\n cwd=PROJECT_DIRECTORY,\n )\n post_gen_setup(\n \"git\",\n \"config\",\n \"user.name\",\n GIT_USER,\n cwd=PROJECT_DIRECTORY,\n )\n post_gen_setup(\n \"git\",\n \"config\",\n \"user.email\",\n GIT_EMAIL,\n cwd=PROJECT_DIRECTORY,\n )\n\n\ndef git_add_and_commit_initial():\n \"\"\"Add the local files and commit to the git repository.\"\"\"\n post_gen_setup(\n \"git\",\n \"add\",\n \"-A\",\n cwd=PROJECT_DIRECTORY,\n )\n\n post_gen_setup(\n \"git\",\n \"commit\",\n \"-m\",\n '\"chore(git): Initial Commit\"',\n cwd=PROJECT_DIRECTORY,\n )\n\n\ndef git_configure_custom_commit_message():\n \"\"\"Configure git to use the custom commit message template.\"\"\"\n if os.path.exists(os.path.join(PROJECT_DIRECTORY, \".git\")):\n post_gen_setup(\n \"git\",\n \"config\",\n \"--local\",\n \"commit.template\",\n \".github/.git-commit-template.txt\",\n cwd=PROJECT_DIRECTORY,\n )\n\n\nif __name__ == \"__main__\":\n\n remove_files(REMOVE_FILES)\n\n # Git options\n\n if \"{{ cookiecutter.automatic_set_up_git_and_initial_commit }}\" == \"y\":\n init_git()\n git_add_and_commit_initial()\n\n if \"{{ cookiecutter.create_conventional_commits_edit_message}}\" == \"y\":\n git_configure_custom_commit_message()\n", "path": "hooks/post_gen_project.py" } ]
[ { "content": "#!/usr/bin/env python\n\"\"\"django-cookiecutter post project generation jobs.\"\"\"\nimport os\nimport subprocess # nosec\nfrom shutil import rmtree\n\nPROJECT_DIRECTORY = os.path.realpath(os.path.curdir)\n\nREMOTE_REPO = \"[email protected]:{{cookiecutter.github_username}}/\\\n{{cookiecutter.git_project_name}}.git\"\n\n\nGIT_USER = \"{{cookiecutter.author_name}}\"\nGIT_EMAIL = \"{{cookiecutter.github_user_email}}\"\n\n\nREMOVE_FILES = [\n '{% if cookiecutter.use_pyup_io == \"n\" %} \\\n .pyup.yml {% endif %}',\n '{% if cookiecutter.include_sphinx_docs == \"n\" %} \\\n docs {% endif %}',\n '{% if cookiecutter.use_readthedocs == \"n\" %} \\\n .readthedocs.yaml {% endif %}',\n '{% if cookiecutter.include_contributor_covenant_code_of_conduct == \"n\" %} \\\n docs/source/code-of-conduct.rst {% endif %}',\n '{% if cookiecutter.include_documentation_templates == \"n\" %} \\\n docs/source/doc-templates {% endif %}',\n '{% if cookiecutter.include_how_to_contribute_template == \"n\" %} \\\n docs/source/how-tos/how-to-contribute.rst {% endif %}',\n '{% if cookiecutter.open_source_license == \"Not open source\" %} \\\n LICENSE.rst {% endif %}',\n '{% if cookiecutter.create_conventional_commits_edit_message == \"n\" %} \\\n .github/.git-commit-template.txt {% endif %}',\n '{% if cookiecutter.use_pre_commit == \"n\" %} \\\n .pre-commit-config.yaml {% endif %}',\n '{% if cookiecutter.use_GH_action_semantic_version == \"n\" %} \\\n CHANGELOG.md {% endif %}',\n '{% if cookiecutter.use_GH_action_semantic_version == \"n\" %} \\\n .github/semantic.yaml {% endif %}',\n '{% if cookiecutter.use_GH_action_semantic_version == \"n\" %} \\\n .github/workflows/semantic_release.yaml {% endif %}',\n '{% if cookiecutter.create_repo_auto_test_workflow == \"n\" %} \\\n .github/workflows/test_contribution.yaml {% endif %}',\n '{% if cookiecutter.use_GH_custom_issue_templates == \"n\" %} \\\n .github/ISSUE_TEMPLATE {% endif %}',\n '{% if cookiecutter.use_GH_custom_issue_templates == \"y\" %} \\\n .github/ISSUE_TEMPLATE.md {% endif %}',\n '{% if cookiecutter.deploy_with_docker == \"n\" %} \\\n Dockerfile {% endif %}',\n '{% if cookiecutter.deploy_with_docker == \"n\" %} \\\n .dockerignore {% endif %}',\n '{% if cookiecutter.deploy_with_docker == \"n\" %} \\\n compose {% endif %}',\n '{% if cookiecutter.deploy_with_docker == \"n\" %} \\\n docker-entrypoint.sh {% endif %}',\n '{% if cookiecutter.use_django_allauth == \"n\" %} \\\n templates/account {% endif %}',\n]\n\n# Helper functions\n\n\ndef post_gen_setup(*args, supress_exception=False, cwd=None):\n \"\"\"Helper to set up the Django project with the chosen options.\"\"\"\n cur_dir = os.getcwd()\n\n try:\n if cwd:\n os.chdir(cwd)\n\n with subprocess.Popen( # nosec\n args, stdout=subprocess.PIPE, stderr=subprocess.PIPE\n ) as proc:\n\n out, err = proc.communicate()\n out = out.decode(\"utf-8\")\n err = err.decode(\"utf-8\")\n if err and not supress_exception:\n raise Exception(err)\n if err and supress_exception:\n return out\n\n return out\n\n finally:\n os.chdir(cur_dir)\n\n\ndef remove_files(filepath):\n \"\"\"Remove files not required for this generated Django project.\"\"\"\n\n for path in filepath:\n path = path.strip()\n if path and os.path.exists(path):\n if os.path.isdir(path):\n rmtree(path)\n else:\n os.unlink(path)\n\n\n# Git functions\n\n\ndef init_git():\n \"\"\"Initialise git repository and set the remote.\"\"\"\n if not os.path.exists(os.path.join(PROJECT_DIRECTORY, \".git\")):\n post_gen_setup(\n \"git\",\n \"init\",\n 
supress_exception=True,\n cwd=PROJECT_DIRECTORY,\n )\n\n post_gen_setup(\n \"git\",\n \"branch\",\n \"-M\",\n \"main\",\n cwd=PROJECT_DIRECTORY,\n )\n\n post_gen_setup(\n \"git\",\n \"remote\",\n \"add\",\n \"origin\",\n REMOTE_REPO,\n cwd=PROJECT_DIRECTORY,\n )\n post_gen_setup(\n \"git\",\n \"config\",\n \"user.name\",\n GIT_USER,\n cwd=PROJECT_DIRECTORY,\n )\n post_gen_setup(\n \"git\",\n \"config\",\n \"user.email\",\n GIT_EMAIL,\n cwd=PROJECT_DIRECTORY,\n )\n\n\ndef git_add_and_commit_initial():\n \"\"\"Add the local files and commit to the git repository.\"\"\"\n post_gen_setup(\n \"git\",\n \"add\",\n \"-A\",\n cwd=PROJECT_DIRECTORY,\n )\n\n post_gen_setup(\n \"git\",\n \"commit\",\n \"-m\",\n '\"chore(git): Initial Commit\"',\n cwd=PROJECT_DIRECTORY,\n )\n\n\ndef git_configure_custom_commit_message():\n \"\"\"Configure git to use the custom commit message template.\"\"\"\n if os.path.exists(os.path.join(PROJECT_DIRECTORY, \".git\")):\n post_gen_setup(\n \"git\",\n \"config\",\n \"--local\",\n \"commit.template\",\n \".github/.git-commit-template.txt\",\n cwd=PROJECT_DIRECTORY,\n )\n\n\nif __name__ == \"__main__\":\n\n remove_files(REMOVE_FILES)\n\n # Git options\n\n if \"{{ cookiecutter.automatic_set_up_git_and_initial_commit }}\" == \"y\":\n init_git()\n git_add_and_commit_initial()\n\n if \"{{ cookiecutter.create_conventional_commits_edit_message}}\" == \"y\":\n git_configure_custom_commit_message()\n", "path": "hooks/post_gen_project.py" } ]
diff --git a/hooks/post_gen_project.py b/hooks/post_gen_project.py index fb39b27c..304bd5a3 100644 --- a/hooks/post_gen_project.py +++ b/hooks/post_gen_project.py @@ -53,6 +53,8 @@ compose {% endif %}', '{% if cookiecutter.deploy_with_docker == "n" %} \ docker-entrypoint.sh {% endif %}', + '{% if cookiecutter.use_django_allauth == "n" %} \ + templates/account {% endif %}', ] # Helper functions diff --git a/tests/test_bake_django.py b/tests/test_bake_django.py index 5d6b1614..8f4bd269 100644 --- a/tests/test_bake_django.py +++ b/tests/test_bake_django.py @@ -76,6 +76,24 @@ def test_baked_django_without_allauth_settings_ok(cookies): ) +def test_baked_django_with_allauth_templates_ok(cookies): + """Test Django allauth HTML templates have been generated.""" + default_django = cookies.bake() + + templates_path = default_django.project_path / "templates/account" + + assert os.path.isdir(templates_path) + + +def test_baked_django_without_allauth_templates_ok(cookies): + """Test Django allauth HTML templates have not been generated.""" + non_default_django = cookies.bake(extra_context={"use_django_allauth": "n"}) + + templates_path = non_default_django.project_path / "templates/account" + + assert not os.path.isdir(templates_path) + + def test_baked_django_with_allauth_url_ok(cookies): """Test Django allauth url.py file entry has been generated.""" default_django = cookies.bake()
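The fix relies on how the post-generation hook treats its `REMOVE_FILES` entries: cookiecutter renders the hook with Jinja before running it, so with `use_django_allauth = "n"` the new entry renders to `templates/account` surrounded by whitespace, while with `"y"` it renders to an empty string that `remove_files()` strips and skips. A small stand-alone sketch of that behaviour, using `jinja2` directly with an illustrative context dict rather than a real cookiecutter run:

```python
import os
from shutil import rmtree

from jinja2 import Template

entry = '{% if cookiecutter.use_django_allauth == "n" %} templates/account {% endif %}'
rendered = Template(entry).render(cookiecutter={"use_django_allauth": "n"})

path = rendered.strip()  # -> "templates/account"; an empty string when allauth is kept
if path and os.path.exists(path):
    if os.path.isdir(path):
        rmtree(path)
    else:
        os.unlink(path)
```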
yt-project__yt-2259
Index Error updating from YT-3.4.0 to YT-3.5.1 <!--To help us understand and resolve your issue, please fill out the form to the best of your ability.--> <!--You can feel free to delete the sections that do not apply.--> ### Bug report **Bug summary** Index error after yt upgrade **Code for reproduction** <!--A minimum code snippet required to reproduce the bug, also minimizing the number of dependencies required.--> <!-- If you need to use a data file to trigger the issue you're having, consider using one of the datasets from the yt data hub (http://yt-project.org/data). If your issue cannot be triggered using a public dataset, you can use the yt curldrop (https://docs.hub.yt/services.html#curldrop) to share data files. Please include a link to the dataset in the issue if you use the curldrop.--> ``` import yt from yt.units import kpc import matplotlib.pyplot as plt import numpy as np np.set_printoptions(threshold=1500) filename="/lunarc/nobackup/users/samvad/FINAL-50-0.5/output/output_00018/info_00018.txt" ds=yt.load(filename) for i in sorted(ds.derived_field_list): print(i) ``` **Actual outcome** <!--The output produced by the above code, which may be a screenshot, console output, etc.--> ``` File "fields.py", line 10, in <module> for i in sorted(ds.derived_field_list): File "yt/data_objects/static_output.py", line 216, in ireq self.index File "yt/data_objects/static_output.py", line 509, in index self, dataset_type=self.dataset_type) File "yt/frontends/ramses/data_structures.py", line 236, in __init__ super(RAMSESIndex, self).__init__(ds, dataset_type) File "yt/geometry/geometry_handler.py", line 50, in __init__ self._setup_geometry() File "yt/geometry/oct_geometry_handler.py", line 25, in _setup_geometry self._initialize_oct_handler() File "yt/frontends/ramses/data_structures.py", line 245, in _initialize_oct_handler for i in cpu_list] File "yt/frontends/ramses/data_structures.py", line 245, in <listcomp> for i in cpu_list] File "yt/frontends/ramses/data_structures.py", line 82, in __init__ self._read_amr_header() File "yt/frontends/ramses/data_structures.py", line 141, in _read_amr_header hvals.update(f.read_attrs(header)) File "yt/utilities/cython_fortran_utils.pyx", line 223, in yt.utilities.cython_fortran_utils.FortranFile.read_attrs IndexError: index 0 is out of bounds for axis 0 with size 0 ``` **Expected outcome** has to print the fields in the data. Was working with yt 3.4.0 **Version Information** <!--Please specify your platform and versions of the relevant libraries you are using:--> * Operating System: Mac * Python Version: 3.6 * yt version: 3.5.1 * Other Libraries (if applicable): installed Anaconda separately and then did conda installation of YT using 'forge' <!--Please tell us how you installed yt and python e.g., from source, pip, conda. If you installed from conda, please specify which channel you used if not the default-->
[ { "content": "\"\"\"\nDefinitions for RAMSES files\n\n\n\n\n\"\"\"\n\n#-----------------------------------------------------------------------------\n# Copyright (c) 2013, yt Development Team.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\n# These functions are RAMSES-specific\nfrom yt.config import ytcfg\nfrom yt.funcs import mylog\nimport re\n\ndef ramses_header(hvals):\n header = ( ('ncpu', 1, 'i'),\n ('ndim', 1, 'i'),\n ('nx', 3, 'i'),\n ('nlevelmax', 1, 'i'),\n ('ngridmax', 1, 'i'),\n ('nboundary', 1, 'i'),\n ('ngrid_current', 1, 'i'),\n ('boxlen', 1, 'd'),\n ('nout', 3, 'i')\n )\n yield header\n # TODO: REMOVE\n noutput, iout, ifout = hvals['nout']\n next_set = ( ('tout', noutput, 'd'),\n ('aout', noutput, 'd'),\n ('t', 1, 'd'),\n ('dtold', hvals['nlevelmax'], 'd'),\n ('dtnew', hvals['nlevelmax'], 'd'),\n ('nstep', 2, 'i'),\n ('stat', 3, 'd'),\n ('cosm', 7, 'd'),\n ('timing', 5, 'd'),\n ('mass_sph', 1, 'd') )\n yield next_set\n\nfield_aliases = {\n 'standard_five': ('Density',\n 'x-velocity',\n 'y-velocity',\n 'z-velocity',\n 'Pressure'),\n 'standard_six': ('Density',\n 'x-velocity',\n 'y-velocity',\n 'z-velocity',\n 'Pressure',\n 'Metallicity'),\n\n}\n\n## Regular expressions used to parse file descriptors\nVERSION_RE = re.compile(r'# version: *(\\d+)')\n# This will match comma-separated strings, discarding whitespaces\n# on the left hand side\nVAR_DESC_RE = re.compile(r'\\s*([^\\s]+),\\s*([^\\s]+),\\s*([^\\s]+)')\n\n\n## Configure family mapping\nparticle_families = {\n 'DM': 1,\n 'star': 2,\n 'cloud': 3,\n 'dust': 4,\n 'star_tracer': -2,\n 'cloud_tracer': -3,\n 'dust_tracer': -4,\n 'gas_tracer': 0\n}\n\nif ytcfg.has_section('ramses-families'):\n for key in particle_families.keys():\n val = ytcfg.getint('ramses-families', key, fallback=None)\n if val is not None:\n mylog.info('Changing family %s from %s to %s' % (key, particle_families[key], val))\n particle_families[key] = val\n", "path": "yt/frontends/ramses/definitions.py" } ]
[ { "content": "\"\"\"\nDefinitions for RAMSES files\n\n\n\n\n\"\"\"\n\n#-----------------------------------------------------------------------------\n# Copyright (c) 2013, yt Development Team.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\n# These functions are RAMSES-specific\nfrom yt.config import ytcfg\nfrom yt.funcs import mylog\nimport re\n\ndef ramses_header(hvals):\n header = ( ('ncpu', 1, 'i'),\n ('ndim', 1, 'i'),\n ('nx', 3, 'i'),\n ('nlevelmax', 1, 'i'),\n ('ngridmax', 1, 'i'),\n ('nboundary', 1, 'i'),\n ('ngrid_current', 1, 'i'),\n ('boxlen', 1, 'd'),\n ('nout', 3, 'i')\n )\n yield header\n # TODO: REMOVE\n noutput, iout, ifout = hvals['nout']\n next_set = ( ('tout', noutput, 'd'),\n ('aout', noutput, 'd'),\n ('t', 1, 'd'),\n ('dtold', hvals['nlevelmax'], 'd'),\n ('dtnew', hvals['nlevelmax'], 'd'),\n ('nstep', 2, 'i'),\n ('stat', 3, 'd'),\n ('cosm', 7, 'd'),\n ('timing', 5, 'd'),\n ('mass_sph', 1, 'd', True)\n )\n yield next_set\n\nfield_aliases = {\n 'standard_five': ('Density',\n 'x-velocity',\n 'y-velocity',\n 'z-velocity',\n 'Pressure'),\n 'standard_six': ('Density',\n 'x-velocity',\n 'y-velocity',\n 'z-velocity',\n 'Pressure',\n 'Metallicity'),\n\n}\n\n## Regular expressions used to parse file descriptors\nVERSION_RE = re.compile(r'# version: *(\\d+)')\n# This will match comma-separated strings, discarding whitespaces\n# on the left hand side\nVAR_DESC_RE = re.compile(r'\\s*([^\\s]+),\\s*([^\\s]+),\\s*([^\\s]+)')\n\n\n## Configure family mapping\nparticle_families = {\n 'DM': 1,\n 'star': 2,\n 'cloud': 3,\n 'dust': 4,\n 'star_tracer': -2,\n 'cloud_tracer': -3,\n 'dust_tracer': -4,\n 'gas_tracer': 0\n}\n\nif ytcfg.has_section('ramses-families'):\n for key in particle_families.keys():\n val = ytcfg.getint('ramses-families', key, fallback=None)\n if val is not None:\n mylog.info('Changing family %s from %s to %s' % (key, particle_families[key], val))\n particle_families[key] = val\n", "path": "yt/frontends/ramses/definitions.py" } ]
diff --git a/yt/frontends/ramses/definitions.py b/yt/frontends/ramses/definitions.py index 9cc8278edaf..78b20ed4e70 100644 --- a/yt/frontends/ramses/definitions.py +++ b/yt/frontends/ramses/definitions.py @@ -42,7 +42,8 @@ def ramses_header(hvals): ('stat', 3, 'd'), ('cosm', 7, 'd'), ('timing', 5, 'd'), - ('mass_sph', 1, 'd') ) + ('mass_sph', 1, 'd', True) + ) yield next_set field_aliases = { diff --git a/yt/frontends/ramses/tests/test_outputs.py b/yt/frontends/ramses/tests/test_outputs.py index 768192301e1..a18ac02179d 100644 --- a/yt/frontends/ramses/tests/test_outputs.py +++ b/yt/frontends/ramses/tests/test_outputs.py @@ -373,10 +373,10 @@ def test_formation_time(): @requires_file(ramses_new_format) def test_cooling_fields(): - + #Test the field is being loaded correctly ds=yt.load(ramses_new_format) - + #Derived cooling fields assert ('gas','cooling_net') in ds.derived_field_list assert ('gas','cooling_total') in ds.derived_field_list @@ -395,7 +395,7 @@ def test_cooling_fields(): assert ('gas','heating_primordial_prime') in ds.derived_field_list assert ('gas','heating_compton_prime') in ds.derived_field_list assert ('gas','mu') in ds.derived_field_list - + #Abundances assert ('gas','Electron_number_density') in ds.derived_field_list assert ('gas','HI_number_density') in ds.derived_field_list @@ -426,3 +426,15 @@ def _mixed_field(field, data): # Access the field ds.r[('gas', 'mixed_files')] + +ramses_empty_record = "ramses_empty_record/output_00003/info_00003.txt" +@requires_file(ramses_empty_record) +def test_ramses_empty_record(): + # Test that yt can load datasets with empty records + ds = yt.load(ramses_empty_record) + + # This should not fail + ds.index + + # Access some field + ds.r[('gas', 'density')] \ No newline at end of file diff --git a/yt/utilities/cython_fortran_utils.pyx b/yt/utilities/cython_fortran_utils.pyx index f52c9a8773e..25acce3fb7c 100644 --- a/yt/utilities/cython_fortran_utils.pyx +++ b/yt/utilities/cython_fortran_utils.pyx @@ -190,8 +190,12 @@ cdef class FortranFile: attrs : iterable of iterables This object should be an iterable of one of the formats: [ (attr_name, count, struct type), ... ]. - [ ((name1,name2,name3),count, vector type] - [ ((name1,name2,name3),count, 'type type type'] + [ ((name1,name2,name3), count, vector type] + [ ((name1,name2,name3), count, 'type type type'] + [ (attr_name, count, struct type, optional)] + + `optional` : boolean. + If True, the attribute can be stored as an empty Fortran record. Returns ------- @@ -218,12 +222,28 @@ cdef class FortranFile: data = {} - for key, n, dtype in attrs: + for a in attrs: + if len(a) == 3: + key, n, dtype = a + optional = False + else: + key, n, dtype, optional = a if n == 1: - data[key] = self.read_vector(dtype)[0] + tmp = self.read_vector(dtype) + if len(tmp) == 0 and optional: + continue + elif len(tmp) == 1: + data[key] = tmp[0] + else: + raise ValueError("Expected a record of length %s, got %s" % (n, len(tmp))) else: tmp = self.read_vector(dtype) - if type(key) == tuple: + if len(tmp) == 0 and optional: + continue + elif len(tmp) != n: + raise ValueError("Expected a record of length %s, got %s" % (n, len(tmp))) + + if isinstance(key, tuple): # There are multiple keys for ikey in range(n): data[key[ikey]] = tmp[ikey]
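For readers following the patch above: the key change is that an attribute spec tuple may now carry a fourth, boolean element marking it optional, so an empty Fortran record no longer triggers the `IndexError`. Below is a minimal pure-Python sketch of that pattern, simplified from the Cython `read_attrs` shown in the diff; the `read_vector` callback and the flat return shape are placeholders, not the actual yt implementation.

```python
def read_attrs(read_vector, attrs):
    """Simplified stand-in for FortranFile.read_attrs with optional attributes."""
    data = {}
    for spec in attrs:
        # A 4th element marks the attribute as optional, e.g. ('mass_sph', 1, 'd', True).
        if len(spec) == 4:
            key, n, dtype, optional = spec
        else:
            key, n, dtype = spec
            optional = False
        values = read_vector(dtype)
        if len(values) == 0 and optional:
            continue  # empty record for an optional attribute: skip instead of failing
        if len(values) != n:
            raise ValueError("Expected a record of length %s, got %s" % (n, len(values)))
        data[key] = values[0] if n == 1 else values
    return data
```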
rucio__rucio-1028
Move conveyor transfers to third_party_copy_operation

Motivation
----------

At the moment `read` is used for the source and `write` is used for the destination; `third_party_copy` should be used for both.
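A hypothetical sketch of what the requested change amounts to — this is illustrative only, not rucio's actual conveyor code, and the `rsemgr` object, method name, and signature are assumptions: resolve the PFNs for both transfer endpoints with the `third_party_copy` operation rather than `read` for the source and `write` for the destination.

```python
def resolve_transfer_pfns(rsemgr, src_rse_settings, dst_rse_settings, lfns):
    # Before: operation="read" for the source and operation="write" for the
    # destination. After: the same third-party-copy operation on both ends.
    src_pfns = rsemgr.lfns2pfns(src_rse_settings, lfns, operation="third_party_copy")
    dst_pfns = rsemgr.lfns2pfns(dst_rse_settings, lfns, operation="third_party_copy")
    return src_pfns, dst_pfns
```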
[ { "content": "\n'''\nThis file is automatically generated; Do not edit it. :)\n'''\nVERSION_INFO = {\n 'final': True,\n 'version': '1.15.3',\n 'branch_nick': 'patch-0-Rucio_1_15_3_preparation',\n 'revision_id': 'cd14416223d0b81a940312cb180a07778f85f1f8',\n 'revno': 6357\n}\n", "path": "lib/rucio/vcsversion.py" } ]
[ { "content": "\n'''\nThis file is automatically generated; Do not edit it. :)\n'''\nVERSION_INFO = {\n 'final': True,\n 'version': '1.15.4',\n 'branch_nick': 'patch-0-Rucio_1_15_4_preparation',\n 'revision_id': 'c2972be03297dc73a65d35bc9ffab1516efffebb',\n 'revno': 6402\n}\n", "path": "lib/rucio/vcsversion.py" } ]
diff --git a/doc/source/releasenotes/1.15.4.rst b/doc/source/releasenotes/1.15.4.rst new file mode 100644 index 0000000000..5bcb57f505 --- /dev/null +++ b/doc/source/releasenotes/1.15.4.rst @@ -0,0 +1,35 @@ +====== +1.15.4 +====== + +------- +General +------- + +************ +Enhancements +************ + +- Core & Internals: rucio-judge-cleaner traceback against postgres `#722 <https://github.com/rucio/rucio/issues/722>`_ +- Core & Internals: Keyword parameters should always be used for instansiating datetime.timedelta `#807 <https://github.com/rucio/rucio/issues/807>`_ +- Documentation: Add external link for ActiveMQ for people joining the project `#1001 <https://github.com/rucio/rucio/issues/1001>`_ +- Documentation: Typographical Review of Documentation Files `#985 <https://github.com/rucio/rucio/issues/985>`_ +- Release management: Add python3 in the rucio dev docker image `#973 <https://github.com/rucio/rucio/issues/973>`_ + +------- +Clients +------- + +************ +Enhancements +************ + +- Clients: section policy/support/rucio_support missing in rucio.cfg template and tests `#976 <https://github.com/rucio/rucio/issues/976>`_ + +**** +Bugs +**** + +- Clients: Error with python3: the JSON object must be str, not 'bytes' `#964 <https://github.com/rucio/rucio/issues/964>`_ +- Clients: Client AttributeError when the server returns nothing `#965 <https://github.com/rucio/rucio/issues/965>`_ +- Release management: setup_clients.py classifiers needs to be a list, not tuples `#949 <https://github.com/rucio/rucio/issues/949>`_ diff --git a/lib/rucio/vcsversion.py b/lib/rucio/vcsversion.py index c3ca549c50..132ef98aad 100644 --- a/lib/rucio/vcsversion.py +++ b/lib/rucio/vcsversion.py @@ -4,8 +4,8 @@ ''' VERSION_INFO = { 'final': True, - 'version': '1.15.3', - 'branch_nick': 'patch-0-Rucio_1_15_3_preparation', - 'revision_id': 'cd14416223d0b81a940312cb180a07778f85f1f8', - 'revno': 6357 + 'version': '1.15.4', + 'branch_nick': 'patch-0-Rucio_1_15_4_preparation', + 'revision_id': 'c2972be03297dc73a65d35bc9ffab1516efffebb', + 'revno': 6402 } diff --git a/lib/rucio/web/ui/static/webui_version b/lib/rucio/web/ui/static/webui_version index 94a53c73c0..66dd6a22f2 100644 --- a/lib/rucio/web/ui/static/webui_version +++ b/lib/rucio/web/ui/static/webui_version @@ -1 +1 @@ -1.15.3 \ No newline at end of file +1.15.4 \ No newline at end of file
meltano__meltano-6676
bug: Test connector - asyncio

### Meltano Version

2.4.0

### Python Version

3.9

### Bug scope

CLI (options, error messages, logging, etc.)

### Operating System

Rocky Linux release 8.6

### Description

```
meltano config tap-postgres test
```

The above command works fine up to version 2.1.0 with Python 3.9.7, but it fails when I switch to 2.2.0, 2.3.0, or 2.4.0:

```
Exception ignored in: <function BaseSubprocessTransport.__del__ at 0x7fcad8ae8670>
Traceback (most recent call last):
  File "/usr/lib64/python3.9/asyncio/base_subprocess.py", line 126, in __del__
    self.close()
  File "/usr/lib64/python3.9/asyncio/base_subprocess.py", line 104, in close
    proto.pipe.close()
  File "/usr/lib64/python3.9/asyncio/unix_events.py", line 536, in close
    self._close(None)
  File "/usr/lib64/python3.9/asyncio/unix_events.py", line 560, in _close
    self._loop.call_soon(self._call_connection_lost, exc)
  File "/usr/lib64/python3.9/asyncio/base_events.py", line 746, in call_soon
    self._check_closed()
  File "/usr/lib64/python3.9/asyncio/base_events.py", line 510, in _check_closed
    raise RuntimeError('Event loop is closed')
RuntimeError: Event loop is closed
```

To work around it, I changed `cli/config.py` as shown in the Code section below. Please check this at your end and fix it in a coming release.

### Code

To fix it, I changed `cli/config.py`:

```diff
- is_valid, detail = asyncio.run(_validate())
+ is_valid, detail = asyncio.new_event_loop().run_until_complete(_validate())
```
[ { "content": "\"\"\"Plugin invoker class.\"\"\"\n\nfrom __future__ import annotations\n\nimport asyncio\nimport enum\nimport logging\nimport os\nimport uuid\nfrom contextlib import asynccontextmanager\nfrom pathlib import Path\nfrom typing import Any, Generator\n\nfrom structlog.stdlib import get_logger\n\nfrom meltano.core.container.container_service import ContainerService\nfrom meltano.core.logging.utils import SubprocessOutputWriter\n\nfrom .error import Error\nfrom .plugin import PluginRef\nfrom .plugin.config_service import PluginConfigService\nfrom .plugin.project_plugin import ProjectPlugin\nfrom .plugin.settings_service import PluginSettingsService\nfrom .project import Project\nfrom .project_plugins_service import ProjectPluginsService\nfrom .project_settings_service import ProjectSettingsService\nfrom .settings_service import FeatureFlags\nfrom .utils import expand_env_vars\nfrom .venv_service import VenvService, VirtualEnv\n\nlogger = get_logger(__name__)\n\n\ndef invoker_factory(project, plugin: ProjectPlugin, *args, **kwargs):\n \"\"\"Instantiate a plugin invoker from a project plugin.\n\n Args:\n project: Meltano project.\n plugin: Plugin instance.\n args: Invoker constructor positional arguments.\n kwargs: Invoker constructor keyword arguments.\n\n Returns:\n A plugin invoker.\n \"\"\"\n cls = PluginInvoker # noqa: WPS117\n\n if hasattr(plugin, \"invoker_class\"): # noqa: WPS421\n cls = plugin.invoker_class # noqa: WPS117\n\n return cls(project, plugin, *args, **kwargs)\n\n\nclass InvokerError(Error):\n \"\"\"Generic plugin invoker error.\"\"\"\n\n\nclass ExecutableNotFoundError(InvokerError):\n \"\"\"Occurs when the executable could not be found.\"\"\"\n\n def __init__(self, plugin: PluginRef, executable: str):\n \"\"\"Initialize ExecutableNotFoundError.\n\n Args:\n plugin: Meltano plugin reference.\n executable: Plugin command executable.\n \"\"\"\n plugin_type_descriptor = plugin.type.descriptor.capitalize()\n plugin_type = plugin.type.singular\n super().__init__(\n f\"Executable '{executable}' could not be found. 
\"\n + f\"{plugin_type_descriptor} '{plugin.name}' may not have \"\n + \"been installed yet using \"\n + f\"`meltano install {plugin_type} {plugin.name}`, \"\n + \"or the executable name may be incorrect.\"\n )\n\n\nclass InvokerNotPreparedError(InvokerError):\n \"\"\"Occurs when `invoke` is called before `prepare`.\"\"\"\n\n\nclass UnknownCommandError(InvokerError):\n \"\"\"Occurs when `invoke` is called in command mode with an undefined command.\"\"\"\n\n def __init__(self, plugin: PluginRef, command):\n \"\"\"Initialize UnknownCommandError.\n\n Args:\n plugin: Meltano plugin reference.\n command: Plugin command name.\n \"\"\"\n self.plugin = plugin\n self.command = command\n\n def __str__(self):\n \"\"\"Return error message.\n\n Returns:\n String representation of this exception.\n \"\"\"\n if self.plugin.supported_commands:\n supported_commands = \", \".join(self.plugin.supported_commands)\n desc = f\"supports the following commands: {supported_commands}\"\n else:\n desc = \"does not define any commands.\"\n plugin_type_descriptor = self.plugin.type.descriptor.capitalize()\n plugin_name = self.plugin.name\n return \" \".join(\n [\n f\"Command '{self.command}' could not be found.\",\n f\"{plugin_type_descriptor} '{plugin_name}'\",\n desc,\n ]\n )\n\n\nclass PluginInvoker: # noqa: WPS214, WPS230\n \"\"\"This class handles the invocation of a `ProjectPlugin` instance.\"\"\"\n\n class StdioSource(str, enum.Enum):\n \"\"\"Describes the available unix style std io sources.\"\"\"\n\n STDIN = \"stdin\"\n STDOUT = \"stdout\"\n STDERR = \"stderr\"\n\n def __init__(\n self,\n project: Project,\n plugin: ProjectPlugin,\n context: Any | None = None,\n output_handlers: dict | None = None,\n run_dir: Path | None = None,\n config_dir: Path | None = None,\n venv_service: VenvService | None = None,\n plugins_service: ProjectPluginsService | None = None,\n plugin_config_service: PluginConfigService | None = None,\n plugin_settings_service: PluginSettingsService | None = None,\n ):\n \"\"\"Create a new plugin invoker.\n\n Args:\n project: Meltano Project.\n plugin: Meltano Plugin.\n context: Invocation context.\n output_handlers: Logging and output handlers.\n run_dir: Execution directory.\n config_dir: Configuration files directory.\n venv_service: Virtual Environment manager.\n plugins_service: Plugin manager.\n plugin_config_service: Plugin Configuration manager.\n plugin_settings_service: Plugin Settings manager.\n \"\"\"\n self.project = project\n self.plugin = plugin\n self.context = context\n self.output_handlers = output_handlers\n\n self.venv_service: VenvService | None = None\n if plugin.pip_url or venv_service:\n self.venv_service = venv_service or VenvService(\n project,\n name=plugin.venv_name,\n namespace=plugin.type,\n )\n self.plugin_config_service = plugin_config_service or PluginConfigService(\n plugin,\n config_dir or self.project.plugin_dir(plugin),\n run_dir or self.project.run_dir(plugin.name),\n )\n\n self.plugins_service = plugins_service or ProjectPluginsService(project)\n self.settings_service = plugin_settings_service or PluginSettingsService(\n project,\n plugin,\n plugins_service=self.plugins_service,\n )\n\n self._prepared = False\n self.plugin_config = {}\n self.plugin_config_processed = {}\n self.plugin_config_extras = {}\n self.plugin_config_env = {}\n\n @property\n def capabilities(self):\n \"\"\"Get plugin immutable capabilities.\n\n Makes sure the capabilities are immutable from the `PluginInvoker` interface.\n\n Returns:\n The set of plugin capabilities.\n \"\"\"\n 
return frozenset(self.plugin.capabilities)\n\n @property\n def files(self) -> dict[str, Path]:\n \"\"\"Get all config and output files of the plugin.\n\n Returns:\n A mapping of file IDs to file names.\n \"\"\"\n plugin_files = {**self.plugin.config_files, **self.plugin.output_files}\n\n return {\n _key: self.plugin_config_service.run_dir.joinpath(filename)\n for _key, filename in plugin_files.items()\n }\n\n async def prepare(self, session):\n \"\"\"Prepare plugin config.\n\n Args:\n session: Database session.\n \"\"\"\n self.plugin_config = self.settings_service.as_dict(\n extras=False, session=session\n )\n self.plugin_config_processed = self.settings_service.as_dict(\n extras=False, process=True, session=session\n )\n self.plugin_config_extras = self.settings_service.as_dict(\n extras=True, session=session\n )\n self.plugin_config_env = self.settings_service.as_env(session=session)\n\n async with self.plugin.trigger_hooks(\"configure\", self, session):\n self.plugin_config_service.configure()\n self._prepared = True\n\n async def cleanup(self):\n \"\"\"Reset the plugin config.\"\"\"\n self.plugin_config = {}\n self.plugin_config_processed = {}\n self.plugin_config_extras = {}\n self.plugin_config_env = {}\n\n async with self.plugin.trigger_hooks(\"cleanup\", self):\n self._prepared = False\n\n @asynccontextmanager\n async def prepared(self, session):\n \"\"\"Context manager that prepares plugin config.\n\n Args:\n session: Database session.\n\n Yields:\n Yields to the caller, then resetting the config.\n \"\"\"\n try: # noqa: WPS229. Allow try body of length > 1.\n await self.prepare(session)\n yield\n finally:\n await self.cleanup()\n\n def exec_path(self, executable: str | None = None) -> str | Path:\n \"\"\"Return the absolute path to the executable.\n\n Uses the plugin executable if none is specified.\n\n Args:\n executable: Optional executable string.\n\n Returns:\n Full path to the executable.\n \"\"\"\n executable = executable or self.plugin.executable\n if not self.venv_service:\n if \"/\" not in executable.replace(\"\\\\\", \"/\"):\n # Expect executable on path\n return executable\n\n # Return executable relative to project directory\n return self.project.root.joinpath(executable)\n\n # Return executable within venv\n return self.venv_service.exec_path(executable)\n\n def exec_args(self, *args, command=None, env=None):\n \"\"\"Materialize the arguments to be passed to the executable.\n\n Args:\n args: Optional plugin args.\n command: Plugin command name.\n env: Environment variables\n\n Returns:\n List of plugin invocation arguments.\n \"\"\"\n env = env or {}\n executable = self.exec_path()\n if command:\n command_config = self.find_command(command)\n plugin_args = command_config.expanded_args(command, env)\n if command_config.executable:\n executable = self.exec_path(command_config.executable)\n else:\n plugin_args = self.plugin.exec_args(self)\n\n return [str(arg) for arg in (executable, *plugin_args, *args)]\n\n def find_command(self, name):\n \"\"\"Find a Command by name.\n\n Args:\n name: Command name.\n\n Returns:\n Command instance.\n\n Raises:\n UnknownCommandError: If command is not defined.\n \"\"\"\n try:\n return self.plugin.all_commands[name]\n except KeyError as err:\n raise UnknownCommandError(self.plugin, name) from err\n\n def env(self):\n \"\"\"Environment variable mapping.\n\n Returns:\n Dictionary of environment variables.\n \"\"\"\n project_settings_service = ProjectSettingsService(\n self.project, config_service=self.plugins_service.config_service\n )\n 
with project_settings_service.feature_flag(\n FeatureFlags.STRICT_ENV_VAR_MODE, raise_error=False\n ) as strict_env_var_mode:\n\n # Expand root env w/ os.environ\n expanded_project_env = expand_env_vars(\n project_settings_service.env,\n os.environ,\n raise_if_missing=strict_env_var_mode,\n )\n expanded_project_env.update(\n expand_env_vars(\n self.settings_service.project.dotenv_env,\n os.environ,\n raise_if_missing=strict_env_var_mode,\n )\n )\n # Expand active env w/ expanded root env\n expanded_active_env = (\n expand_env_vars(\n self.settings_service.project.active_environment.env,\n expanded_project_env,\n raise_if_missing=strict_env_var_mode,\n )\n if self.settings_service.project.active_environment\n else {}\n )\n\n # Expand root plugin env w/ expanded active env\n expanded_root_plugin_env = expand_env_vars(\n self.settings_service.plugin.env,\n expanded_active_env,\n raise_if_missing=strict_env_var_mode,\n )\n\n # Expand active env plugin env w/ expanded root plugin env\n expanded_active_env_plugin_env = (\n expand_env_vars(\n self.settings_service.environment_plugin_config.env,\n expanded_root_plugin_env,\n raise_if_missing=strict_env_var_mode,\n )\n if self.settings_service.environment_plugin_config\n else {}\n )\n\n env = {\n **expanded_project_env,\n **self.project.dotenv_env,\n **self.settings_service.env,\n **self.plugin_config_env,\n **expanded_root_plugin_env,\n **expanded_active_env,\n **expanded_active_env_plugin_env,\n }\n\n # Ensure Meltano venv is not inherited\n env.pop(\"VIRTUAL_ENV\", None)\n env.pop(\"PYTHONPATH\", None)\n if self.venv_service:\n # Switch to plugin-specific venv\n venv = VirtualEnv(\n self.project.venvs_dir(self.plugin.type, self.plugin.name)\n )\n venv_dir = str(venv.bin_dir)\n env[\"VIRTUAL_ENV\"] = str(venv.root)\n env[\"PATH\"] = os.pathsep.join([venv_dir, env[\"PATH\"]])\n\n return env\n\n def Popen_options(self) -> dict[str, Any]: # noqa: N802\n \"\"\"Get options for subprocess.Popen.\n\n Returns:\n Mapping of subprocess options.\n \"\"\"\n return {}\n\n @asynccontextmanager\n async def _invoke(\n self,\n *args: str,\n require_preparation: bool = True,\n env: dict[str, Any] | None = None,\n command: str | None = None,\n **kwargs,\n ) -> Generator[list[str], dict[str, Any], dict[str, Any]]: # noqa: WPS221\n env = env or {}\n\n if require_preparation and not self._prepared:\n raise InvokerNotPreparedError()\n\n async with self.plugin.trigger_hooks(\"invoke\", self, args):\n popen_options = {**self.Popen_options(), **kwargs}\n popen_env = {**self.env(), **env}\n popen_args = self.exec_args(*args, command=command, env=popen_env)\n logging.debug(f\"Invoking: {popen_args}\")\n logging.debug(f\"Env: {popen_env}\")\n\n try:\n yield (popen_args, popen_options, popen_env)\n except FileNotFoundError as err:\n raise ExecutableNotFoundError(\n self.plugin, self.plugin.executable\n ) from err\n\n async def invoke_async(self, *args, **kwargs):\n \"\"\"Invoke a command.\n\n Args:\n args: Positional arguments.\n kwargs: Keyword arguments.\n\n Returns:\n Subprocess.\n \"\"\"\n async with self._invoke(*args, **kwargs) as (\n popen_args,\n popen_options,\n popen_env,\n ):\n return await asyncio.create_subprocess_exec(\n *popen_args,\n **popen_options,\n env=popen_env,\n )\n\n async def invoke_docker(self, plugin_command: str, *args, **kwargs) -> int:\n \"\"\"Invoke a containerized command.\n\n Args:\n plugin_command: Plugin command name.\n args: Command line invocation arguments.\n kwargs: Command line invocation keyword arguments.\n\n Raises:\n ValueError: If 
the command doesn't declare a container spec.\n\n Returns:\n The container run exit code.\n \"\"\"\n command_config = self.find_command(plugin_command)\n\n if not command_config.container_spec:\n raise ValueError(\"Command is missing a container spec\")\n\n spec = command_config.container_spec\n service = ContainerService()\n\n logger.debug(\"Running containerized command\", command=plugin_command)\n async with self._invoke(*args, **kwargs) as (proc_args, _, proc_env):\n plugin_name = self.plugin.name\n random_id = uuid.uuid4()\n name = f\"meltano-{plugin_name}--{plugin_command}-{random_id}\"\n\n info = await service.run_container(spec, name, env=proc_env)\n\n return info[\"State\"][\"ExitCode\"]\n\n async def dump(self, file_id: str) -> str:\n \"\"\"Dump a plugin file by id.\n\n Args:\n file_id: Dump this file identifier.\n\n Returns:\n File contents.\n\n Raises:\n __cause__: If file is not found.\n \"\"\"\n try: # noqa: WPS229. Allow try body of length > 1.\n if file_id != \"config\":\n async with self._invoke():\n return self.files[file_id].read_text()\n\n return self.files[file_id].read_text()\n except ExecutableNotFoundError as err: # noqa: WPS329. Allow \"useless\" except.\n # Unwrap FileNotFoundError\n raise err.__cause__ # noqa: WPS609. Allow accessing magic attribute.\n\n def add_output_handler(self, src: str, handler: SubprocessOutputWriter):\n \"\"\"Append an output handler for a given stdio stream.\n\n Args:\n src: stdio source you'd like to subscribe, likely either 'stdout' or 'stderr'\n handler: either a StreamWriter or an object matching the utils.SubprocessOutputWriter proto\n \"\"\"\n if self.output_handlers:\n self.output_handlers[src].append(handler)\n else:\n self.output_handlers = {src: [handler]}\n", "path": "src/meltano/core/plugin_invoker.py" } ]
[ { "content": "\"\"\"Plugin invoker class.\"\"\"\n\nfrom __future__ import annotations\n\nimport asyncio\nimport enum\nimport logging\nimport os\nimport uuid\nfrom contextlib import asynccontextmanager\nfrom pathlib import Path\nfrom typing import Any, Generator\n\nfrom structlog.stdlib import get_logger\n\nfrom meltano.core.container.container_service import ContainerService\nfrom meltano.core.logging.utils import SubprocessOutputWriter\n\nfrom .error import Error\nfrom .plugin import PluginRef\nfrom .plugin.config_service import PluginConfigService\nfrom .plugin.project_plugin import ProjectPlugin\nfrom .plugin.settings_service import PluginSettingsService\nfrom .project import Project\nfrom .project_plugins_service import ProjectPluginsService\nfrom .project_settings_service import ProjectSettingsService\nfrom .settings_service import FeatureFlags\nfrom .utils import expand_env_vars\nfrom .venv_service import VenvService, VirtualEnv\n\nlogger = get_logger(__name__)\n\n\ndef invoker_factory(project, plugin: ProjectPlugin, *args, **kwargs):\n \"\"\"Instantiate a plugin invoker from a project plugin.\n\n Args:\n project: Meltano project.\n plugin: Plugin instance.\n args: Invoker constructor positional arguments.\n kwargs: Invoker constructor keyword arguments.\n\n Returns:\n A plugin invoker.\n \"\"\"\n cls = PluginInvoker # noqa: WPS117\n\n if hasattr(plugin, \"invoker_class\"): # noqa: WPS421\n cls = plugin.invoker_class # noqa: WPS117\n\n return cls(project, plugin, *args, **kwargs)\n\n\nclass InvokerError(Error):\n \"\"\"Generic plugin invoker error.\"\"\"\n\n\nclass ExecutableNotFoundError(InvokerError):\n \"\"\"Occurs when the executable could not be found.\"\"\"\n\n def __init__(self, plugin: PluginRef, executable: str):\n \"\"\"Initialize ExecutableNotFoundError.\n\n Args:\n plugin: Meltano plugin reference.\n executable: Plugin command executable.\n \"\"\"\n plugin_type_descriptor = plugin.type.descriptor.capitalize()\n plugin_type = plugin.type.singular\n super().__init__(\n f\"Executable '{executable}' could not be found. 
\"\n + f\"{plugin_type_descriptor} '{plugin.name}' may not have \"\n + \"been installed yet using \"\n + f\"`meltano install {plugin_type} {plugin.name}`, \"\n + \"or the executable name may be incorrect.\"\n )\n\n\nclass InvokerNotPreparedError(InvokerError):\n \"\"\"Occurs when `invoke` is called before `prepare`.\"\"\"\n\n\nclass UnknownCommandError(InvokerError):\n \"\"\"Occurs when `invoke` is called in command mode with an undefined command.\"\"\"\n\n def __init__(self, plugin: PluginRef, command):\n \"\"\"Initialize UnknownCommandError.\n\n Args:\n plugin: Meltano plugin reference.\n command: Plugin command name.\n \"\"\"\n self.plugin = plugin\n self.command = command\n\n def __str__(self):\n \"\"\"Return error message.\n\n Returns:\n String representation of this exception.\n \"\"\"\n if self.plugin.supported_commands:\n supported_commands = \", \".join(self.plugin.supported_commands)\n desc = f\"supports the following commands: {supported_commands}\"\n else:\n desc = \"does not define any commands.\"\n plugin_type_descriptor = self.plugin.type.descriptor.capitalize()\n plugin_name = self.plugin.name\n return \" \".join(\n [\n f\"Command '{self.command}' could not be found.\",\n f\"{plugin_type_descriptor} '{plugin_name}'\",\n desc,\n ]\n )\n\n\nclass PluginInvoker: # noqa: WPS214, WPS230\n \"\"\"This class handles the invocation of a `ProjectPlugin` instance.\"\"\"\n\n class StdioSource(str, enum.Enum):\n \"\"\"Describes the available unix style std io sources.\"\"\"\n\n STDIN = \"stdin\"\n STDOUT = \"stdout\"\n STDERR = \"stderr\"\n\n def __init__(\n self,\n project: Project,\n plugin: ProjectPlugin,\n context: Any | None = None,\n output_handlers: dict | None = None,\n run_dir: Path | None = None,\n config_dir: Path | None = None,\n venv_service: VenvService | None = None,\n plugins_service: ProjectPluginsService | None = None,\n plugin_config_service: PluginConfigService | None = None,\n plugin_settings_service: PluginSettingsService | None = None,\n ):\n \"\"\"Create a new plugin invoker.\n\n Args:\n project: Meltano Project.\n plugin: Meltano Plugin.\n context: Invocation context.\n output_handlers: Logging and output handlers.\n run_dir: Execution directory.\n config_dir: Configuration files directory.\n venv_service: Virtual Environment manager.\n plugins_service: Plugin manager.\n plugin_config_service: Plugin Configuration manager.\n plugin_settings_service: Plugin Settings manager.\n \"\"\"\n self.project = project\n self.plugin = plugin\n self.context = context\n self.output_handlers = output_handlers\n\n self.venv_service: VenvService | None = None\n if plugin.pip_url or venv_service:\n self.venv_service = venv_service or VenvService(\n project,\n name=plugin.venv_name,\n namespace=plugin.type,\n )\n self.plugin_config_service = plugin_config_service or PluginConfigService(\n plugin,\n config_dir or self.project.plugin_dir(plugin),\n run_dir or self.project.run_dir(plugin.name),\n )\n\n self.plugins_service = plugins_service or ProjectPluginsService(project)\n self.settings_service = plugin_settings_service or PluginSettingsService(\n project,\n plugin,\n plugins_service=self.plugins_service,\n )\n\n self._prepared = False\n self.plugin_config = {}\n self.plugin_config_processed = {}\n self.plugin_config_extras = {}\n self.plugin_config_env = {}\n\n @property\n def capabilities(self):\n \"\"\"Get plugin immutable capabilities.\n\n Makes sure the capabilities are immutable from the `PluginInvoker` interface.\n\n Returns:\n The set of plugin capabilities.\n \"\"\"\n 
return frozenset(self.plugin.capabilities)\n\n @property\n def files(self) -> dict[str, Path]:\n \"\"\"Get all config and output files of the plugin.\n\n Returns:\n A mapping of file IDs to file names.\n \"\"\"\n plugin_files = {**self.plugin.config_files, **self.plugin.output_files}\n\n return {\n _key: self.plugin_config_service.run_dir.joinpath(filename)\n for _key, filename in plugin_files.items()\n }\n\n async def prepare(self, session):\n \"\"\"Prepare plugin config.\n\n Args:\n session: Database session.\n \"\"\"\n self.plugin_config = self.settings_service.as_dict(\n extras=False, session=session\n )\n self.plugin_config_processed = self.settings_service.as_dict(\n extras=False, process=True, session=session\n )\n self.plugin_config_extras = self.settings_service.as_dict(\n extras=True, session=session\n )\n self.plugin_config_env = self.settings_service.as_env(session=session)\n\n async with self.plugin.trigger_hooks(\"configure\", self, session):\n self.plugin_config_service.configure()\n self._prepared = True\n\n async def cleanup(self):\n \"\"\"Reset the plugin config.\"\"\"\n self.plugin_config = {}\n self.plugin_config_processed = {}\n self.plugin_config_extras = {}\n self.plugin_config_env = {}\n\n async with self.plugin.trigger_hooks(\"cleanup\", self):\n self._prepared = False\n\n @asynccontextmanager\n async def prepared(self, session):\n \"\"\"Context manager that prepares plugin config.\n\n Args:\n session: Database session.\n\n Yields:\n Yields to the caller, then resetting the config.\n \"\"\"\n try: # noqa: WPS229. Allow try body of length > 1.\n await self.prepare(session)\n yield\n finally:\n await self.cleanup()\n\n def exec_path(self, executable: str | None = None) -> str | Path:\n \"\"\"Return the absolute path to the executable.\n\n Uses the plugin executable if none is specified.\n\n Args:\n executable: Optional executable string.\n\n Returns:\n Full path to the executable.\n \"\"\"\n executable = executable or self.plugin.executable\n if not self.venv_service:\n if \"/\" not in executable.replace(\"\\\\\", \"/\"):\n # Expect executable on path\n return executable\n\n # Return executable relative to project directory\n return self.project.root.joinpath(executable)\n\n # Return executable within venv\n return self.venv_service.exec_path(executable)\n\n def exec_args(self, *args, command=None, env=None):\n \"\"\"Materialize the arguments to be passed to the executable.\n\n Args:\n args: Optional plugin args.\n command: Plugin command name.\n env: Environment variables\n\n Returns:\n List of plugin invocation arguments.\n \"\"\"\n env = env or {}\n executable = self.exec_path()\n if command:\n command_config = self.find_command(command)\n plugin_args = command_config.expanded_args(command, env)\n if command_config.executable:\n executable = self.exec_path(command_config.executable)\n else:\n plugin_args = self.plugin.exec_args(self)\n\n return [str(arg) for arg in (executable, *plugin_args, *args)]\n\n def find_command(self, name):\n \"\"\"Find a Command by name.\n\n Args:\n name: Command name.\n\n Returns:\n Command instance.\n\n Raises:\n UnknownCommandError: If command is not defined.\n \"\"\"\n try:\n return self.plugin.all_commands[name]\n except KeyError as err:\n raise UnknownCommandError(self.plugin, name) from err\n\n def env(self):\n \"\"\"Environment variable mapping.\n\n Returns:\n Dictionary of environment variables.\n \"\"\"\n project_settings_service = ProjectSettingsService(\n self.project, config_service=self.plugins_service.config_service\n )\n 
with project_settings_service.feature_flag(\n FeatureFlags.STRICT_ENV_VAR_MODE, raise_error=False\n ) as strict_env_var_mode:\n\n # Expand root env w/ os.environ\n expanded_project_env = expand_env_vars(\n project_settings_service.env,\n os.environ,\n raise_if_missing=strict_env_var_mode,\n )\n expanded_project_env.update(\n expand_env_vars(\n self.settings_service.project.dotenv_env,\n os.environ,\n raise_if_missing=strict_env_var_mode,\n )\n )\n # Expand active env w/ expanded root env\n expanded_active_env = (\n expand_env_vars(\n self.settings_service.project.active_environment.env,\n expanded_project_env,\n raise_if_missing=strict_env_var_mode,\n )\n if self.settings_service.project.active_environment\n else {}\n )\n\n # Expand root plugin env w/ expanded active env\n expanded_root_plugin_env = expand_env_vars(\n self.settings_service.plugin.env,\n expanded_active_env,\n raise_if_missing=strict_env_var_mode,\n )\n\n # Expand active env plugin env w/ expanded root plugin env\n expanded_active_env_plugin_env = (\n expand_env_vars(\n self.settings_service.environment_plugin_config.env,\n expanded_root_plugin_env,\n raise_if_missing=strict_env_var_mode,\n )\n if self.settings_service.environment_plugin_config\n else {}\n )\n\n env = {\n **expanded_project_env,\n **self.project.dotenv_env,\n **self.settings_service.env,\n **self.plugin_config_env,\n **expanded_root_plugin_env,\n **expanded_active_env,\n **expanded_active_env_plugin_env,\n }\n\n # Ensure Meltano venv is not inherited\n env.pop(\"VIRTUAL_ENV\", None)\n env.pop(\"PYTHONPATH\", None)\n if self.venv_service:\n # Switch to plugin-specific venv\n venv = VirtualEnv(\n self.project.venvs_dir(self.plugin.type, self.plugin.name)\n )\n venv_dir = str(venv.bin_dir)\n env[\"VIRTUAL_ENV\"] = str(venv.root)\n env[\"PATH\"] = os.pathsep.join([venv_dir, env[\"PATH\"]])\n\n return env\n\n def Popen_options(self) -> dict[str, Any]: # noqa: N802\n \"\"\"Get options for subprocess.Popen.\n\n Returns:\n Mapping of subprocess options.\n \"\"\"\n return {}\n\n @asynccontextmanager\n async def _invoke(\n self,\n *args: str,\n require_preparation: bool = True,\n env: dict[str, Any] | None = None,\n command: str | None = None,\n **kwargs,\n ) -> Generator[list[str], dict[str, Any], dict[str, Any]]: # noqa: WPS221\n env = env or {}\n\n if require_preparation and not self._prepared:\n raise InvokerNotPreparedError()\n\n async with self.plugin.trigger_hooks(\"invoke\", self, args):\n popen_options = {**self.Popen_options(), **kwargs}\n popen_env = {**self.env(), **env}\n popen_args = self.exec_args(*args, command=command, env=popen_env)\n logging.debug(f\"Invoking: {popen_args}\")\n logging.debug(f\"Env: {popen_env}\")\n\n try:\n yield (popen_args, popen_options, popen_env)\n except FileNotFoundError as err:\n raise ExecutableNotFoundError(\n self.plugin, self.plugin.executable\n ) from err\n\n async def invoke_async(self, *args, **kwargs) -> asyncio.subprocess.Process:\n \"\"\"Invoke a command.\n\n Args:\n args: Positional arguments.\n kwargs: Keyword arguments.\n\n Returns:\n Subprocess.\n \"\"\"\n async with self._invoke(*args, **kwargs) as (\n popen_args,\n popen_options,\n popen_env,\n ):\n return await asyncio.create_subprocess_exec(\n *popen_args,\n **popen_options,\n env=popen_env,\n )\n\n async def invoke_docker(self, plugin_command: str, *args, **kwargs) -> int:\n \"\"\"Invoke a containerized command.\n\n Args:\n plugin_command: Plugin command name.\n args: Command line invocation arguments.\n kwargs: Command line invocation keyword 
arguments.\n\n Raises:\n ValueError: If the command doesn't declare a container spec.\n\n Returns:\n The container run exit code.\n \"\"\"\n command_config = self.find_command(plugin_command)\n\n if not command_config.container_spec:\n raise ValueError(\"Command is missing a container spec\")\n\n spec = command_config.container_spec\n service = ContainerService()\n\n logger.debug(\"Running containerized command\", command=plugin_command)\n async with self._invoke(*args, **kwargs) as (proc_args, _, proc_env):\n plugin_name = self.plugin.name\n random_id = uuid.uuid4()\n name = f\"meltano-{plugin_name}--{plugin_command}-{random_id}\"\n\n info = await service.run_container(spec, name, env=proc_env)\n\n return info[\"State\"][\"ExitCode\"]\n\n async def dump(self, file_id: str) -> str:\n \"\"\"Dump a plugin file by id.\n\n Args:\n file_id: Dump this file identifier.\n\n Returns:\n File contents.\n\n Raises:\n __cause__: If file is not found.\n \"\"\"\n try: # noqa: WPS229. Allow try body of length > 1.\n if file_id != \"config\":\n async with self._invoke():\n return self.files[file_id].read_text()\n\n return self.files[file_id].read_text()\n except ExecutableNotFoundError as err: # noqa: WPS329. Allow \"useless\" except.\n # Unwrap FileNotFoundError\n raise err.__cause__ # noqa: WPS609. Allow accessing magic attribute.\n\n def add_output_handler(self, src: str, handler: SubprocessOutputWriter):\n \"\"\"Append an output handler for a given stdio stream.\n\n Args:\n src: stdio source you'd like to subscribe, likely either 'stdout' or 'stderr'\n handler: either a StreamWriter or an object matching the utils.SubprocessOutputWriter proto\n \"\"\"\n if self.output_handlers:\n self.output_handlers[src].append(handler)\n else:\n self.output_handlers = {src: [handler]}\n", "path": "src/meltano/core/plugin_invoker.py" } ]
diff --git a/src/meltano/core/plugin_invoker.py b/src/meltano/core/plugin_invoker.py index 7d6d238391..6f54d2c952 100644 --- a/src/meltano/core/plugin_invoker.py +++ b/src/meltano/core/plugin_invoker.py @@ -433,7 +433,7 @@ async def _invoke( self.plugin, self.plugin.executable ) from err - async def invoke_async(self, *args, **kwargs): + async def invoke_async(self, *args, **kwargs) -> asyncio.subprocess.Process: """Invoke a command. Args: diff --git a/src/meltano/core/plugin_test_service.py b/src/meltano/core/plugin_test_service.py index 8f56a7f22c..47a6031d9e 100644 --- a/src/meltano/core/plugin_test_service.py +++ b/src/meltano/core/plugin_test_service.py @@ -18,12 +18,24 @@ class PluginTestServiceFactory: """Factory class to resolve a plugin test service.""" def __init__(self, plugin_invoker: PluginInvoker): - """Construct a PluginTestServiceFactory instance.""" + """Construct a PluginTestServiceFactory instance. + + Args: + plugin_invoker: The invocation instance of the plugin to test. + """ self.plugin_invoker = plugin_invoker def get_test_service(self): - """Resolve a test service instance for a plugin type.""" + """Resolve a test service instance for a plugin type. + + Returns: + The test service instance. + + Raises: + PluginNotSupportedError: If the plugin type is not supported for testing. + """ test_services = {PluginType.EXTRACTORS: ExtractorTestService} + try: return test_services[self.plugin_invoker.plugin.type](self.plugin_invoker) except KeyError as err: @@ -34,19 +46,27 @@ class PluginTestService(ABC): """Abstract base class for plugin test operations.""" def __init__(self, plugin_invoker: PluginInvoker): - """Construct a PluginTestService instance.""" + """Construct a PluginTestService instance. + + Args: + plugin_invoker: The invocation instance of the plugin to test + """ self.plugin_invoker = plugin_invoker @abstractmethod - def validate(self) -> bool | str: + async def validate(self) -> tuple[bool, str]: """Abstract method to validate plugin configuration.""" class ExtractorTestService(PluginTestService): """Handle extractor test operations.""" - async def validate(self) -> bool | str: - """Validate extractor configuration.""" + async def validate(self) -> tuple[bool, str]: + """Validate extractor configuration. + + Returns: + The validation result and supporting context message (if applicable). 
+ """ process = None try: @@ -71,8 +91,13 @@ async def validate(self) -> bool | str: if message_type == "RECORD": process.terminate() - return True, None + break - await process.wait() + returncode = await process.wait() - return False, last_line if process.returncode else "No RECORD message received" + # considered valid if subprocess is terminated (exit status < 0) on RECORD message received + # see https://docs.python.org/3/library/subprocess.html#subprocess.CompletedProcess.returncode + return ( + returncode < 0, + last_line if returncode else "No RECORD message received", + ) diff --git a/tests/meltano/api/controllers/test_orchestration.py b/tests/meltano/api/controllers/test_orchestration.py index dd58bed53b..07202349e9 100644 --- a/tests/meltano/api/controllers/test_orchestration.py +++ b/tests/meltano/api/controllers/test_orchestration.py @@ -133,8 +133,8 @@ def test_test_plugin_configuration_success( mock_invoke = mock.Mock() mock_invoke.sterr.at_eof.side_effect = True mock_invoke.stdout.at_eof.side_effect = (False, True) - mock_invoke.wait = AsyncMock(return_value=0) - mock_invoke.returncode = 0 + mock_invoke.wait = AsyncMock(return_value=-1) + mock_invoke.returncode = -1 payload = json.dumps({"type": "RECORD"}).encode() mock_invoke.stdout.readline = AsyncMock(return_value=b"%b" % payload) diff --git a/tests/meltano/cli/test_config.py b/tests/meltano/cli/test_config.py index 01cc47b318..45702978f8 100644 --- a/tests/meltano/cli/test_config.py +++ b/tests/meltano/cli/test_config.py @@ -78,8 +78,8 @@ def test_config_test( mock_invoke = mock.Mock() mock_invoke.sterr.at_eof.side_effect = True mock_invoke.stdout.at_eof.side_effect = (False, True) - mock_invoke.wait = AsyncMock(return_value=0) - mock_invoke.returncode = 0 + mock_invoke.wait = AsyncMock(return_value=-1) + mock_invoke.returncode = -1 payload = json.dumps({"type": "RECORD"}).encode() mock_invoke.stdout.readline = AsyncMock(return_value=b"%b" % payload) diff --git a/tests/meltano/core/test_plugins_test_service.py b/tests/meltano/core/test_plugin_test_service.py similarity index 95% rename from tests/meltano/core/test_plugins_test_service.py rename to tests/meltano/core/test_plugin_test_service.py index 68f50f15ee..03f0a8c6c1 100644 --- a/tests/meltano/core/test_plugins_test_service.py +++ b/tests/meltano/core/test_plugin_test_service.py @@ -45,8 +45,8 @@ class TestExtractorTestService: def setup(self, mock_invoker): self.mock_invoke = Mock() self.mock_invoke.name = "utility-mock" - self.mock_invoke.wait = AsyncMock(return_value=0) - self.mock_invoke.returncode = 0 + self.mock_invoke.wait = AsyncMock(return_value=-1) + self.mock_invoke.returncode = -1 self.mock_invoker = mock_invoker self.mock_invoker.invoke_async = AsyncMock(return_value=self.mock_invoke) @@ -61,7 +61,7 @@ async def test_validate_success(self): is_valid, detail = await ExtractorTestService(self.mock_invoker).validate() assert is_valid - assert detail is None + assert detail == MOCK_RECORD_MESSAGE @pytest.mark.asyncio async def test_validate_success_ignore_non_json(self): @@ -74,7 +74,7 @@ async def test_validate_success_ignore_non_json(self): is_valid, detail = await ExtractorTestService(self.mock_invoker).validate() assert is_valid - assert detail is None + assert detail == MOCK_RECORD_MESSAGE @pytest.mark.asyncio async def test_validate_success_ignore_non_record_msg(self): @@ -90,7 +90,7 @@ async def test_validate_success_ignore_non_record_msg(self): is_valid, detail = await ExtractorTestService(self.mock_invoker).validate() assert is_valid - assert 
detail is None + assert detail == MOCK_RECORD_MESSAGE @pytest.mark.asyncio async def test_validate_success_stop_after_record_msg(self): @@ -107,7 +107,7 @@ async def test_validate_success_stop_after_record_msg(self): is_valid, detail = await ExtractorTestService(self.mock_invoker).validate() assert is_valid - assert detail is None + assert detail == MOCK_RECORD_MESSAGE assert self.mock_invoke.stdout.readline.call_count == 2 @@ -119,6 +119,9 @@ async def test_validate_failure_no_record_msg(self): return_value=(b"%b" % MOCK_STATE_MESSAGE.encode()) ) + self.mock_invoke.wait = AsyncMock(return_value=0) + self.mock_invoke.returncode = 0 + is_valid, detail = await ExtractorTestService(self.mock_invoker).validate() assert not is_valid
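The merged change above takes a different route from the reporter's event-loop workaround: instead of swapping `asyncio.run` for a manually managed loop, the extractor test now `break`s out of the read loop and awaits `process.wait()`, letting the loop clean up the subprocess before it shuts down, which avoids the "Event loop is closed" destructor error. A condensed sketch of that flow follows; it is simplified from `ExtractorTestService.validate`, not the verbatim implementation, and `invoke_tap` is a placeholder for the plugin invoker.

```python
import asyncio

async def validate(invoke_tap):
    """Run a tap and report whether it emitted a RECORD message (simplified)."""
    process = await invoke_tap(stdout=asyncio.subprocess.PIPE,
                               stderr=asyncio.subprocess.PIPE)
    last_line = b""
    while not process.stdout.at_eof():
        last_line = await process.stdout.readline()
        if b'"type": "RECORD"' in last_line:
            process.terminate()
            break  # fall through to wait() instead of returning immediately
    # Reaping the process here keeps its transport from being garbage-collected
    # only after asyncio.run() has already closed the event loop.
    returncode = await process.wait()
    # A negative return code means we terminated the tap after seeing a RECORD.
    return returncode < 0, last_line.decode() or "No RECORD message received"
```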
GPflow__GPflow-2052
Missing Reference to Manipulating Kernels Page

# Documentation/tutorial notebooks

In the [Kernel Design page](https://gpflow.github.io/GPflow/2.7.0/notebooks/tailor/kernel_design.html), there is a missing reference to the "Manipulating Kernels" notebook at the end. This notebook is accessible from the old documentation, up to [2.6.4](https://gpflow.github.io/GPflow/2.6.4/notebooks/advanced/kernels.html). It seems that, for some reason, this page was removed. Maybe it was considered unnecessary, as some of the information is given in the getting-started page, but I disagree: I believe it gives a more comprehensive review of the available kernel implementations, so it would be nice to have it back.

As a side note, for some reason the 2.6.4 documentation insists on a dark theme for me, but I prefer the light theme. Is there an option to change this? I am forced to clear the cookies to get a light background.
[ { "content": "# ---\n# jupyter:\n# jupytext:\n# formats: ipynb,.pct.py:percent\n# text_representation:\n# extension: .py\n# format_name: percent\n# format_version: '1.3'\n# jupytext_version: 1.3.3\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\n# %% [markdown]\n# # Kernel Design\n#\n# It's easy to make new kernels in GPflow. To demonstrate, we'll have a look at the Brownian motion kernel, whose function is\n# \\begin{equation}\n# k(x, x') = \\sigma^2 \\text{min}(x, x')\n# \\end{equation}\n# where $\\sigma^2$ is a variance parameter.\n\n# %%\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport tensorflow as tf\n\nimport gpflow\nfrom gpflow.utilities import positive, print_summary\n\nplt.style.use(\"ggplot\")\n# %matplotlib inline\n\n# %% [markdown]\n# To make this new kernel class, we inherit from the base class `gpflow.kernels.Kernel` and implement the three functions below. **NOTE:** Depending on the kernel to be implemented, other classes can be more adequate. For example, if the kernel to be implemented is isotropic stationary, you can immediately subclass `gpflow.kernels.IsotropicStationary` (at which point you\n# only have to override `K_r` or `K_r2`; see the `IsotropicStationary` class docstring). Stationary but anisotropic kernels should subclass `gpflow.kernels.AnisotropicStationary` and override `K_d`.\n#\n# #### `__init__`\n# In this simple example, the constructor takes no argument (though it could, if that was convenient, for example to pass in an initial value for `variance`). It *must* call the constructor of the superclass with appropriate arguments. Brownian motion is only defined in one dimension, and we'll assume that the `active_dims` are `[0]`, for simplicity.\n#\n# We've added a parameter to the kernel using the `Parameter` class. Using this class lets the parameter be used in computing the kernel function, and it will automatically be recognised for optimization (or MCMC). Here, the variance parameter is initialized at 1, and constrained to be positive.\n#\n# #### `K`\n# This is where you implement the kernel function itself. This takes two arguments, `X` and `X2`. By convention, we make the second argument optional (it defaults to `None`).\n#\n# Inside `K`, all the computation must be done with TensorFlow - here we've used `tf.minimum`. When GPflow executes the `K` function, `X` and `X2` will be TensorFlow tensors, and parameters such as `self.variance` behave like TensorFlow tensors as well.\n#\n# #### `K_diag`\n# This convenience function allows GPflow to save memory at predict time. It's simply the diagonal of the `K` function, in the case where `X2` is `None`. 
It must return a one-dimensional vector, so we use TensorFlow's reshape command.\n\n# %%\nclass Brownian(gpflow.kernels.Kernel):\n def __init__(self):\n super().__init__(active_dims=[0])\n self.variance = gpflow.Parameter(1.0, transform=positive())\n\n def K(self, X, X2=None):\n if X2 is None:\n X2 = X\n return self.variance * tf.minimum(\n X, tf.transpose(X2)\n ) # this returns a 2D tensor\n\n def K_diag(self, X):\n return self.variance * tf.reshape(X, (-1,)) # this returns a 1D tensor\n\n\nk_brownian = Brownian()\nprint_summary(k_brownian, fmt=\"notebook\")\n\n# %% [markdown]\n# We can now evaluate our new kernel function and draw samples from a Gaussian process with this covariance:\n\n# %%\nnp.random.seed(23) # for reproducibility\n\n\ndef plotkernelsample(k, ax, xmin=0, xmax=3):\n xx = np.linspace(xmin, xmax, 300)[:, None]\n K = k(xx)\n ax.plot(xx, np.random.multivariate_normal(np.zeros(300), K, 5).T)\n ax.set_title(\"Samples \" + k.__class__.__name__)\n\n\ndef plotkernelfunction(k, ax, xmin=0, xmax=3, other=0):\n xx = np.linspace(xmin, xmax, 100)[:, None]\n ax.plot(xx, k(xx, np.zeros((1, 1)) + other))\n ax.set_title(k.__class__.__name__ + \" k(x, %.1f)\" % other)\n\n\nf, axes = plt.subplots(1, 2, figsize=(12, 4), sharex=True)\nplotkernelfunction(k_brownian, axes[0], other=2.0)\nplotkernelsample(k_brownian, axes[1])\n\n# %% [markdown]\n# ## Using the kernel in a model\n#\n# Because we've inherited from the `Kernel` base class, this new kernel has all the properties needed to be used in GPflow. It also has some convenience features such as allowing the user to call\n#\n# `k(X, X2)`\n#\n# which computes the kernel matrix.\n#\n# To show that this kernel works, let's use it inside GP regression. We'll see that Brownian motion has quite interesting properties. To add a little flexibility, we'll add a `Constant` kernel to our `Brownian` kernel, and the `GPR` class will handle the noise.\n\n# %%\nnp.random.seed(42)\nX = np.random.rand(5, 1)\nY = np.sin(X * 6) + np.random.randn(*X.shape) * 0.001\n\nk1 = Brownian()\nk2 = gpflow.kernels.Constant()\nk = k1 + k2\n\nm = gpflow.models.GPR((X, Y), kernel=k)\n# m.likelihood.variance.assign(1e-6)\n\nopt = gpflow.optimizers.Scipy()\nopt.minimize(m.training_loss, variables=m.trainable_variables)\nprint_summary(m, fmt=\"notebook\")\n\nxx = np.linspace(0, 1.1, 100).reshape(100, 1)\nmean, var = m.predict_y(xx)\nplt.plot(X, Y, \"kx\", mew=2)\n(line,) = plt.plot(xx, mean, lw=2)\n_ = plt.fill_between(\n xx[:, 0],\n mean[:, 0] - 2 * np.sqrt(var[:, 0]),\n mean[:, 0] + 2 * np.sqrt(var[:, 0]),\n color=line.get_color(),\n alpha=0.2,\n)\n\n# %% [markdown]\n# ## See also\n#\n# For more details on how to manipulate existing kernels (or the one you just created!), we refer to the [Manipulating kernels](../advanced/kernels.ipynb) notebook.\n", "path": "doc/sphinx/notebooks/tailor/kernel_design.pct.py" } ]
[ { "content": "# ---\n# jupyter:\n# jupytext:\n# formats: ipynb,.pct.py:percent\n# text_representation:\n# extension: .py\n# format_name: percent\n# format_version: '1.3'\n# jupytext_version: 1.3.3\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\n# %% [markdown]\n# # Kernel Design\n#\n# It's easy to make new kernels in GPflow. To demonstrate, we'll have a look at the Brownian motion kernel, whose function is\n# \\begin{equation}\n# k(x, x') = \\sigma^2 \\text{min}(x, x')\n# \\end{equation}\n# where $\\sigma^2$ is a variance parameter.\n\n# %%\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport tensorflow as tf\n\nimport gpflow\nfrom gpflow.utilities import positive, print_summary\n\nplt.style.use(\"ggplot\")\n# %matplotlib inline\n\n# %% [markdown]\n# To make this new kernel class, we inherit from the base class `gpflow.kernels.Kernel` and implement the three functions below. **NOTE:** Depending on the kernel to be implemented, other classes can be more adequate. For example, if the kernel to be implemented is isotropic stationary, you can immediately subclass `gpflow.kernels.IsotropicStationary` (at which point you\n# only have to override `K_r` or `K_r2`; see the `IsotropicStationary` class docstring). Stationary but anisotropic kernels should subclass `gpflow.kernels.AnisotropicStationary` and override `K_d`.\n#\n# #### `__init__`\n# In this simple example, the constructor takes no argument (though it could, if that was convenient, for example to pass in an initial value for `variance`). It *must* call the constructor of the superclass with appropriate arguments. Brownian motion is only defined in one dimension, and we'll assume that the `active_dims` are `[0]`, for simplicity.\n#\n# We've added a parameter to the kernel using the `Parameter` class. Using this class lets the parameter be used in computing the kernel function, and it will automatically be recognised for optimization (or MCMC). Here, the variance parameter is initialized at 1, and constrained to be positive.\n#\n# #### `K`\n# This is where you implement the kernel function itself. This takes two arguments, `X` and `X2`. By convention, we make the second argument optional (it defaults to `None`).\n#\n# Inside `K`, all the computation must be done with TensorFlow - here we've used `tf.minimum`. When GPflow executes the `K` function, `X` and `X2` will be TensorFlow tensors, and parameters such as `self.variance` behave like TensorFlow tensors as well.\n#\n# #### `K_diag`\n# This convenience function allows GPflow to save memory at predict time. It's simply the diagonal of the `K` function, in the case where `X2` is `None`. 
It must return a one-dimensional vector, so we use TensorFlow's reshape command.\n\n# %%\nclass Brownian(gpflow.kernels.Kernel):\n def __init__(self):\n super().__init__(active_dims=[0])\n self.variance = gpflow.Parameter(1.0, transform=positive())\n\n def K(self, X, X2=None):\n if X2 is None:\n X2 = X\n return self.variance * tf.minimum(\n X, tf.transpose(X2)\n ) # this returns a 2D tensor\n\n def K_diag(self, X):\n return self.variance * tf.reshape(X, (-1,)) # this returns a 1D tensor\n\n\nk_brownian = Brownian()\nprint_summary(k_brownian, fmt=\"notebook\")\n\n# %% [markdown]\n# We can now evaluate our new kernel function and draw samples from a Gaussian process with this covariance:\n\n# %%\nnp.random.seed(23) # for reproducibility\n\n\ndef plotkernelsample(k, ax, xmin=0, xmax=3):\n xx = np.linspace(xmin, xmax, 300)[:, None]\n K = k(xx)\n ax.plot(xx, np.random.multivariate_normal(np.zeros(300), K, 5).T)\n ax.set_title(\"Samples \" + k.__class__.__name__)\n\n\ndef plotkernelfunction(k, ax, xmin=0, xmax=3, other=0):\n xx = np.linspace(xmin, xmax, 100)[:, None]\n ax.plot(xx, k(xx, np.zeros((1, 1)) + other))\n ax.set_title(k.__class__.__name__ + \" k(x, %.1f)\" % other)\n\n\nf, axes = plt.subplots(1, 2, figsize=(12, 4), sharex=True)\nplotkernelfunction(k_brownian, axes[0], other=2.0)\nplotkernelsample(k_brownian, axes[1])\n\n# %% [markdown]\n# ## Using the kernel in a model\n#\n# Because we've inherited from the `Kernel` base class, this new kernel has all the properties needed to be used in GPflow. It also has some convenience features such as allowing the user to call\n#\n# `k(X, X2)`\n#\n# which computes the kernel matrix.\n#\n# To show that this kernel works, let's use it inside GP regression. We'll see that Brownian motion has quite interesting properties. To add a little flexibility, we'll add a `Constant` kernel to our `Brownian` kernel, and the `GPR` class will handle the noise.\n\n# %%\nnp.random.seed(42)\nX = np.random.rand(5, 1)\nY = np.sin(X * 6) + np.random.randn(*X.shape) * 0.001\n\nk1 = Brownian()\nk2 = gpflow.kernels.Constant()\nk = k1 + k2\n\nm = gpflow.models.GPR((X, Y), kernel=k)\n# m.likelihood.variance.assign(1e-6)\n\nopt = gpflow.optimizers.Scipy()\nopt.minimize(m.training_loss, variables=m.trainable_variables)\nprint_summary(m, fmt=\"notebook\")\n\nxx = np.linspace(0, 1.1, 100).reshape(100, 1)\nmean, var = m.predict_y(xx)\nplt.plot(X, Y, \"kx\", mew=2)\n(line,) = plt.plot(xx, mean, lw=2)\n_ = plt.fill_between(\n xx[:, 0],\n mean[:, 0] - 2 * np.sqrt(var[:, 0]),\n mean[:, 0] + 2 * np.sqrt(var[:, 0]),\n color=line.get_color(),\n alpha=0.2,\n)\n\n# %% [markdown]\n# ## See also\n#\n# For more details on how to manipulate existing kernels (or the one you just created!), please refer to the [kernels](../getting_started/kernels.ipynb) notebook.\n", "path": "doc/sphinx/notebooks/tailor/kernel_design.pct.py" } ]
diff --git a/doc/sphinx/notebooks/tailor/kernel_design.pct.py b/doc/sphinx/notebooks/tailor/kernel_design.pct.py index b72f93be9..67fd32c66 100644 --- a/doc/sphinx/notebooks/tailor/kernel_design.pct.py +++ b/doc/sphinx/notebooks/tailor/kernel_design.pct.py @@ -136,4 +136,4 @@ def plotkernelfunction(k, ax, xmin=0, xmax=3, other=0): # %% [markdown] # ## See also # -# For more details on how to manipulate existing kernels (or the one you just created!), we refer to the [Manipulating kernels](../advanced/kernels.ipynb) notebook. +# For more details on how to manipulate existing kernels (or the one you just created!), please refer to the [kernels](../getting_started/kernels.ipynb) notebook.
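The kernel-design notebook in the record above notes that isotropic stationary kernels can subclass `gpflow.kernels.IsotropicStationary` and only override `K_r` or `K_r2`, but it never shows that shortcut. Below is a minimal sketch of it, assuming GPflow 2.x; the `Cauchy` name and the exact kernel form are illustrative choices, not part of the record.

```python
import gpflow
from gpflow.utilities import print_summary


class Cauchy(gpflow.kernels.IsotropicStationary):
    """k(r2) = variance / (1 + r2), where r2 is the squared distance
    already scaled by the lengthscales handled in the base class."""

    def K_r2(self, r2):
        return self.variance / (1.0 + r2)


k = Cauchy()  # variance and lengthscales parameters come from the base class
print_summary(k)
```

Because the base class implements `K` and `K_diag` in terms of `K_r2`, this single method should be enough for the kernel to be usable in a model such as `gpflow.models.GPR`.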
conda__conda-build-1493
Getting a "Error: no such patch:" when the field is empty @msarahan I noticed that latest `conda-build` is failing when a field is empty for certain platforms. This used to work: ```yaml patches: - skip_failing_test.patch # [osx] - makefile.vc.patch # [win] ``` but now I need to add a `# [not linux]` to the ` patches` filed otherwise I get: ``` Source cache directory is: /opt/conda/conda-bld/src_cache Downloading source to cache: libspatialite-4.3.0a.tar.gz Downloading http://www.gaia-gis.it/gaia-sins/libspatialite-4.3.0a.tar.gz Success Error: no such patch: /conda-recipes/recipes/libspatialite/ ./scripts/run_docker_build.sh returned exit code 1 ``` I see similar failures for tests too. recipe: https://github.com/ioos/conda-recipes/blob/master/recipes/libspatialite/meta.yaml#L11 CI log: https://circleci.com/gh/ioos/conda-recipes/1363
[ { "content": "from __future__ import absolute_import, division, print_function\n\nfrom collections import defaultdict\nimport contextlib\nfrom difflib import get_close_matches\nimport fnmatch\nfrom glob import glob\nfrom locale import getpreferredencoding\nimport logging\nimport operator\nimport os\nfrom os.path import dirname, getmtime, getsize, isdir, join, isfile, abspath\nimport re\nimport stat\nimport subprocess\n\nimport sys\nimport shutil\nimport tarfile\nimport tempfile\nimport zipfile\n\nimport filelock\n\nfrom .conda_interface import md5_file, unix_path_to_win, win_path_to_unix\nfrom .conda_interface import PY3, iteritems\nfrom .conda_interface import linked\nfrom .conda_interface import bits, root_dir\n\nfrom conda_build.os_utils import external\n\nif PY3:\n import urllib.parse as urlparse\n import urllib.request as urllib\nelse:\n import urlparse\n import urllib\n\n\nlog = logging.getLogger(__file__)\n\n# elsewhere, kept here for reduced duplication. NOQA because it is not used in this file.\nfrom .conda_interface import rm_rf # NOQA\n\non_win = (sys.platform == 'win32')\n\ncodec = getpreferredencoding() or 'utf-8'\non_win = sys.platform == \"win32\"\nlog = logging.getLogger(__file__)\nroot_script_dir = os.path.join(root_dir, 'Scripts' if on_win else 'bin')\n\n\nPY_TMPL = \"\"\"\\\nif __name__ == '__main__':\n import sys\n import %(module)s\n\n sys.exit(%(module)s.%(func)s())\n\"\"\"\n\n\ndef get_recipe_abspath(recipe):\n \"\"\"resolve recipe dir as absolute path. If recipe is a tarball rather than a folder,\n extract it and return the extracted directory.\n\n Returns the absolute path, and a boolean flag that is true if a tarball has been extracted\n and needs cleanup.\n \"\"\"\n # Don't use byte literals for paths in Python 2\n if not PY3:\n recipe = recipe.decode(getpreferredencoding() or 'utf-8')\n if isfile(recipe):\n if recipe.endswith(('.tar', '.tar.gz', '.tgz', '.tar.bz2')):\n recipe_dir = tempfile.mkdtemp()\n t = tarfile.open(recipe, 'r:*')\n t.extractall(path=recipe_dir)\n t.close()\n need_cleanup = True\n else:\n print(\"Ignoring non-recipe: %s\" % recipe)\n return (None, None)\n else:\n recipe_dir = abspath(recipe)\n need_cleanup = False\n return recipe_dir, need_cleanup\n\n\ndef copy_into(src, dst, timeout=90, symlinks=False):\n \"Copy all the files and directories in src to the directory dst\"\n if isdir(src):\n merge_tree(src, dst, symlinks, timeout=timeout)\n\n else:\n if isdir(dst):\n dst_fn = os.path.join(dst, os.path.basename(src))\n else:\n dst_fn = dst\n\n lock = None\n if os.path.isabs(src):\n src_folder = os.path.dirname(src)\n lock = filelock.SoftFileLock(join(src_folder, \".conda_lock\"))\n try:\n if os.path.sep in dst_fn and not os.path.isdir(os.path.dirname(dst_fn)):\n os.makedirs(os.path.dirname(dst_fn))\n if lock:\n lock.acquire(timeout=timeout)\n # with each of these, we are copying less metadata. 
This seems to be necessary\n # to cope with some shared filesystems with some virtual machine setups.\n # See https://github.com/conda/conda-build/issues/1426\n try:\n shutil.copy2(src, dst_fn)\n except OSError:\n try:\n shutil.copy(src, dst_fn)\n except OSError:\n shutil.copyfile(src, dst_fn)\n except shutil.Error:\n log.debug(\"skipping %s - already exists in %s\", os.path.basename(src), dst)\n finally:\n if lock:\n lock.release()\n\n\n# http://stackoverflow.com/a/22331852/1170370\ndef copytree(src, dst, symlinks=False, ignore=None, dry_run=False):\n if not os.path.exists(dst):\n os.makedirs(dst)\n shutil.copystat(src, dst)\n lst = os.listdir(src)\n if ignore:\n excl = ignore(src, lst)\n lst = [x for x in lst if x not in excl]\n\n dst_lst = [os.path.join(dst, item) for item in lst]\n\n if not dry_run:\n for idx, item in enumerate(lst):\n s = os.path.join(src, item)\n d = dst_lst[idx]\n if symlinks and os.path.islink(s):\n if os.path.lexists(d):\n os.remove(d)\n os.symlink(os.readlink(s), d)\n try:\n st = os.lstat(s)\n mode = stat.S_IMODE(st.st_mode)\n os.lchmod(d, mode)\n except:\n pass # lchmod not available\n elif os.path.isdir(s):\n copytree(s, d, symlinks, ignore)\n else:\n try:\n shutil.copy2(s, d)\n except IOError:\n try:\n shutil.copy(s, d)\n except IOError:\n shutil.copyfile(s, d)\n return dst_lst\n\n\ndef merge_tree(src, dst, symlinks=False, timeout=90):\n \"\"\"\n Merge src into dst recursively by copying all files from src into dst.\n Return a list of all files copied.\n\n Like copytree(src, dst), but raises an error if merging the two trees\n would overwrite any files.\n \"\"\"\n assert src not in dst, (\"Can't merge/copy source into subdirectory of itself. Please create \"\n \"separate spaces for these things.\")\n\n new_files = copytree(src, dst, symlinks=symlinks, dry_run=True)\n # do not copy lock files\n new_files = [f for f in new_files if not f.endswith('.conda_lock')]\n existing = [f for f in new_files if isfile(f)]\n\n if existing:\n raise IOError(\"Can't merge {0} into {1}: file exists: \"\n \"{2}\".format(src, dst, existing[0]))\n\n lock = filelock.SoftFileLock(join(src, \".conda_lock\"))\n lock.acquire(timeout=timeout)\n try:\n copytree(src, dst, symlinks=symlinks)\n except:\n raise\n finally:\n lock.release()\n rm_rf(os.path.join(dst, '.conda_lock'))\n\n\ndef relative(f, d='lib'):\n assert not f.startswith('/'), f\n assert not d.startswith('/'), d\n d = d.strip('/').split('/')\n if d == ['.']:\n d = []\n f = dirname(f).split('/')\n if f == ['']:\n f = []\n while d and f and d[0] == f[0]:\n d.pop(0)\n f.pop(0)\n return '/'.join(((['..'] * len(f)) if f else ['.']) + d)\n\n\ndef _check_call(args, **kwargs):\n try:\n subprocess.check_call(args, **kwargs)\n except subprocess.CalledProcessError:\n sys.exit('Command failed: %s' % ' '.join(args))\n\n\ndef tar_xf(tarball, dir_path, mode='r:*'):\n if tarball.lower().endswith('.tar.z'):\n uncompress = external.find_executable('uncompress')\n if not uncompress:\n uncompress = external.find_executable('gunzip')\n if not uncompress:\n sys.exit(\"\"\"\\\nuncompress (or gunzip) is required to unarchive .z source files.\n\"\"\")\n subprocess.check_call([uncompress, '-f', tarball])\n tarball = tarball[:-2]\n if not PY3 and tarball.endswith('.tar.xz'):\n unxz = external.find_executable('unxz')\n if not unxz:\n sys.exit(\"\"\"\\\nunxz is required to unarchive .xz source files.\n\"\"\")\n\n subprocess.check_call([unxz, '-f', '-k', tarball])\n tarball = tarball[:-3]\n t = tarfile.open(tarball, mode)\n t.extractall(path=dir_path)\n 
t.close()\n\n\ndef unzip(zip_path, dir_path):\n z = zipfile.ZipFile(zip_path)\n for name in z.namelist():\n if name.endswith('/'):\n continue\n path = join(dir_path, *name.split('/'))\n dp = dirname(path)\n if not isdir(dp):\n os.makedirs(dp)\n with open(path, 'wb') as fo:\n fo.write(z.read(name))\n z.close()\n\n\ndef file_info(path):\n return {'size': getsize(path),\n 'md5': md5_file(path),\n 'mtime': getmtime(path)}\n\n# Taken from toolz\n\n\ndef groupby(key, seq):\n \"\"\" Group a collection by a key function\n >>> names = ['Alice', 'Bob', 'Charlie', 'Dan', 'Edith', 'Frank']\n >>> groupby(len, names) # doctest: +SKIP\n {3: ['Bob', 'Dan'], 5: ['Alice', 'Edith', 'Frank'], 7: ['Charlie']}\n >>> iseven = lambda x: x % 2 == 0\n >>> groupby(iseven, [1, 2, 3, 4, 5, 6, 7, 8]) # doctest: +SKIP\n {False: [1, 3, 5, 7], True: [2, 4, 6, 8]}\n Non-callable keys imply grouping on a member.\n >>> groupby('gender', [{'name': 'Alice', 'gender': 'F'},\n ... {'name': 'Bob', 'gender': 'M'},\n ... {'name': 'Charlie', 'gender': 'M'}]) # doctest:+SKIP\n {'F': [{'gender': 'F', 'name': 'Alice'}],\n 'M': [{'gender': 'M', 'name': 'Bob'},\n {'gender': 'M', 'name': 'Charlie'}]}\n See Also:\n countby\n \"\"\"\n if not callable(key):\n key = getter(key)\n d = defaultdict(lambda: [].append)\n for item in seq:\n d[key(item)](item)\n rv = {}\n for k, v in iteritems(d):\n rv[k] = v.__self__\n return rv\n\n\ndef getter(index):\n if isinstance(index, list):\n if len(index) == 1:\n index = index[0]\n return lambda x: (x[index],)\n elif index:\n return operator.itemgetter(*index)\n else:\n return lambda x: ()\n else:\n return operator.itemgetter(index)\n\n\ndef comma_join(items):\n \"\"\"\n Like ', '.join(items) but with and\n\n Examples:\n\n >>> comma_join(['a'])\n 'a'\n >>> comma_join(['a', 'b'])\n 'a and b'\n >>> comma_join(['a', 'b', 'c])\n 'a, b, and c'\n \"\"\"\n return ' and '.join(items) if len(items) <= 2 else ', '.join(items[:-1]) + ', and ' + items[-1]\n\n\ndef safe_print_unicode(*args, **kwargs):\n \"\"\"\n prints unicode strings to stdout using configurable `errors` handler for\n encoding errors\n\n :param args: unicode strings to print to stdout\n :param sep: separator (defaults to ' ')\n :param end: ending character (defaults to '\\n')\n :param errors: error handler for encoding errors (defaults to 'replace')\n \"\"\"\n sep = kwargs.pop('sep', u' ')\n end = kwargs.pop('end', u'\\n')\n errors = kwargs.pop('errors', 'replace')\n if PY3:\n func = sys.stdout.buffer.write\n else:\n func = sys.stdout.write\n line = sep.join(args) + end\n encoding = sys.stdout.encoding or 'utf8'\n func(line.encode(encoding, errors))\n\n\ndef rec_glob(path, patterns):\n result = []\n for d_f in os.walk(path):\n m = []\n for pattern in patterns:\n m.extend(fnmatch.filter(d_f[2], pattern))\n if m:\n result.extend([os.path.join(d_f[0], f) for f in m])\n return result\n\n\ndef convert_unix_path_to_win(path):\n if external.find_executable('cygpath'):\n cmd = \"cygpath -w {0}\".format(path)\n if PY3:\n path = subprocess.getoutput(cmd)\n else:\n path = subprocess.check_output(cmd.split()).rstrip().rstrip(\"\\\\\")\n\n else:\n path = unix_path_to_win(path)\n return path\n\n\ndef convert_win_path_to_unix(path):\n if external.find_executable('cygpath'):\n cmd = \"cygpath -u {0}\".format(path)\n if PY3:\n path = subprocess.getoutput(cmd)\n else:\n path = subprocess.check_output(cmd.split()).rstrip().rstrip(\"\\\\\")\n\n else:\n path = win_path_to_unix(path)\n return path\n\n\n# Used for translating local paths into url (file://) paths\n# 
http://stackoverflow.com/a/14298190/1170370\ndef path2url(path):\n return urlparse.urljoin('file:', urllib.pathname2url(path))\n\n\ndef get_stdlib_dir(prefix):\n if sys.platform == 'win32':\n stdlib_dir = os.path.join(prefix, 'Lib')\n else:\n lib_dir = os.path.join(prefix, 'lib')\n stdlib_dir = glob(os.path.join(lib_dir, 'python[0-9\\.]*'))\n if not stdlib_dir:\n stdlib_dir = ''\n else:\n stdlib_dir = stdlib_dir[0]\n return stdlib_dir\n\n\ndef get_site_packages(prefix):\n stdlib_dir = get_stdlib_dir(prefix)\n sp = ''\n if stdlib_dir:\n sp = os.path.join(stdlib_dir, 'site-packages')\n return sp\n\n\ndef get_build_folders(croot):\n # remember, glob is not a regex.\n return glob(os.path.join(croot, \"*\" + \"[0-9]\" * 10 + \"*\"))\n\n\ndef silence_loggers(show_warnings_and_errors=True):\n if show_warnings_and_errors:\n log_level = logging.WARN\n else:\n log_level = logging.CRITICAL + 1\n logging.getLogger(os.path.dirname(__file__)).setLevel(log_level)\n # This squelches a ton of conda output that is not hugely relevant\n logging.getLogger(\"conda\").setLevel(log_level)\n logging.getLogger(\"binstar\").setLevel(log_level)\n logging.getLogger(\"install\").setLevel(log_level + 10)\n logging.getLogger(\"conda.install\").setLevel(log_level + 10)\n logging.getLogger(\"fetch\").setLevel(log_level)\n logging.getLogger(\"print\").setLevel(log_level)\n logging.getLogger(\"progress\").setLevel(log_level)\n logging.getLogger(\"dotupdate\").setLevel(log_level)\n logging.getLogger(\"stdoutlog\").setLevel(log_level)\n logging.getLogger(\"requests\").setLevel(log_level)\n\n\ndef prepend_bin_path(env, prefix, prepend_prefix=False):\n # bin_dirname takes care of bin on *nix, Scripts on win\n env['PATH'] = join(prefix, bin_dirname) + os.pathsep + env['PATH']\n if sys.platform == \"win32\":\n env['PATH'] = join(prefix, \"Library\", \"mingw-w64\", \"bin\") + os.pathsep + \\\n join(prefix, \"Library\", \"usr\", \"bin\") + os.pathsep + os.pathsep + \\\n join(prefix, \"Library\", \"bin\") + os.pathsep + \\\n join(prefix, \"Scripts\") + os.pathsep + \\\n env['PATH']\n prepend_prefix = True # windows has Python in the prefix. Use it.\n if prepend_prefix:\n env['PATH'] = prefix + os.pathsep + env['PATH']\n return env\n\n\n# not currently used. 
Leaving in because it may be useful for when we do things\n# like load setup.py data, and we need the modules from some prefix other than\n# the root prefix, which is what conda-build runs from.\[email protected]\ndef sys_path_prepended(prefix):\n path_backup = sys.path[:]\n if on_win:\n sys.path.insert(1, os.path.join(prefix, 'lib', 'site-packages'))\n else:\n lib_dir = os.path.join(prefix, 'lib')\n python_dir = glob(os.path.join(lib_dir, 'python[0-9\\.]*'))\n if python_dir:\n python_dir = python_dir[0]\n sys.path.insert(1, os.path.join(python_dir, 'site-packages'))\n try:\n yield\n finally:\n sys.path = path_backup\n\n\[email protected]\ndef path_prepended(prefix):\n old_path = os.environ['PATH']\n os.environ['PATH'] = prepend_bin_path(os.environ.copy(), prefix, True)['PATH']\n try:\n yield\n finally:\n os.environ['PATH'] = old_path\n\nbin_dirname = 'Scripts' if sys.platform == 'win32' else 'bin'\n\nentry_pat = re.compile('\\s*([\\w\\-\\.]+)\\s*=\\s*([\\w.]+):([\\w.]+)\\s*$')\n\n\ndef iter_entry_points(items):\n for item in items:\n m = entry_pat.match(item)\n if m is None:\n sys.exit(\"Error cound not match entry point: %r\" % item)\n yield m.groups()\n\n\ndef create_entry_point(path, module, func, config):\n pyscript = PY_TMPL % {'module': module, 'func': func}\n if sys.platform == 'win32':\n with open(path + '-script.py', 'w') as fo:\n packages = linked(config.build_prefix)\n packages_names = (pkg.split('-')[0] for pkg in packages)\n if 'debug' in packages_names:\n fo.write('#!python_d\\n')\n fo.write(pyscript)\n copy_into(join(dirname(__file__), 'cli-%d.exe' % bits), path + '.exe', config.timeout)\n else:\n with open(path, 'w') as fo:\n fo.write('#!%s\\n' % config.build_python)\n fo.write(pyscript)\n os.chmod(path, 0o775)\n\n\ndef create_entry_points(items, config):\n if not items:\n return\n bin_dir = join(config.build_prefix, bin_dirname)\n if not isdir(bin_dir):\n os.mkdir(bin_dir)\n for cmd, module, func in iter_entry_points(items):\n create_entry_point(join(bin_dir, cmd), module, func, config)\n\n\ndef guess_license_family(license_name, allowed_license_families):\n # Tend towards the more clear GPL3 and away from the ambiguity of GPL2.\n if 'GPL (>= 2)' in license_name or license_name == 'GPL':\n return 'GPL3'\n elif 'LGPL' in license_name:\n return 'LGPL'\n else:\n return get_close_matches(license_name,\n allowed_license_families, 1, 0.0)[0]\n\n\n# Return all files in dir, and all its subdirectories, ending in pattern\ndef get_ext_files(start_path, pattern):\n for root, _, files in os.walk(start_path):\n for f in files:\n if f.endswith(pattern):\n yield os.path.join(root, f)\n\n\ndef _func_defaulting_env_to_os_environ(func, *popenargs, **kwargs):\n if 'env' not in kwargs:\n kwargs = kwargs.copy()\n env_copy = os.environ.copy()\n kwargs.update({'env': env_copy})\n _args = []\n for arg in popenargs:\n # arguments to subprocess need to be bytestrings\n if sys.version_info.major < 3 and hasattr(arg, 'encode'):\n arg = arg.encode(codec)\n elif sys.version_info.major >= 3 and hasattr(arg, 'decode'):\n arg = arg.decode(codec)\n _args.append(str(arg))\n return func(_args, **kwargs)\n\n\ndef check_call_env(popenargs, **kwargs):\n return _func_defaulting_env_to_os_environ(subprocess.check_call, *popenargs, **kwargs)\n\n\ndef check_output_env(popenargs, **kwargs):\n return _func_defaulting_env_to_os_environ(subprocess.check_output, *popenargs, **kwargs)\n\n\n_posix_exes_cache = {}\n\n\ndef convert_path_for_cygwin_or_msys2(exe, path):\n \"If exe is a Cygwin or MSYS2 executable then filters 
it through `cygpath -u`\"\n if sys.platform != 'win32':\n return path\n if exe not in _posix_exes_cache:\n with open(exe, \"rb\") as exe_file:\n exe_binary = exe_file.read()\n msys2_cygwin = re.findall(b'(cygwin1.dll|msys-2.0.dll)', exe_binary)\n _posix_exes_cache[exe] = True if msys2_cygwin else False\n if _posix_exes_cache[exe]:\n return check_output_env(['cygpath', '-u',\n path]).splitlines()[0].decode(getpreferredencoding())\n return path\n\n\ndef print_skip_message(metadata):\n print(\"Skipped: {} defines build/skip for this \"\n \"configuration.\".format(metadata.path))\n\n\ndef package_has_file(package_path, file_path):\n try:\n with tarfile.open(package_path) as t:\n try:\n # internal paths are always forward slashed on all platforms\n file_path = file_path.replace('\\\\', '/')\n text = t.extractfile(file_path).read()\n return text\n except KeyError:\n return False\n except OSError as e:\n raise RuntimeError(\"Could not extract %s (%s)\" % (package_path, e))\n except tarfile.ReadError:\n raise RuntimeError(\"Could not extract metadata from %s. \"\n \"File probably corrupt.\" % package_path)\n\n\ndef ensure_list(arg):\n from .conda_interface import string_types\n if isinstance(arg, string_types) or not hasattr(arg, '__iter__'):\n arg = [arg]\n return arg\n", "path": "conda_build/utils.py" } ]
[ { "content": "from __future__ import absolute_import, division, print_function\n\nfrom collections import defaultdict\nimport contextlib\nfrom difflib import get_close_matches\nimport fnmatch\nfrom glob import glob\nfrom locale import getpreferredencoding\nimport logging\nimport operator\nimport os\nfrom os.path import dirname, getmtime, getsize, isdir, join, isfile, abspath\nimport re\nimport stat\nimport subprocess\n\nimport sys\nimport shutil\nimport tarfile\nimport tempfile\nimport zipfile\n\nimport filelock\n\nfrom .conda_interface import md5_file, unix_path_to_win, win_path_to_unix\nfrom .conda_interface import PY3, iteritems\nfrom .conda_interface import linked\nfrom .conda_interface import bits, root_dir\n\nfrom conda_build.os_utils import external\n\nif PY3:\n import urllib.parse as urlparse\n import urllib.request as urllib\nelse:\n import urlparse\n import urllib\n\n\nlog = logging.getLogger(__file__)\n\n# elsewhere, kept here for reduced duplication. NOQA because it is not used in this file.\nfrom .conda_interface import rm_rf # NOQA\n\non_win = (sys.platform == 'win32')\n\ncodec = getpreferredencoding() or 'utf-8'\non_win = sys.platform == \"win32\"\nlog = logging.getLogger(__file__)\nroot_script_dir = os.path.join(root_dir, 'Scripts' if on_win else 'bin')\n\n\nPY_TMPL = \"\"\"\\\nif __name__ == '__main__':\n import sys\n import %(module)s\n\n sys.exit(%(module)s.%(func)s())\n\"\"\"\n\n\ndef get_recipe_abspath(recipe):\n \"\"\"resolve recipe dir as absolute path. If recipe is a tarball rather than a folder,\n extract it and return the extracted directory.\n\n Returns the absolute path, and a boolean flag that is true if a tarball has been extracted\n and needs cleanup.\n \"\"\"\n # Don't use byte literals for paths in Python 2\n if not PY3:\n recipe = recipe.decode(getpreferredencoding() or 'utf-8')\n if isfile(recipe):\n if recipe.endswith(('.tar', '.tar.gz', '.tgz', '.tar.bz2')):\n recipe_dir = tempfile.mkdtemp()\n t = tarfile.open(recipe, 'r:*')\n t.extractall(path=recipe_dir)\n t.close()\n need_cleanup = True\n else:\n print(\"Ignoring non-recipe: %s\" % recipe)\n return (None, None)\n else:\n recipe_dir = abspath(recipe)\n need_cleanup = False\n return recipe_dir, need_cleanup\n\n\ndef copy_into(src, dst, timeout=90, symlinks=False):\n \"Copy all the files and directories in src to the directory dst\"\n if isdir(src):\n merge_tree(src, dst, symlinks, timeout=timeout)\n\n else:\n if isdir(dst):\n dst_fn = os.path.join(dst, os.path.basename(src))\n else:\n dst_fn = dst\n\n lock = None\n if os.path.isabs(src):\n src_folder = os.path.dirname(src)\n lock = filelock.SoftFileLock(join(src_folder, \".conda_lock\"))\n try:\n if os.path.sep in dst_fn and not os.path.isdir(os.path.dirname(dst_fn)):\n os.makedirs(os.path.dirname(dst_fn))\n if lock:\n lock.acquire(timeout=timeout)\n # with each of these, we are copying less metadata. 
This seems to be necessary\n # to cope with some shared filesystems with some virtual machine setups.\n # See https://github.com/conda/conda-build/issues/1426\n try:\n shutil.copy2(src, dst_fn)\n except OSError:\n try:\n shutil.copy(src, dst_fn)\n except OSError:\n shutil.copyfile(src, dst_fn)\n except shutil.Error:\n log.debug(\"skipping %s - already exists in %s\", os.path.basename(src), dst)\n finally:\n if lock:\n lock.release()\n\n\n# http://stackoverflow.com/a/22331852/1170370\ndef copytree(src, dst, symlinks=False, ignore=None, dry_run=False):\n if not os.path.exists(dst):\n os.makedirs(dst)\n shutil.copystat(src, dst)\n lst = os.listdir(src)\n if ignore:\n excl = ignore(src, lst)\n lst = [x for x in lst if x not in excl]\n\n dst_lst = [os.path.join(dst, item) for item in lst]\n\n if not dry_run:\n for idx, item in enumerate(lst):\n s = os.path.join(src, item)\n d = dst_lst[idx]\n if symlinks and os.path.islink(s):\n if os.path.lexists(d):\n os.remove(d)\n os.symlink(os.readlink(s), d)\n try:\n st = os.lstat(s)\n mode = stat.S_IMODE(st.st_mode)\n os.lchmod(d, mode)\n except:\n pass # lchmod not available\n elif os.path.isdir(s):\n copytree(s, d, symlinks, ignore)\n else:\n try:\n shutil.copy2(s, d)\n except IOError:\n try:\n shutil.copy(s, d)\n except IOError:\n shutil.copyfile(s, d)\n return dst_lst\n\n\ndef merge_tree(src, dst, symlinks=False, timeout=90):\n \"\"\"\n Merge src into dst recursively by copying all files from src into dst.\n Return a list of all files copied.\n\n Like copytree(src, dst), but raises an error if merging the two trees\n would overwrite any files.\n \"\"\"\n assert src not in dst, (\"Can't merge/copy source into subdirectory of itself. Please create \"\n \"separate spaces for these things.\")\n\n new_files = copytree(src, dst, symlinks=symlinks, dry_run=True)\n # do not copy lock files\n new_files = [f for f in new_files if not f.endswith('.conda_lock')]\n existing = [f for f in new_files if isfile(f)]\n\n if existing:\n raise IOError(\"Can't merge {0} into {1}: file exists: \"\n \"{2}\".format(src, dst, existing[0]))\n\n lock = filelock.SoftFileLock(join(src, \".conda_lock\"))\n lock.acquire(timeout=timeout)\n try:\n copytree(src, dst, symlinks=symlinks)\n except:\n raise\n finally:\n lock.release()\n rm_rf(os.path.join(dst, '.conda_lock'))\n\n\ndef relative(f, d='lib'):\n assert not f.startswith('/'), f\n assert not d.startswith('/'), d\n d = d.strip('/').split('/')\n if d == ['.']:\n d = []\n f = dirname(f).split('/')\n if f == ['']:\n f = []\n while d and f and d[0] == f[0]:\n d.pop(0)\n f.pop(0)\n return '/'.join(((['..'] * len(f)) if f else ['.']) + d)\n\n\ndef _check_call(args, **kwargs):\n try:\n subprocess.check_call(args, **kwargs)\n except subprocess.CalledProcessError:\n sys.exit('Command failed: %s' % ' '.join(args))\n\n\ndef tar_xf(tarball, dir_path, mode='r:*'):\n if tarball.lower().endswith('.tar.z'):\n uncompress = external.find_executable('uncompress')\n if not uncompress:\n uncompress = external.find_executable('gunzip')\n if not uncompress:\n sys.exit(\"\"\"\\\nuncompress (or gunzip) is required to unarchive .z source files.\n\"\"\")\n subprocess.check_call([uncompress, '-f', tarball])\n tarball = tarball[:-2]\n if not PY3 and tarball.endswith('.tar.xz'):\n unxz = external.find_executable('unxz')\n if not unxz:\n sys.exit(\"\"\"\\\nunxz is required to unarchive .xz source files.\n\"\"\")\n\n subprocess.check_call([unxz, '-f', '-k', tarball])\n tarball = tarball[:-3]\n t = tarfile.open(tarball, mode)\n t.extractall(path=dir_path)\n 
t.close()\n\n\ndef unzip(zip_path, dir_path):\n z = zipfile.ZipFile(zip_path)\n for name in z.namelist():\n if name.endswith('/'):\n continue\n path = join(dir_path, *name.split('/'))\n dp = dirname(path)\n if not isdir(dp):\n os.makedirs(dp)\n with open(path, 'wb') as fo:\n fo.write(z.read(name))\n z.close()\n\n\ndef file_info(path):\n return {'size': getsize(path),\n 'md5': md5_file(path),\n 'mtime': getmtime(path)}\n\n# Taken from toolz\n\n\ndef groupby(key, seq):\n \"\"\" Group a collection by a key function\n >>> names = ['Alice', 'Bob', 'Charlie', 'Dan', 'Edith', 'Frank']\n >>> groupby(len, names) # doctest: +SKIP\n {3: ['Bob', 'Dan'], 5: ['Alice', 'Edith', 'Frank'], 7: ['Charlie']}\n >>> iseven = lambda x: x % 2 == 0\n >>> groupby(iseven, [1, 2, 3, 4, 5, 6, 7, 8]) # doctest: +SKIP\n {False: [1, 3, 5, 7], True: [2, 4, 6, 8]}\n Non-callable keys imply grouping on a member.\n >>> groupby('gender', [{'name': 'Alice', 'gender': 'F'},\n ... {'name': 'Bob', 'gender': 'M'},\n ... {'name': 'Charlie', 'gender': 'M'}]) # doctest:+SKIP\n {'F': [{'gender': 'F', 'name': 'Alice'}],\n 'M': [{'gender': 'M', 'name': 'Bob'},\n {'gender': 'M', 'name': 'Charlie'}]}\n See Also:\n countby\n \"\"\"\n if not callable(key):\n key = getter(key)\n d = defaultdict(lambda: [].append)\n for item in seq:\n d[key(item)](item)\n rv = {}\n for k, v in iteritems(d):\n rv[k] = v.__self__\n return rv\n\n\ndef getter(index):\n if isinstance(index, list):\n if len(index) == 1:\n index = index[0]\n return lambda x: (x[index],)\n elif index:\n return operator.itemgetter(*index)\n else:\n return lambda x: ()\n else:\n return operator.itemgetter(index)\n\n\ndef comma_join(items):\n \"\"\"\n Like ', '.join(items) but with and\n\n Examples:\n\n >>> comma_join(['a'])\n 'a'\n >>> comma_join(['a', 'b'])\n 'a and b'\n >>> comma_join(['a', 'b', 'c])\n 'a, b, and c'\n \"\"\"\n return ' and '.join(items) if len(items) <= 2 else ', '.join(items[:-1]) + ', and ' + items[-1]\n\n\ndef safe_print_unicode(*args, **kwargs):\n \"\"\"\n prints unicode strings to stdout using configurable `errors` handler for\n encoding errors\n\n :param args: unicode strings to print to stdout\n :param sep: separator (defaults to ' ')\n :param end: ending character (defaults to '\\n')\n :param errors: error handler for encoding errors (defaults to 'replace')\n \"\"\"\n sep = kwargs.pop('sep', u' ')\n end = kwargs.pop('end', u'\\n')\n errors = kwargs.pop('errors', 'replace')\n if PY3:\n func = sys.stdout.buffer.write\n else:\n func = sys.stdout.write\n line = sep.join(args) + end\n encoding = sys.stdout.encoding or 'utf8'\n func(line.encode(encoding, errors))\n\n\ndef rec_glob(path, patterns):\n result = []\n for d_f in os.walk(path):\n m = []\n for pattern in patterns:\n m.extend(fnmatch.filter(d_f[2], pattern))\n if m:\n result.extend([os.path.join(d_f[0], f) for f in m])\n return result\n\n\ndef convert_unix_path_to_win(path):\n if external.find_executable('cygpath'):\n cmd = \"cygpath -w {0}\".format(path)\n if PY3:\n path = subprocess.getoutput(cmd)\n else:\n path = subprocess.check_output(cmd.split()).rstrip().rstrip(\"\\\\\")\n\n else:\n path = unix_path_to_win(path)\n return path\n\n\ndef convert_win_path_to_unix(path):\n if external.find_executable('cygpath'):\n cmd = \"cygpath -u {0}\".format(path)\n if PY3:\n path = subprocess.getoutput(cmd)\n else:\n path = subprocess.check_output(cmd.split()).rstrip().rstrip(\"\\\\\")\n\n else:\n path = win_path_to_unix(path)\n return path\n\n\n# Used for translating local paths into url (file://) paths\n# 
http://stackoverflow.com/a/14298190/1170370\ndef path2url(path):\n return urlparse.urljoin('file:', urllib.pathname2url(path))\n\n\ndef get_stdlib_dir(prefix):\n if sys.platform == 'win32':\n stdlib_dir = os.path.join(prefix, 'Lib')\n else:\n lib_dir = os.path.join(prefix, 'lib')\n stdlib_dir = glob(os.path.join(lib_dir, 'python[0-9\\.]*'))\n if not stdlib_dir:\n stdlib_dir = ''\n else:\n stdlib_dir = stdlib_dir[0]\n return stdlib_dir\n\n\ndef get_site_packages(prefix):\n stdlib_dir = get_stdlib_dir(prefix)\n sp = ''\n if stdlib_dir:\n sp = os.path.join(stdlib_dir, 'site-packages')\n return sp\n\n\ndef get_build_folders(croot):\n # remember, glob is not a regex.\n return glob(os.path.join(croot, \"*\" + \"[0-9]\" * 10 + \"*\"))\n\n\ndef silence_loggers(show_warnings_and_errors=True):\n if show_warnings_and_errors:\n log_level = logging.WARN\n else:\n log_level = logging.CRITICAL + 1\n logging.getLogger(os.path.dirname(__file__)).setLevel(log_level)\n # This squelches a ton of conda output that is not hugely relevant\n logging.getLogger(\"conda\").setLevel(log_level)\n logging.getLogger(\"binstar\").setLevel(log_level)\n logging.getLogger(\"install\").setLevel(log_level + 10)\n logging.getLogger(\"conda.install\").setLevel(log_level + 10)\n logging.getLogger(\"fetch\").setLevel(log_level)\n logging.getLogger(\"print\").setLevel(log_level)\n logging.getLogger(\"progress\").setLevel(log_level)\n logging.getLogger(\"dotupdate\").setLevel(log_level)\n logging.getLogger(\"stdoutlog\").setLevel(log_level)\n logging.getLogger(\"requests\").setLevel(log_level)\n\n\ndef prepend_bin_path(env, prefix, prepend_prefix=False):\n # bin_dirname takes care of bin on *nix, Scripts on win\n env['PATH'] = join(prefix, bin_dirname) + os.pathsep + env['PATH']\n if sys.platform == \"win32\":\n env['PATH'] = join(prefix, \"Library\", \"mingw-w64\", \"bin\") + os.pathsep + \\\n join(prefix, \"Library\", \"usr\", \"bin\") + os.pathsep + os.pathsep + \\\n join(prefix, \"Library\", \"bin\") + os.pathsep + \\\n join(prefix, \"Scripts\") + os.pathsep + \\\n env['PATH']\n prepend_prefix = True # windows has Python in the prefix. Use it.\n if prepend_prefix:\n env['PATH'] = prefix + os.pathsep + env['PATH']\n return env\n\n\n# not currently used. 
Leaving in because it may be useful for when we do things\n# like load setup.py data, and we need the modules from some prefix other than\n# the root prefix, which is what conda-build runs from.\[email protected]\ndef sys_path_prepended(prefix):\n path_backup = sys.path[:]\n if on_win:\n sys.path.insert(1, os.path.join(prefix, 'lib', 'site-packages'))\n else:\n lib_dir = os.path.join(prefix, 'lib')\n python_dir = glob(os.path.join(lib_dir, 'python[0-9\\.]*'))\n if python_dir:\n python_dir = python_dir[0]\n sys.path.insert(1, os.path.join(python_dir, 'site-packages'))\n try:\n yield\n finally:\n sys.path = path_backup\n\n\[email protected]\ndef path_prepended(prefix):\n old_path = os.environ['PATH']\n os.environ['PATH'] = prepend_bin_path(os.environ.copy(), prefix, True)['PATH']\n try:\n yield\n finally:\n os.environ['PATH'] = old_path\n\nbin_dirname = 'Scripts' if sys.platform == 'win32' else 'bin'\n\nentry_pat = re.compile('\\s*([\\w\\-\\.]+)\\s*=\\s*([\\w.]+):([\\w.]+)\\s*$')\n\n\ndef iter_entry_points(items):\n for item in items:\n m = entry_pat.match(item)\n if m is None:\n sys.exit(\"Error cound not match entry point: %r\" % item)\n yield m.groups()\n\n\ndef create_entry_point(path, module, func, config):\n pyscript = PY_TMPL % {'module': module, 'func': func}\n if sys.platform == 'win32':\n with open(path + '-script.py', 'w') as fo:\n packages = linked(config.build_prefix)\n packages_names = (pkg.split('-')[0] for pkg in packages)\n if 'debug' in packages_names:\n fo.write('#!python_d\\n')\n fo.write(pyscript)\n copy_into(join(dirname(__file__), 'cli-%d.exe' % bits), path + '.exe', config.timeout)\n else:\n with open(path, 'w') as fo:\n fo.write('#!%s\\n' % config.build_python)\n fo.write(pyscript)\n os.chmod(path, 0o775)\n\n\ndef create_entry_points(items, config):\n if not items:\n return\n bin_dir = join(config.build_prefix, bin_dirname)\n if not isdir(bin_dir):\n os.mkdir(bin_dir)\n for cmd, module, func in iter_entry_points(items):\n create_entry_point(join(bin_dir, cmd), module, func, config)\n\n\ndef guess_license_family(license_name, allowed_license_families):\n # Tend towards the more clear GPL3 and away from the ambiguity of GPL2.\n if 'GPL (>= 2)' in license_name or license_name == 'GPL':\n return 'GPL3'\n elif 'LGPL' in license_name:\n return 'LGPL'\n else:\n return get_close_matches(license_name,\n allowed_license_families, 1, 0.0)[0]\n\n\n# Return all files in dir, and all its subdirectories, ending in pattern\ndef get_ext_files(start_path, pattern):\n for root, _, files in os.walk(start_path):\n for f in files:\n if f.endswith(pattern):\n yield os.path.join(root, f)\n\n\ndef _func_defaulting_env_to_os_environ(func, *popenargs, **kwargs):\n if 'env' not in kwargs:\n kwargs = kwargs.copy()\n env_copy = os.environ.copy()\n kwargs.update({'env': env_copy})\n _args = []\n for arg in popenargs:\n # arguments to subprocess need to be bytestrings\n if sys.version_info.major < 3 and hasattr(arg, 'encode'):\n arg = arg.encode(codec)\n elif sys.version_info.major >= 3 and hasattr(arg, 'decode'):\n arg = arg.decode(codec)\n _args.append(str(arg))\n return func(_args, **kwargs)\n\n\ndef check_call_env(popenargs, **kwargs):\n return _func_defaulting_env_to_os_environ(subprocess.check_call, *popenargs, **kwargs)\n\n\ndef check_output_env(popenargs, **kwargs):\n return _func_defaulting_env_to_os_environ(subprocess.check_output, *popenargs, **kwargs)\n\n\n_posix_exes_cache = {}\n\n\ndef convert_path_for_cygwin_or_msys2(exe, path):\n \"If exe is a Cygwin or MSYS2 executable then filters 
it through `cygpath -u`\"\n if sys.platform != 'win32':\n return path\n if exe not in _posix_exes_cache:\n with open(exe, \"rb\") as exe_file:\n exe_binary = exe_file.read()\n msys2_cygwin = re.findall(b'(cygwin1.dll|msys-2.0.dll)', exe_binary)\n _posix_exes_cache[exe] = True if msys2_cygwin else False\n if _posix_exes_cache[exe]:\n return check_output_env(['cygpath', '-u',\n path]).splitlines()[0].decode(getpreferredencoding())\n return path\n\n\ndef print_skip_message(metadata):\n print(\"Skipped: {} defines build/skip for this \"\n \"configuration.\".format(metadata.path))\n\n\ndef package_has_file(package_path, file_path):\n try:\n with tarfile.open(package_path) as t:\n try:\n # internal paths are always forward slashed on all platforms\n file_path = file_path.replace('\\\\', '/')\n text = t.extractfile(file_path).read()\n return text\n except KeyError:\n return False\n except OSError as e:\n raise RuntimeError(\"Could not extract %s (%s)\" % (package_path, e))\n except tarfile.ReadError:\n raise RuntimeError(\"Could not extract metadata from %s. \"\n \"File probably corrupt.\" % package_path)\n\n\ndef ensure_list(arg):\n from .conda_interface import string_types\n if (isinstance(arg, string_types) or not hasattr(arg, '__iter__')):\n if arg:\n arg = [arg]\n else:\n arg = []\n return arg\n", "path": "conda_build/utils.py" } ]
diff --git a/conda_build/utils.py b/conda_build/utils.py index 882930380b..1cf9d045c1 100644 --- a/conda_build/utils.py +++ b/conda_build/utils.py @@ -598,6 +598,9 @@ def package_has_file(package_path, file_path): def ensure_list(arg): from .conda_interface import string_types - if isinstance(arg, string_types) or not hasattr(arg, '__iter__'): - arg = [arg] + if (isinstance(arg, string_types) or not hasattr(arg, '__iter__')): + if arg: + arg = [arg] + else: + arg = [] return arg diff --git a/tests/test-recipes/metadata/empty_patch_section/meta.yaml b/tests/test-recipes/metadata/empty_patch_section/meta.yaml new file mode 100644 index 0000000000..5387aa8066 --- /dev/null +++ b/tests/test-recipes/metadata/empty_patch_section/meta.yaml @@ -0,0 +1,9 @@ +package: + name: patch_section_empty + version: 1.0 + +source: + path: . + # the test here is that selectors can make this field empty. Make it empty here no matter what. + # https://github.com/conda/conda-build/issues/1490 + patches:
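The one-line change to `ensure_list` in the diff above is what makes an empty `patches:` section harmless: previously a falsy value (such as the `None` presumably left behind when every entry is filtered out by selectors) was wrapped as `[None]` and then treated as a patch filename. A standalone sketch of the before/after behaviour, with `string_types` simplified to `str` for illustration:

```python
def ensure_list_old(arg):
    # pre-fix behaviour: falsy scalars still get wrapped in a list
    if isinstance(arg, str) or not hasattr(arg, "__iter__"):
        arg = [arg]
    return arg


def ensure_list_new(arg):
    # post-fix behaviour: falsy scalars become an empty list
    if isinstance(arg, str) or not hasattr(arg, "__iter__"):
        arg = [arg] if arg else []
    return arg


empty_patches = None  # what an all-selected-out `patches:` field can reduce to

print(ensure_list_old(empty_patches))  # [None] -> "no such patch" once joined to a path
print(ensure_list_new(empty_patches))  # []     -> nothing to apply
```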
modin-project__modin-4769
test_join_sort.py in CI timed out with the new Dask release, 2022.2.0
Error: https://github.com/modin-project/modin/runs/5195622251?check_suite_focus=true
Dask release: https://github.com/dask/dask/releases/tag/2022.02.0
The fastest option here is to pin `dask<2022.2.0`, but that also calls for an investigation into the cause.
[ { "content": "from setuptools import setup, find_packages\nimport versioneer\nimport sys\n\nPANDAS_VERSION = \"1.4.3\" if sys.version_info >= (3, 8) else \"1.1.5\"\n\nwith open(\"README.md\", \"r\", encoding=\"utf-8\") as fh:\n long_description = fh.read()\n\ndask_deps = [\"dask>=2.22.0,<2022.2.0\", \"distributed>=2.22.0,<2022.2.0\"]\nif sys.version_info < (3, 8):\n dask_deps.append(\"pickle5\")\n\nray_deps = [\n \"ray[default]>=1.4.0\",\n \"pyarrow>=4.0.1\",\n \"redis>=3.5.0,<4.0.0\",\n]\nremote_deps = [\"rpyc==4.1.5\", \"cloudpickle\", \"boto3\"]\nspreadsheet_deps = [\"modin-spreadsheet>=0.1.0\"]\nsql_deps = [\"dfsql>=0.4.2\", \"pyparsing<=2.4.7\"]\nall_deps = dask_deps + ray_deps + remote_deps + spreadsheet_deps\n\n# Distribute 'modin-autoimport-pandas.pth' along with binary and source distributions.\n# This file provides the \"import pandas before Ray init\" feature if specific\n# environment variable is set (see https://github.com/modin-project/modin/issues/4564).\ncmdclass = versioneer.get_cmdclass()\nextra_files = [\"modin-autoimport-pandas.pth\"]\n\n\nclass AddPthFileBuild(cmdclass[\"build_py\"]):\n def _get_data_files(self):\n return (super()._get_data_files() or []) + [\n (\".\", \".\", self.build_lib, extra_files)\n ]\n\n\nclass AddPthFileSDist(cmdclass[\"sdist\"]):\n def make_distribution(self):\n self.filelist.extend(extra_files)\n return super().make_distribution()\n\n\ncmdclass[\"build_py\"] = AddPthFileBuild\ncmdclass[\"sdist\"] = AddPthFileSDist\n\nsetup(\n name=\"modin\",\n version=versioneer.get_version(),\n cmdclass=cmdclass,\n description=\"Modin: Make your pandas code run faster by changing one line of code.\",\n packages=find_packages(exclude=[\"scripts\", \"scripts.*\"]),\n include_package_data=True,\n license=\"Apache 2\",\n url=\"https://github.com/modin-project/modin\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n install_requires=[f\"pandas=={PANDAS_VERSION}\", \"packaging\", \"numpy>=1.18.5\", \"fsspec\", \"psutil\"],\n extras_require={\n # can be installed by pip install modin[dask]\n \"dask\": dask_deps,\n \"ray\": ray_deps,\n \"remote\": remote_deps,\n \"spreadsheet\": spreadsheet_deps,\n \"sql\": sql_deps,\n \"all\": all_deps,\n },\n python_requires=\">=3.6\",\n)\n", "path": "setup.py" } ]
[ { "content": "from setuptools import setup, find_packages\nimport versioneer\nimport sys\n\nPANDAS_VERSION = \"1.4.3\" if sys.version_info >= (3, 8) else \"1.1.5\"\n\nwith open(\"README.md\", \"r\", encoding=\"utf-8\") as fh:\n long_description = fh.read()\n\ndask_deps = [\"dask>=2.22.0\", \"distributed>=2.22.0\"]\nif sys.version_info < (3, 8):\n dask_deps.append(\"pickle5\")\n\nray_deps = [\n \"ray[default]>=1.4.0\",\n \"pyarrow>=4.0.1\",\n \"redis>=3.5.0,<4.0.0\",\n]\nremote_deps = [\"rpyc==4.1.5\", \"cloudpickle\", \"boto3\"]\nspreadsheet_deps = [\"modin-spreadsheet>=0.1.0\"]\nsql_deps = [\"dfsql>=0.4.2\", \"pyparsing<=2.4.7\"]\nall_deps = dask_deps + ray_deps + remote_deps + spreadsheet_deps\n\n# Distribute 'modin-autoimport-pandas.pth' along with binary and source distributions.\n# This file provides the \"import pandas before Ray init\" feature if specific\n# environment variable is set (see https://github.com/modin-project/modin/issues/4564).\ncmdclass = versioneer.get_cmdclass()\nextra_files = [\"modin-autoimport-pandas.pth\"]\n\n\nclass AddPthFileBuild(cmdclass[\"build_py\"]):\n def _get_data_files(self):\n return (super()._get_data_files() or []) + [\n (\".\", \".\", self.build_lib, extra_files)\n ]\n\n\nclass AddPthFileSDist(cmdclass[\"sdist\"]):\n def make_distribution(self):\n self.filelist.extend(extra_files)\n return super().make_distribution()\n\n\ncmdclass[\"build_py\"] = AddPthFileBuild\ncmdclass[\"sdist\"] = AddPthFileSDist\n\nsetup(\n name=\"modin\",\n version=versioneer.get_version(),\n cmdclass=cmdclass,\n description=\"Modin: Make your pandas code run faster by changing one line of code.\",\n packages=find_packages(exclude=[\"scripts\", \"scripts.*\"]),\n include_package_data=True,\n license=\"Apache 2\",\n url=\"https://github.com/modin-project/modin\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n install_requires=[f\"pandas=={PANDAS_VERSION}\", \"packaging\", \"numpy>=1.18.5\", \"fsspec\", \"psutil\"],\n extras_require={\n # can be installed by pip install modin[dask]\n \"dask\": dask_deps,\n \"ray\": ray_deps,\n \"remote\": remote_deps,\n \"spreadsheet\": spreadsheet_deps,\n \"sql\": sql_deps,\n \"all\": all_deps,\n },\n python_requires=\">=3.6\",\n)\n", "path": "setup.py" } ]
diff --git a/docs/release_notes/release_notes-0.16.0.rst b/docs/release_notes/release_notes-0.16.0.rst index f56ac93898e..e8ee3884053 100644 --- a/docs/release_notes/release_notes-0.16.0.rst +++ b/docs/release_notes/release_notes-0.16.0.rst @@ -72,6 +72,7 @@ Key Features and Updates * Dependencies * FEAT-#4598: Add support for pandas 1.4.3 (#4599) * FEAT-#4619: Integrate mypy static type checking (#4620) + * FEAT-#4202: Allow dask past 2022.2.0 (#4769) * New Features * FEAT-4463: Add experimental fuzzydata integration for testing against a randomized dataframe workflow (#4556) * FEAT-#4419: Extend virtual partitioning API to pandas on Dask (#4420) diff --git a/environment-dev.yml b/environment-dev.yml index 1cdfad7059b..1a23426a22e 100644 --- a/environment-dev.yml +++ b/environment-dev.yml @@ -5,8 +5,8 @@ dependencies: - pandas==1.4.3 - numpy>=1.18.5 - pyarrow>=4.0.1 - - dask[complete]>=2.22.0,<2022.2.0 - - distributed>=2.22.0,<2022.2.0 + - dask[complete]>=2.22.0 + - distributed>=2.22.0 - fsspec - xarray - Jinja2 diff --git a/requirements-dev.txt b/requirements-dev.txt index b50c016bb7f..cf89d697ed1 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -1,8 +1,8 @@ pandas==1.4.3 numpy>=1.18.5 pyarrow>=4.0.1 -dask[complete]>=2.22.0,<2022.2.0 -distributed>=2.22.0,<2022.2.0 +dask[complete]>=2.22.0 +distributed>=2.22.0 ray[default]>=1.4.0 redis>=3.5.0,<4.0.0 psutil diff --git a/requirements/requirements-py36.txt b/requirements/requirements-py36.txt index f42cbd34bc0..a6f1f5e8322 100644 --- a/requirements/requirements-py36.txt +++ b/requirements/requirements-py36.txt @@ -1,8 +1,8 @@ pandas==1.1.5 numpy>=1.18.5 pyarrow>=4.0.1 -dask[complete]>=2.22.0,<2022.2.0 -distributed>=2.22.0,<2022.2.0 +dask[complete]>=2.22.0 +distributed>=2.22.0 ray[default]>=1.4.0 redis>=3.5.0,<4.0.0 psutil diff --git a/setup.py b/setup.py index 3fd25c89003..701f5e3046e 100644 --- a/setup.py +++ b/setup.py @@ -7,7 +7,7 @@ with open("README.md", "r", encoding="utf-8") as fh: long_description = fh.read() -dask_deps = ["dask>=2.22.0,<2022.2.0", "distributed>=2.22.0,<2022.2.0"] +dask_deps = ["dask>=2.22.0", "distributed>=2.22.0"] if sys.version_info < (3, 8): dask_deps.append("pickle5")
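The dependency change above simply drops the upper bound, so the previously excluded Dask releases become installable again. A quick way to see what each specifier admits is to use the `packaging` library that this `setup.py` already lists in `install_requires`; the version strings below are chosen purely for illustration.

```python
from packaging.specifiers import SpecifierSet
from packaging.version import Version

old_spec = SpecifierSet(">=2.22.0,<2022.2.0")  # the pinned requirement
new_spec = SpecifierSet(">=2.22.0")            # the relaxed requirement

for candidate in ("2021.12.0", "2022.2.0", "2022.6.1"):
    v = Version(candidate)
    print(candidate, "old:", v in old_spec, "new:", v in new_spec)
# 2021.12.0 satisfies both specs; 2022.2.0 and later satisfy only the new one
```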