in_source_id (string, 13–58 chars) | issue (string, 3–241k chars) | before_files (list, 0–3 items) | after_files (list, 0–3 items) | pr_diff (string, 109–107M chars, ⌀ = may be null) |
---|---|---|---|---|
conda__conda-build-537 | GIT_DESCRIBE_TAG isn't set when the .git is a file
Sometimes the .git directory is actually a file. This happens, for example, when you have a git submodule. This causes this test to fail incorrectly: https://github.com/conda/conda-build/blob/master/conda_build/environ.py#L36
Unfortunately, the .git file might point to the real git repository via a relative path, so simply moving the directory into the conda build directory can break its link to the repository.
| [
{
"content": "from __future__ import absolute_import, division, print_function\n\nimport os\nimport sys\nfrom os.path import join\nimport subprocess\nimport multiprocessing\n\nimport conda.config as cc\n\nfrom conda_build.config import config\n\nfrom conda_build import source\n\n\ndef get_perl_ver():\n return str(config.CONDA_PERL)\n\ndef get_py_ver():\n return '.'.join(str(config.CONDA_PY))\n\ndef get_npy_ver():\n return '.'.join(str(config.CONDA_NPY))\n\ndef get_stdlib_dir():\n return join(config.build_prefix, 'Lib' if sys.platform == 'win32' else\n 'lib/python%s' % get_py_ver())\n\ndef get_sp_dir():\n return join(get_stdlib_dir(), 'site-packages')\n\ndef get_git_build_info(src_dir):\n env = os.environ.copy()\n d = {}\n git_dir = join(src_dir, '.git')\n if os.path.isdir(git_dir):\n env['GIT_DIR'] = git_dir\n else:\n return d\n\n # grab information from describe\n key_name = lambda a: \"GIT_DESCRIBE_{}\".format(a)\n keys = [key_name(\"TAG\"), key_name(\"NUMBER\"), key_name(\"HASH\")]\n env = {str(key): str(value) for key, value in env.items()}\n process = subprocess.Popen([\"git\", \"describe\", \"--tags\", \"--long\", \"HEAD\"],\n stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n env=env)\n output = process.communicate()[0].strip()\n output = output.decode('utf-8')\n parts = output.rsplit('-', 2)\n parts_length = len(parts)\n if parts_length == 3:\n d.update(dict(zip(keys, parts)))\n # get the _full_ hash of the current HEAD\n process = subprocess.Popen([\"git\", \"rev-parse\", \"HEAD\"],\n stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n env=env)\n output = process.communicate()[0].strip()\n output = output.decode('utf-8')\n d['GIT_FULL_HASH'] = output\n # set up the build string\n if key_name('NUMBER') in d and key_name('HASH') in d:\n d['GIT_BUILD_STR'] = '{}_{}'.format(d[key_name('NUMBER')],\n d[key_name('HASH')])\n\n return d\n\ndef get_dict(m=None, prefix=None):\n if not prefix:\n prefix = config.build_prefix\n\n python = config.build_python\n d = {'CONDA_BUILD': '1', 'PYTHONNOUSERSITE': '1'}\n d['CONDA_DEFAULT_ENV'] = config.build_prefix\n d['ARCH'] = str(cc.bits)\n d['PREFIX'] = prefix\n d['PYTHON'] = python\n d['PY3K'] = str(config.PY3K)\n d['STDLIB_DIR'] = get_stdlib_dir()\n d['SP_DIR'] = get_sp_dir()\n d['SYS_PREFIX'] = sys.prefix\n d['SYS_PYTHON'] = sys.executable\n d['PERL_VER'] = get_perl_ver()\n d['PY_VER'] = get_py_ver()\n d['NPY_VER'] = get_npy_ver()\n d['SRC_DIR'] = source.get_dir()\n if \"LANG\" in os.environ:\n d['LANG'] = os.environ['LANG']\n if \"HTTPS_PROXY\" in os.environ:\n d['HTTPS_PROXY'] = os.environ['HTTPS_PROXY']\n if \"HTTP_PROXY\" in os.environ:\n d['HTTP_PROXY'] = os.environ['HTTP_PROXY']\n\n if m:\n for var_name in m.get_value('build/script_env', []):\n value = os.getenv(var_name)\n if value is None:\n value = '<UNDEFINED>'\n d[var_name] = value\n\n try:\n d['CPU_COUNT'] = str(multiprocessing.cpu_count())\n except NotImplementedError:\n d['CPU_COUNT'] = \"1\"\n\n d.update(**get_git_build_info(d['SRC_DIR']))\n\n if sys.platform == 'win32': # -------- Windows\n d['PATH'] = (join(prefix, 'Library', 'bin') + ';' +\n join(prefix) + ';' +\n join(prefix, 'Scripts') + ';%PATH%')\n d['SCRIPTS'] = join(prefix, 'Scripts')\n d['LIBRARY_PREFIX'] = join(prefix, 'Library')\n d['LIBRARY_BIN'] = join(d['LIBRARY_PREFIX'], 'bin')\n d['LIBRARY_INC'] = join(d['LIBRARY_PREFIX'], 'include')\n d['LIBRARY_LIB'] = join(d['LIBRARY_PREFIX'], 'lib')\n # This probably should be done more generally\n d['CYGWIN_PREFIX'] = prefix.replace('\\\\', '/').replace('C:', '/cygdrive/c')\n\n 
d['R'] = join(prefix, 'Scripts', 'R.exe')\n else: # -------- Unix\n d['PATH'] = '%s/bin:%s' % (prefix, os.getenv('PATH'))\n d['HOME'] = os.getenv('HOME', 'UNKNOWN')\n d['PKG_CONFIG_PATH'] = join(prefix, 'lib', 'pkgconfig')\n d['R'] = join(prefix, 'bin', 'R')\n\n if sys.platform == 'darwin': # -------- OSX\n d['OSX_ARCH'] = 'i386' if cc.bits == 32 else 'x86_64'\n d['CFLAGS'] = '-arch %(OSX_ARCH)s' % d\n d['CXXFLAGS'] = d['CFLAGS']\n d['LDFLAGS'] = d['CFLAGS']\n d['MACOSX_DEPLOYMENT_TARGET'] = '10.6'\n\n elif sys.platform.startswith('linux'): # -------- Linux\n d['LD_RUN_PATH'] = prefix + '/lib'\n\n if m:\n d['PKG_NAME'] = m.name()\n d['PKG_VERSION'] = m.version()\n d['PKG_BUILDNUM'] = str(m.build_number())\n d['RECIPE_DIR'] = m.path\n\n return d\n\n\nif __name__ == '__main__':\n e = get_dict()\n for k in sorted(e):\n assert isinstance(e[k], str), k\n print('%s=%s' % (k, e[k]))\n",
"path": "conda_build/environ.py"
}
] | [
{
"content": "from __future__ import absolute_import, division, print_function\n\nimport os\nimport sys\nfrom os.path import join\nimport subprocess\nimport multiprocessing\n\nimport conda.config as cc\n\nfrom conda_build.config import config\n\nfrom conda_build import source\n\n\ndef get_perl_ver():\n return str(config.CONDA_PERL)\n\ndef get_py_ver():\n return '.'.join(str(config.CONDA_PY))\n\ndef get_npy_ver():\n return '.'.join(str(config.CONDA_NPY))\n\ndef get_stdlib_dir():\n return join(config.build_prefix, 'Lib' if sys.platform == 'win32' else\n 'lib/python%s' % get_py_ver())\n\ndef get_sp_dir():\n return join(get_stdlib_dir(), 'site-packages')\n\ndef get_git_build_info(src_dir):\n env = os.environ.copy()\n d = {}\n git_dir = join(src_dir, '.git')\n if os.path.exists(git_dir):\n env['GIT_DIR'] = git_dir\n else:\n return d\n\n # grab information from describe\n key_name = lambda a: \"GIT_DESCRIBE_{}\".format(a)\n keys = [key_name(\"TAG\"), key_name(\"NUMBER\"), key_name(\"HASH\")]\n env = {str(key): str(value) for key, value in env.items()}\n process = subprocess.Popen([\"git\", \"describe\", \"--tags\", \"--long\", \"HEAD\"],\n stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n env=env)\n output = process.communicate()[0].strip()\n output = output.decode('utf-8')\n parts = output.rsplit('-', 2)\n parts_length = len(parts)\n if parts_length == 3:\n d.update(dict(zip(keys, parts)))\n # get the _full_ hash of the current HEAD\n process = subprocess.Popen([\"git\", \"rev-parse\", \"HEAD\"],\n stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n env=env)\n output = process.communicate()[0].strip()\n output = output.decode('utf-8')\n d['GIT_FULL_HASH'] = output\n # set up the build string\n if key_name('NUMBER') in d and key_name('HASH') in d:\n d['GIT_BUILD_STR'] = '{}_{}'.format(d[key_name('NUMBER')],\n d[key_name('HASH')])\n\n return d\n\ndef get_dict(m=None, prefix=None):\n if not prefix:\n prefix = config.build_prefix\n\n python = config.build_python\n d = {'CONDA_BUILD': '1', 'PYTHONNOUSERSITE': '1'}\n d['CONDA_DEFAULT_ENV'] = config.build_prefix\n d['ARCH'] = str(cc.bits)\n d['PREFIX'] = prefix\n d['PYTHON'] = python\n d['PY3K'] = str(config.PY3K)\n d['STDLIB_DIR'] = get_stdlib_dir()\n d['SP_DIR'] = get_sp_dir()\n d['SYS_PREFIX'] = sys.prefix\n d['SYS_PYTHON'] = sys.executable\n d['PERL_VER'] = get_perl_ver()\n d['PY_VER'] = get_py_ver()\n d['NPY_VER'] = get_npy_ver()\n d['SRC_DIR'] = source.get_dir()\n if \"LANG\" in os.environ:\n d['LANG'] = os.environ['LANG']\n if \"HTTPS_PROXY\" in os.environ:\n d['HTTPS_PROXY'] = os.environ['HTTPS_PROXY']\n if \"HTTP_PROXY\" in os.environ:\n d['HTTP_PROXY'] = os.environ['HTTP_PROXY']\n\n if m:\n for var_name in m.get_value('build/script_env', []):\n value = os.getenv(var_name)\n if value is None:\n value = '<UNDEFINED>'\n d[var_name] = value\n\n try:\n d['CPU_COUNT'] = str(multiprocessing.cpu_count())\n except NotImplementedError:\n d['CPU_COUNT'] = \"1\"\n\n d.update(**get_git_build_info(d['SRC_DIR']))\n\n if sys.platform == 'win32': # -------- Windows\n d['PATH'] = (join(prefix, 'Library', 'bin') + ';' +\n join(prefix) + ';' +\n join(prefix, 'Scripts') + ';%PATH%')\n d['SCRIPTS'] = join(prefix, 'Scripts')\n d['LIBRARY_PREFIX'] = join(prefix, 'Library')\n d['LIBRARY_BIN'] = join(d['LIBRARY_PREFIX'], 'bin')\n d['LIBRARY_INC'] = join(d['LIBRARY_PREFIX'], 'include')\n d['LIBRARY_LIB'] = join(d['LIBRARY_PREFIX'], 'lib')\n # This probably should be done more generally\n d['CYGWIN_PREFIX'] = prefix.replace('\\\\', '/').replace('C:', '/cygdrive/c')\n\n 
d['R'] = join(prefix, 'Scripts', 'R.exe')\n else: # -------- Unix\n d['PATH'] = '%s/bin:%s' % (prefix, os.getenv('PATH'))\n d['HOME'] = os.getenv('HOME', 'UNKNOWN')\n d['PKG_CONFIG_PATH'] = join(prefix, 'lib', 'pkgconfig')\n d['R'] = join(prefix, 'bin', 'R')\n\n if sys.platform == 'darwin': # -------- OSX\n d['OSX_ARCH'] = 'i386' if cc.bits == 32 else 'x86_64'\n d['CFLAGS'] = '-arch %(OSX_ARCH)s' % d\n d['CXXFLAGS'] = d['CFLAGS']\n d['LDFLAGS'] = d['CFLAGS']\n d['MACOSX_DEPLOYMENT_TARGET'] = '10.6'\n\n elif sys.platform.startswith('linux'): # -------- Linux\n d['LD_RUN_PATH'] = prefix + '/lib'\n\n if m:\n d['PKG_NAME'] = m.name()\n d['PKG_VERSION'] = m.version()\n d['PKG_BUILDNUM'] = str(m.build_number())\n d['RECIPE_DIR'] = m.path\n\n return d\n\n\nif __name__ == '__main__':\n e = get_dict()\n for k in sorted(e):\n assert isinstance(e[k], str), k\n print('%s=%s' % (k, e[k]))\n",
"path": "conda_build/environ.py"
}
] | diff --git a/conda_build/environ.py b/conda_build/environ.py
index d3dc2ca6f6..2d7d4cb680 100644
--- a/conda_build/environ.py
+++ b/conda_build/environ.py
@@ -33,7 +33,7 @@ def get_git_build_info(src_dir):
env = os.environ.copy()
d = {}
git_dir = join(src_dir, '.git')
- if os.path.isdir(git_dir):
+ if os.path.exists(git_dir):
env['GIT_DIR'] = git_dir
else:
return d
|
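The one-line change in the diff above swaps `os.path.isdir` for `os.path.exists`: a git submodule checkout has a plain `.git` *file* (a gitlink containing `gitdir: <path>`), which `isdir()` rejects, so the metadata collection returned early and `GIT_DESCRIBE_TAG` was never set. A minimal standalone sketch of the corrected check, using a hypothetical `get_git_describe` helper rather than conda-build's actual `get_git_build_info`:

```python
import os
import subprocess
from os.path import join


def get_git_describe(src_dir):
    """Return `git describe` output for src_dir, or None if there is no .git entry.

    In a submodule checkout, .git is a file containing "gitdir: <path>", so
    os.path.isdir() is False even though the checkout is a valid repository;
    os.path.exists() accepts both the directory and the gitlink file.
    """
    git_entry = join(src_dir, '.git')
    if not os.path.exists(git_entry):  # the patched check; isdir() would skip submodules
        return None
    out = subprocess.check_output(
        ['git', 'describe', '--tags', '--long', 'HEAD'],
        cwd=src_dir)  # run inside the checkout and let git resolve the gitlink itself
    return out.decode('utf-8').strip()
```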
python__mypy-16229 | Add setuptools as a dependency on Python 3.12?
Mypyc needs `distutils` or `setuptools` to run, but Python 3.12 no longer bundles `distutils` ([PEP 632](https://peps.python.org/pep-0632/)). This seems to imply that we need to include `setuptools` as a dependency of mypy (at least on Python 3.12 or later), or unbundle mypyc into a separate distribution on PyPI. Thoughts?
| [
{
"content": "#!/usr/bin/env python\n\nfrom __future__ import annotations\n\nimport glob\nimport os\nimport os.path\nimport sys\nfrom typing import TYPE_CHECKING, Any\n\nif sys.version_info < (3, 8, 0): # noqa: UP036\n sys.stderr.write(\"ERROR: You need Python 3.8 or later to use mypy.\\n\")\n exit(1)\n\n# we'll import stuff from the source tree, let's ensure is on the sys path\nsys.path.insert(0, os.path.dirname(os.path.realpath(__file__)))\n\n# This requires setuptools when building; setuptools is not needed\n# when installing from a wheel file (though it is still needed for\n# alternative forms of installing, as suggested by README.md).\nfrom setuptools import Extension, find_packages, setup\nfrom setuptools.command.build_py import build_py\n\nfrom mypy.version import __version__ as version\n\nif TYPE_CHECKING:\n from typing_extensions import TypeGuard\n\ndescription = \"Optional static typing for Python\"\nlong_description = \"\"\"\nMypy -- Optional Static Typing for Python\n=========================================\n\nAdd type annotations to your Python programs, and use mypy to type\ncheck them. Mypy is essentially a Python linter on steroids, and it\ncan catch many programming errors by analyzing your program, without\nactually having to run it. Mypy has a powerful type system with\nfeatures such as type inference, gradual typing, generics and union\ntypes.\n\"\"\".lstrip()\n\n\ndef is_list_of_setuptools_extension(items: list[Any]) -> TypeGuard[list[Extension]]:\n return all(isinstance(item, Extension) for item in items)\n\n\ndef find_package_data(base, globs, root=\"mypy\"):\n \"\"\"Find all interesting data files, for setup(package_data=)\n\n Arguments:\n root: The directory to search in.\n globs: A list of glob patterns to accept files.\n \"\"\"\n\n rv_dirs = [root for root, dirs, files in os.walk(base)]\n rv = []\n for rv_dir in rv_dirs:\n files = []\n for pat in globs:\n files += glob.glob(os.path.join(rv_dir, pat))\n if not files:\n continue\n rv.extend([os.path.relpath(f, root) for f in files])\n return rv\n\n\nclass CustomPythonBuild(build_py):\n def pin_version(self):\n path = os.path.join(self.build_lib, \"mypy\")\n self.mkpath(path)\n with open(os.path.join(path, \"version.py\"), \"w\") as stream:\n stream.write(f'__version__ = \"{version}\"\\n')\n\n def run(self):\n self.execute(self.pin_version, ())\n build_py.run(self)\n\n\ncmdclass = {\"build_py\": CustomPythonBuild}\n\npackage_data = [\"py.typed\"]\n\npackage_data += find_package_data(os.path.join(\"mypy\", \"typeshed\"), [\"*.py\", \"*.pyi\"])\npackage_data += [os.path.join(\"mypy\", \"typeshed\", \"stdlib\", \"VERSIONS\")]\n\npackage_data += find_package_data(os.path.join(\"mypy\", \"xml\"), [\"*.xsd\", \"*.xslt\", \"*.css\"])\n\nUSE_MYPYC = False\n# To compile with mypyc, a mypyc checkout must be present on the PYTHONPATH\nif len(sys.argv) > 1 and \"--use-mypyc\" in sys.argv:\n sys.argv.remove(\"--use-mypyc\")\n USE_MYPYC = True\nif os.getenv(\"MYPY_USE_MYPYC\", None) == \"1\":\n USE_MYPYC = True\n\nif USE_MYPYC:\n MYPYC_BLACKLIST = tuple(\n os.path.join(\"mypy\", x)\n for x in (\n # Need to be runnable as scripts\n \"__main__.py\",\n \"pyinfo.py\",\n os.path.join(\"dmypy\", \"__main__.py\"),\n # Uses __getattr__/__setattr__\n \"split_namespace.py\",\n # Lies to mypy about code reachability\n \"bogus_type.py\",\n # We don't populate __file__ properly at the top level or something?\n # Also I think there would be problems with how we generate version.py.\n \"version.py\",\n # Skip these to reduce the size of the 
build\n \"stubtest.py\",\n \"stubgenc.py\",\n \"stubdoc.py\",\n \"stubutil.py\",\n )\n ) + (\n # Don't want to grab this accidentally\n os.path.join(\"mypyc\", \"lib-rt\", \"setup.py\"),\n # Uses __file__ at top level https://github.com/mypyc/mypyc/issues/700\n os.path.join(\"mypyc\", \"__main__.py\"),\n )\n\n everything = [os.path.join(\"mypy\", x) for x in find_package_data(\"mypy\", [\"*.py\"])] + [\n os.path.join(\"mypyc\", x) for x in find_package_data(\"mypyc\", [\"*.py\"], root=\"mypyc\")\n ]\n # Start with all the .py files\n all_real_pys = [\n x for x in everything if not x.startswith(os.path.join(\"mypy\", \"typeshed\") + os.sep)\n ]\n # Strip out anything in our blacklist\n mypyc_targets = [x for x in all_real_pys if x not in MYPYC_BLACKLIST]\n # Strip out any test code\n mypyc_targets = [\n x\n for x in mypyc_targets\n if not x.startswith(\n (\n os.path.join(\"mypy\", \"test\") + os.sep,\n os.path.join(\"mypyc\", \"test\") + os.sep,\n os.path.join(\"mypyc\", \"doc\") + os.sep,\n os.path.join(\"mypyc\", \"test-data\") + os.sep,\n )\n )\n ]\n # ... and add back in the one test module we need\n mypyc_targets.append(os.path.join(\"mypy\", \"test\", \"visitors.py\"))\n\n # The targets come out of file system apis in an unspecified\n # order. Sort them so that the mypyc output is deterministic.\n mypyc_targets.sort()\n\n use_other_mypyc = os.getenv(\"ALTERNATE_MYPYC_PATH\", None)\n if use_other_mypyc:\n # This bit is super unfortunate: we want to use a different\n # mypy/mypyc version, but we've already imported parts, so we\n # remove the modules that we've imported already, which will\n # let the right versions be imported by mypyc.\n del sys.modules[\"mypy\"]\n del sys.modules[\"mypy.version\"]\n del sys.modules[\"mypy.git\"]\n sys.path.insert(0, use_other_mypyc)\n\n from mypyc.build import mypycify\n\n opt_level = os.getenv(\"MYPYC_OPT_LEVEL\", \"3\")\n debug_level = os.getenv(\"MYPYC_DEBUG_LEVEL\", \"1\")\n force_multifile = os.getenv(\"MYPYC_MULTI_FILE\", \"\") == \"1\"\n ext_modules = mypycify(\n mypyc_targets + [\"--config-file=mypy_bootstrap.ini\"],\n opt_level=opt_level,\n debug_level=debug_level,\n # Use multi-file compilation mode on windows because without it\n # our Appveyor builds run out of memory sometimes.\n multi_file=sys.platform == \"win32\" or force_multifile,\n )\n assert is_list_of_setuptools_extension(ext_modules), \"Expected mypycify to use setuptools\"\n\nelse:\n ext_modules = []\n\n\nclassifiers = [\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Topic :: Software Development\",\n \"Typing :: Typed\",\n]\n\nsetup(\n name=\"mypy\",\n version=version,\n description=description,\n long_description=long_description,\n author=\"Jukka Lehtosalo\",\n author_email=\"[email protected]\",\n url=\"https://www.mypy-lang.org/\",\n license=\"MIT License\",\n py_modules=[],\n ext_modules=ext_modules,\n packages=find_packages(),\n package_data={\"mypy\": package_data},\n entry_points={\n \"console_scripts\": [\n \"mypy=mypy.__main__:console_entry\",\n \"stubgen=mypy.stubgen:main\",\n \"stubtest=mypy.stubtest:main\",\n \"dmypy=mypy.dmypy.client:console_entry\",\n \"mypyc=mypyc.__main__:main\",\n ]\n },\n classifiers=classifiers,\n 
cmdclass=cmdclass,\n # When changing this, also update mypy-requirements.txt.\n install_requires=[\n \"typing_extensions>=4.1.0\",\n \"mypy_extensions >= 1.0.0\",\n \"tomli>=1.1.0; python_version<'3.11'\",\n ],\n # Same here.\n extras_require={\n \"dmypy\": \"psutil >= 4.0\",\n \"python2\": \"\",\n \"reports\": \"lxml\",\n \"install-types\": \"pip\",\n },\n python_requires=\">=3.8\",\n include_package_data=True,\n project_urls={\n \"News\": \"https://mypy-lang.org/news.html\",\n \"Documentation\": \"https://mypy.readthedocs.io/en/stable/index.html\",\n \"Repository\": \"https://github.com/python/mypy\",\n },\n)\n",
"path": "setup.py"
}
] | [
{
"content": "#!/usr/bin/env python\n\nfrom __future__ import annotations\n\nimport glob\nimport os\nimport os.path\nimport sys\nfrom typing import TYPE_CHECKING, Any\n\nif sys.version_info < (3, 8, 0):\n sys.stderr.write(\"ERROR: You need Python 3.8 or later to use mypy.\\n\")\n exit(1)\n\n# we'll import stuff from the source tree, let's ensure is on the sys path\nsys.path.insert(0, os.path.dirname(os.path.realpath(__file__)))\n\n# This requires setuptools when building; setuptools is not needed\n# when installing from a wheel file (though it is still needed for\n# alternative forms of installing, as suggested by README.md).\nfrom setuptools import Extension, find_packages, setup\nfrom setuptools.command.build_py import build_py\n\nfrom mypy.version import __version__ as version\n\nif TYPE_CHECKING:\n from typing_extensions import TypeGuard\n\ndescription = \"Optional static typing for Python\"\nlong_description = \"\"\"\nMypy -- Optional Static Typing for Python\n=========================================\n\nAdd type annotations to your Python programs, and use mypy to type\ncheck them. Mypy is essentially a Python linter on steroids, and it\ncan catch many programming errors by analyzing your program, without\nactually having to run it. Mypy has a powerful type system with\nfeatures such as type inference, gradual typing, generics and union\ntypes.\n\"\"\".lstrip()\n\n\ndef is_list_of_setuptools_extension(items: list[Any]) -> TypeGuard[list[Extension]]:\n return all(isinstance(item, Extension) for item in items)\n\n\ndef find_package_data(base, globs, root=\"mypy\"):\n \"\"\"Find all interesting data files, for setup(package_data=)\n\n Arguments:\n root: The directory to search in.\n globs: A list of glob patterns to accept files.\n \"\"\"\n\n rv_dirs = [root for root, dirs, files in os.walk(base)]\n rv = []\n for rv_dir in rv_dirs:\n files = []\n for pat in globs:\n files += glob.glob(os.path.join(rv_dir, pat))\n if not files:\n continue\n rv.extend([os.path.relpath(f, root) for f in files])\n return rv\n\n\nclass CustomPythonBuild(build_py):\n def pin_version(self):\n path = os.path.join(self.build_lib, \"mypy\")\n self.mkpath(path)\n with open(os.path.join(path, \"version.py\"), \"w\") as stream:\n stream.write(f'__version__ = \"{version}\"\\n')\n\n def run(self):\n self.execute(self.pin_version, ())\n build_py.run(self)\n\n\ncmdclass = {\"build_py\": CustomPythonBuild}\n\npackage_data = [\"py.typed\"]\n\npackage_data += find_package_data(os.path.join(\"mypy\", \"typeshed\"), [\"*.py\", \"*.pyi\"])\npackage_data += [os.path.join(\"mypy\", \"typeshed\", \"stdlib\", \"VERSIONS\")]\n\npackage_data += find_package_data(os.path.join(\"mypy\", \"xml\"), [\"*.xsd\", \"*.xslt\", \"*.css\"])\n\nUSE_MYPYC = False\n# To compile with mypyc, a mypyc checkout must be present on the PYTHONPATH\nif len(sys.argv) > 1 and \"--use-mypyc\" in sys.argv:\n sys.argv.remove(\"--use-mypyc\")\n USE_MYPYC = True\nif os.getenv(\"MYPY_USE_MYPYC\", None) == \"1\":\n USE_MYPYC = True\n\nif USE_MYPYC:\n MYPYC_BLACKLIST = tuple(\n os.path.join(\"mypy\", x)\n for x in (\n # Need to be runnable as scripts\n \"__main__.py\",\n \"pyinfo.py\",\n os.path.join(\"dmypy\", \"__main__.py\"),\n # Uses __getattr__/__setattr__\n \"split_namespace.py\",\n # Lies to mypy about code reachability\n \"bogus_type.py\",\n # We don't populate __file__ properly at the top level or something?\n # Also I think there would be problems with how we generate version.py.\n \"version.py\",\n # Skip these to reduce the size of the build\n 
\"stubtest.py\",\n \"stubgenc.py\",\n \"stubdoc.py\",\n \"stubutil.py\",\n )\n ) + (\n # Don't want to grab this accidentally\n os.path.join(\"mypyc\", \"lib-rt\", \"setup.py\"),\n # Uses __file__ at top level https://github.com/mypyc/mypyc/issues/700\n os.path.join(\"mypyc\", \"__main__.py\"),\n )\n\n everything = [os.path.join(\"mypy\", x) for x in find_package_data(\"mypy\", [\"*.py\"])] + [\n os.path.join(\"mypyc\", x) for x in find_package_data(\"mypyc\", [\"*.py\"], root=\"mypyc\")\n ]\n # Start with all the .py files\n all_real_pys = [\n x for x in everything if not x.startswith(os.path.join(\"mypy\", \"typeshed\") + os.sep)\n ]\n # Strip out anything in our blacklist\n mypyc_targets = [x for x in all_real_pys if x not in MYPYC_BLACKLIST]\n # Strip out any test code\n mypyc_targets = [\n x\n for x in mypyc_targets\n if not x.startswith(\n (\n os.path.join(\"mypy\", \"test\") + os.sep,\n os.path.join(\"mypyc\", \"test\") + os.sep,\n os.path.join(\"mypyc\", \"doc\") + os.sep,\n os.path.join(\"mypyc\", \"test-data\") + os.sep,\n )\n )\n ]\n # ... and add back in the one test module we need\n mypyc_targets.append(os.path.join(\"mypy\", \"test\", \"visitors.py\"))\n\n # The targets come out of file system apis in an unspecified\n # order. Sort them so that the mypyc output is deterministic.\n mypyc_targets.sort()\n\n use_other_mypyc = os.getenv(\"ALTERNATE_MYPYC_PATH\", None)\n if use_other_mypyc:\n # This bit is super unfortunate: we want to use a different\n # mypy/mypyc version, but we've already imported parts, so we\n # remove the modules that we've imported already, which will\n # let the right versions be imported by mypyc.\n del sys.modules[\"mypy\"]\n del sys.modules[\"mypy.version\"]\n del sys.modules[\"mypy.git\"]\n sys.path.insert(0, use_other_mypyc)\n\n from mypyc.build import mypycify\n\n opt_level = os.getenv(\"MYPYC_OPT_LEVEL\", \"3\")\n debug_level = os.getenv(\"MYPYC_DEBUG_LEVEL\", \"1\")\n force_multifile = os.getenv(\"MYPYC_MULTI_FILE\", \"\") == \"1\"\n ext_modules = mypycify(\n mypyc_targets + [\"--config-file=mypy_bootstrap.ini\"],\n opt_level=opt_level,\n debug_level=debug_level,\n # Use multi-file compilation mode on windows because without it\n # our Appveyor builds run out of memory sometimes.\n multi_file=sys.platform == \"win32\" or force_multifile,\n )\n assert is_list_of_setuptools_extension(ext_modules), \"Expected mypycify to use setuptools\"\n\nelse:\n ext_modules = []\n\n\nclassifiers = [\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Topic :: Software Development\",\n \"Typing :: Typed\",\n]\n\nsetup(\n name=\"mypy\",\n version=version,\n description=description,\n long_description=long_description,\n author=\"Jukka Lehtosalo\",\n author_email=\"[email protected]\",\n url=\"https://www.mypy-lang.org/\",\n license=\"MIT License\",\n py_modules=[],\n ext_modules=ext_modules,\n packages=find_packages(),\n package_data={\"mypy\": package_data},\n entry_points={\n \"console_scripts\": [\n \"mypy=mypy.__main__:console_entry\",\n \"stubgen=mypy.stubgen:main\",\n \"stubtest=mypy.stubtest:main\",\n \"dmypy=mypy.dmypy.client:console_entry\",\n \"mypyc=mypyc.__main__:main\",\n ]\n },\n classifiers=classifiers,\n 
cmdclass=cmdclass,\n # When changing this, also update mypy-requirements.txt.\n install_requires=[\n \"typing_extensions>=4.1.0\",\n \"mypy_extensions >= 1.0.0\",\n \"tomli>=1.1.0; python_version<'3.11'\",\n ],\n # Same here.\n extras_require={\n \"dmypy\": \"psutil >= 4.0\",\n \"mypyc\": \"setuptools >= 50\",\n \"python2\": \"\",\n \"reports\": \"lxml\",\n \"install-types\": \"pip\",\n },\n python_requires=\">=3.8\",\n include_package_data=True,\n project_urls={\n \"News\": \"https://mypy-lang.org/news.html\",\n \"Documentation\": \"https://mypy.readthedocs.io/en/stable/index.html\",\n \"Repository\": \"https://github.com/python/mypy\",\n },\n)\n",
"path": "setup.py"
}
] | diff --git a/mypyc/doc/getting_started.rst b/mypyc/doc/getting_started.rst
index 2db8aae149ec..adc617419ffa 100644
--- a/mypyc/doc/getting_started.rst
+++ b/mypyc/doc/getting_started.rst
@@ -38,17 +38,17 @@ Installation
------------
Mypyc is shipped as part of the mypy distribution. Install mypy like
-this (you need Python 3.5 or later):
+this (you need Python 3.8 or later):
.. code-block::
- $ python3 -m pip install -U mypy
+ $ python3 -m pip install -U 'mypy[mypyc]'
On some systems you need to use this instead:
.. code-block::
- $ python -m pip install -U mypy
+ $ python -m pip install -U 'mypy[mypyc]'
Example program
---------------
diff --git a/setup.py b/setup.py
index bbb655ea4537..9b945c9047c9 100644
--- a/setup.py
+++ b/setup.py
@@ -227,6 +227,7 @@ def run(self):
# Same here.
extras_require={
"dmypy": "psutil >= 4.0",
+ "mypyc": "setuptools >= 50",
"python2": "",
"reports": "lxml",
"install-types": "pip",
|
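The setup.py change above resolves the question in the issue by declaring setuptools as an optional extra rather than a hard dependency, so only users who compile with mypyc pull it in (via `pip install -U 'mypy[mypyc]'`, as the docs change shows). A generic sketch of that packaging pattern follows; the package name `mypkg` and extra name `compiler` are placeholders, and only the `setuptools >= 50` pin comes from the diff:

```python
# setup.py -- illustrative layout, not mypy's real metadata.
from setuptools import setup

setup(
    name="mypkg",
    version="0.1",
    py_modules=["mypkg"],
    # The compiled path needs setuptools (which also provides distutils on
    # Python 3.12+, where the stdlib module was removed per PEP 632), but
    # plain installs do not, so the pin lives in an extra, not install_requires:
    extras_require={
        "compiler": ["setuptools >= 50"],
    },
    python_requires=">=3.8",
)
```

Installing with `pip install 'mypkg[compiler]'` then adds the pinned setuptools on top of the base install.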
microsoft__DeepSpeed-2698 | [BUG] the `benchmarks` folder is included upon installation
I noticed that while inspecting the conda package during my attempt to create a conda forge build.

The fix is likely as simple as adding `benchmarks` to `packages=find_packages(exclude=[....])` in the `setup.py` file.
| [
{
"content": "\"\"\"\nCopyright 2020 The Microsoft DeepSpeed Team\n\nDeepSpeed library\n\nTo build wheel on Windows:\n 1. Install pytorch, such as pytorch 1.12 + cuda 11.6\n 2. Install visual cpp build tool\n 3. Include cuda toolkit\n 4. Launch cmd console with Administrator privilege for creating required symlink folders\n\nCreate a new wheel via the following command:\n build_win.bat\n\nThe wheel will be located at: dist/*.whl\n\"\"\"\n\nimport os\nimport sys\nimport subprocess\nfrom setuptools import setup, find_packages\nfrom setuptools.command import egg_info\nimport time\n\ntorch_available = True\ntry:\n import torch\nexcept ImportError:\n torch_available = False\n print('[WARNING] Unable to import torch, pre-compiling ops will be disabled. ' \\\n 'Please visit https://pytorch.org/ to see how to properly install torch on your system.')\n\nfrom op_builder import get_default_compute_capabilities, OpBuilder\nfrom op_builder.all_ops import ALL_OPS\nfrom op_builder.builder import installed_cuda_version\n\n# fetch rocm state\nis_rocm_pytorch = OpBuilder.is_rocm_pytorch()\nrocm_version = OpBuilder.installed_rocm_version()\n\nRED_START = '\\033[31m'\nRED_END = '\\033[0m'\nERROR = f\"{RED_START} [ERROR] {RED_END}\"\n\n\ndef abort(msg):\n print(f\"{ERROR} {msg}\")\n assert False, msg\n\n\ndef fetch_requirements(path):\n with open(path, 'r') as fd:\n return [r.strip() for r in fd.readlines()]\n\n\ninstall_requires = fetch_requirements('requirements/requirements.txt')\nextras_require = {\n '1bit': [], # add cupy based on cuda/rocm version\n '1bit_mpi': fetch_requirements('requirements/requirements-1bit-mpi.txt'),\n 'readthedocs': fetch_requirements('requirements/requirements-readthedocs.txt'),\n 'dev': fetch_requirements('requirements/requirements-dev.txt'),\n 'autotuning': fetch_requirements('requirements/requirements-autotuning.txt'),\n 'autotuning_ml': fetch_requirements('requirements/requirements-autotuning-ml.txt'),\n 'sparse_attn': fetch_requirements('requirements/requirements-sparse_attn.txt'),\n 'inf': fetch_requirements('requirements/requirements-inf.txt'),\n 'sd': fetch_requirements('requirements/requirements-sd.txt')\n}\n\n# Add specific cupy version to both onebit extension variants\nif torch_available and torch.cuda.is_available():\n cupy = None\n if is_rocm_pytorch:\n rocm_major, rocm_minor = rocm_version\n # XXX cupy support for rocm 5 is not available yet\n if rocm_major <= 4:\n cupy = f\"cupy-rocm-{rocm_major}-{rocm_minor}\"\n else:\n cupy = f\"cupy-cuda{''.join(map(str,installed_cuda_version()))}\"\n if cupy:\n extras_require['1bit'].append(cupy)\n extras_require['1bit_mpi'].append(cupy)\n\n# Make an [all] extra that installs all needed dependencies\nall_extras = set()\nfor extra in extras_require.items():\n for req in extra[1]:\n all_extras.add(req)\nextras_require['all'] = list(all_extras)\n\ncmdclass = {}\n\n# For any pre-installed ops force disable ninja\nif torch_available:\n from accelerator import get_accelerator\n cmdclass['build_ext'] = get_accelerator().build_extension().with_options(\n use_ninja=False)\n\nif torch_available:\n TORCH_MAJOR = torch.__version__.split('.')[0]\n TORCH_MINOR = torch.__version__.split('.')[1]\nelse:\n TORCH_MAJOR = \"0\"\n TORCH_MINOR = \"0\"\n\nif torch_available and not torch.cuda.is_available():\n # Fix to allow docker builds, similar to https://github.com/NVIDIA/apex/issues/486\n print(\n \"[WARNING] Torch did not find cuda available, if cross-compiling or running with cpu only \"\n \"you can ignore this message. 
Adding compute capability for Pascal, Volta, and Turing \"\n \"(compute capabilities 6.0, 6.1, 6.2)\")\n if os.environ.get(\"TORCH_CUDA_ARCH_LIST\", None) is None:\n os.environ[\"TORCH_CUDA_ARCH_LIST\"] = get_default_compute_capabilities()\n\next_modules = []\n\n# Default to pre-install kernels to false so we rely on JIT on Linux, opposite on Windows.\nBUILD_OP_PLATFORM = 1 if sys.platform == \"win32\" else 0\nBUILD_OP_DEFAULT = int(os.environ.get('DS_BUILD_OPS', BUILD_OP_PLATFORM))\nprint(f\"DS_BUILD_OPS={BUILD_OP_DEFAULT}\")\n\nif BUILD_OP_DEFAULT:\n assert torch_available, \"Unable to pre-compile ops without torch installed. Please install torch before attempting to pre-compile ops.\"\n\n\ndef command_exists(cmd):\n if sys.platform == \"win32\":\n result = subprocess.Popen(f'{cmd}', stdout=subprocess.PIPE, shell=True)\n return result.wait() == 1\n else:\n result = subprocess.Popen(f'type {cmd}', stdout=subprocess.PIPE, shell=True)\n return result.wait() == 0\n\n\ndef op_envvar(op_name):\n assert hasattr(ALL_OPS[op_name], 'BUILD_VAR'), \\\n f\"{op_name} is missing BUILD_VAR field\"\n return ALL_OPS[op_name].BUILD_VAR\n\n\ndef op_enabled(op_name):\n env_var = op_envvar(op_name)\n return int(os.environ.get(env_var, BUILD_OP_DEFAULT))\n\n\ncompatible_ops = dict.fromkeys(ALL_OPS.keys(), False)\ninstall_ops = dict.fromkeys(ALL_OPS.keys(), False)\nfor op_name, builder in ALL_OPS.items():\n op_compatible = builder.is_compatible()\n compatible_ops[op_name] = op_compatible\n\n # If op is requested but not available, throw an error\n if op_enabled(op_name) and not op_compatible:\n env_var = op_envvar(op_name)\n if env_var not in os.environ:\n builder.warning(f\"One can disable {op_name} with {env_var}=0\")\n abort(f\"Unable to pre-compile {op_name}\")\n\n # if op is compatible but install is not enabled (JIT mode)\n if is_rocm_pytorch and op_compatible and not op_enabled(op_name):\n builder.hipify_extension()\n\n # If op install enabled, add builder to extensions\n if op_enabled(op_name) and op_compatible:\n assert torch_available, f\"Unable to pre-compile {op_name}, please first install torch\"\n install_ops[op_name] = op_enabled(op_name)\n ext_modules.append(builder.builder())\n\nprint(f'Install Ops={install_ops}')\n\n# Write out version/git info\ngit_hash_cmd = \"git rev-parse --short HEAD\"\ngit_branch_cmd = \"git rev-parse --abbrev-ref HEAD\"\nif command_exists('git') and 'DS_BUILD_STRING' not in os.environ:\n try:\n result = subprocess.check_output(git_hash_cmd, shell=True)\n git_hash = result.decode('utf-8').strip()\n result = subprocess.check_output(git_branch_cmd, shell=True)\n git_branch = result.decode('utf-8').strip()\n except subprocess.CalledProcessError:\n git_hash = \"unknown\"\n git_branch = \"unknown\"\nelse:\n git_hash = \"unknown\"\n git_branch = \"unknown\"\n\n\ndef create_dir_symlink(src, dest):\n if not os.path.islink(dest):\n if os.path.exists(dest):\n os.remove(dest)\n assert not os.path.exists(dest)\n os.symlink(src, dest)\n\n\nif sys.platform == \"win32\":\n # This creates a symbolic links on Windows.\n # It needs Administrator privilege to create symlinks on Windows.\n create_dir_symlink('..\\\\..\\\\csrc', '.\\\\deepspeed\\\\ops\\\\csrc')\n create_dir_symlink('..\\\\..\\\\op_builder', '.\\\\deepspeed\\\\ops\\\\op_builder')\n create_dir_symlink('..\\\\accelerator', '.\\\\deepspeed\\\\accelerator')\n egg_info.manifest_maker.template = 'MANIFEST_win.in'\n\n# Parse the DeepSpeed version string from version.txt\nversion_str = open('version.txt', 'r').read().strip()\n\n# 
Build specifiers like .devX can be added at install time. Otherwise, add the git hash.\n# example: DS_BUILD_STR=\".dev20201022\" python setup.py sdist bdist_wheel\n\n# Building wheel for distribution, update version file\nif 'DS_BUILD_STRING' in os.environ:\n # Build string env specified, probably building for distribution\n with open('build.txt', 'w') as fd:\n fd.write(os.environ.get('DS_BUILD_STRING'))\n version_str += os.environ.get('DS_BUILD_STRING')\nelif os.path.isfile('build.txt'):\n # build.txt exists, probably installing from distribution\n with open('build.txt', 'r') as fd:\n version_str += fd.read().strip()\nelse:\n # None of the above, probably installing from source\n version_str += f'+{git_hash}'\n\ntorch_version = \".\".join([TORCH_MAJOR, TORCH_MINOR])\nbf16_support = False\n# Set cuda_version to 0.0 if cpu-only\ncuda_version = \"0.0\"\nnccl_version = \"0.0\"\n# Set hip_version to 0.0 if cpu-only\nhip_version = \"0.0\"\nif torch_available and torch.version.cuda is not None:\n cuda_version = \".\".join(torch.version.cuda.split('.')[:2])\n if sys.platform != \"win32\":\n if isinstance(torch.cuda.nccl.version(), int):\n # This will break if minor version > 9\n nccl_version = \".\".join(str(torch.cuda.nccl.version())[:2])\n else:\n nccl_version = \".\".join(map(str, torch.cuda.nccl.version()[:2]))\n if hasattr(torch.cuda, 'is_bf16_supported') and torch.cuda.is_available():\n bf16_support = torch.cuda.is_bf16_supported()\nif torch_available and hasattr(torch.version, 'hip') and torch.version.hip is not None:\n hip_version = \".\".join(torch.version.hip.split('.')[:2])\ntorch_info = {\n \"version\": torch_version,\n \"bf16_support\": bf16_support,\n \"cuda_version\": cuda_version,\n \"nccl_version\": nccl_version,\n \"hip_version\": hip_version\n}\n\nprint(f\"version={version_str}, git_hash={git_hash}, git_branch={git_branch}\")\nwith open('deepspeed/git_version_info_installed.py', 'w') as fd:\n fd.write(f\"version='{version_str}'\\n\")\n fd.write(f\"git_hash='{git_hash}'\\n\")\n fd.write(f\"git_branch='{git_branch}'\\n\")\n fd.write(f\"installed_ops={install_ops}\\n\")\n fd.write(f\"compatible_ops={compatible_ops}\\n\")\n fd.write(f\"torch_info={torch_info}\\n\")\n\nprint(f'install_requires={install_requires}')\nprint(f'compatible_ops={compatible_ops}')\nprint(f'ext_modules={ext_modules}')\n\n# Parse README.md to make long_description for PyPI page.\nthisdir = os.path.abspath(os.path.dirname(__file__))\nwith open(os.path.join(thisdir, 'README.md'), encoding='utf-8') as fin:\n readme_text = fin.read()\n\nstart_time = time.time()\n\nsetup(name='deepspeed',\n version=version_str,\n description='DeepSpeed library',\n long_description=readme_text,\n long_description_content_type='text/markdown',\n author='DeepSpeed Team',\n author_email='[email protected]',\n url='http://deepspeed.ai',\n project_urls={\n 'Documentation': 'https://deepspeed.readthedocs.io',\n 'Source': 'https://github.com/microsoft/DeepSpeed',\n },\n install_requires=install_requires,\n extras_require=extras_require,\n packages=find_packages(exclude=[\n \"azure\",\n \"csrc\",\n \"docker\",\n \"docs\",\n \"examples\",\n \"op_builder\",\n \"release\",\n \"requirements\",\n \"scripts\",\n \"tests\"\n ]),\n include_package_data=True,\n scripts=[\n 'bin/deepspeed',\n 'bin/deepspeed.pt',\n 'bin/ds',\n 'bin/ds_ssh',\n 'bin/ds_report',\n 'bin/ds_bench',\n 'bin/dsr',\n 'bin/ds_elastic'\n ],\n classifiers=[\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 
3.8',\n 'Programming Language :: Python :: 3.9',\n 'Programming Language :: Python :: 3.10'\n ],\n license='MIT',\n ext_modules=ext_modules,\n cmdclass=cmdclass)\n\nend_time = time.time()\nprint(f'deepspeed build time = {end_time - start_time} secs')\n",
"path": "setup.py"
}
] | [
{
"content": "\"\"\"\nCopyright 2020 The Microsoft DeepSpeed Team\n\nDeepSpeed library\n\nTo build wheel on Windows:\n 1. Install pytorch, such as pytorch 1.12 + cuda 11.6\n 2. Install visual cpp build tool\n 3. Include cuda toolkit\n 4. Launch cmd console with Administrator privilege for creating required symlink folders\n\nCreate a new wheel via the following command:\n build_win.bat\n\nThe wheel will be located at: dist/*.whl\n\"\"\"\n\nimport os\nimport sys\nimport subprocess\nfrom setuptools import setup, find_packages\nfrom setuptools.command import egg_info\nimport time\n\ntorch_available = True\ntry:\n import torch\nexcept ImportError:\n torch_available = False\n print('[WARNING] Unable to import torch, pre-compiling ops will be disabled. ' \\\n 'Please visit https://pytorch.org/ to see how to properly install torch on your system.')\n\nfrom op_builder import get_default_compute_capabilities, OpBuilder\nfrom op_builder.all_ops import ALL_OPS\nfrom op_builder.builder import installed_cuda_version\n\n# fetch rocm state\nis_rocm_pytorch = OpBuilder.is_rocm_pytorch()\nrocm_version = OpBuilder.installed_rocm_version()\n\nRED_START = '\\033[31m'\nRED_END = '\\033[0m'\nERROR = f\"{RED_START} [ERROR] {RED_END}\"\n\n\ndef abort(msg):\n print(f\"{ERROR} {msg}\")\n assert False, msg\n\n\ndef fetch_requirements(path):\n with open(path, 'r') as fd:\n return [r.strip() for r in fd.readlines()]\n\n\ninstall_requires = fetch_requirements('requirements/requirements.txt')\nextras_require = {\n '1bit': [], # add cupy based on cuda/rocm version\n '1bit_mpi': fetch_requirements('requirements/requirements-1bit-mpi.txt'),\n 'readthedocs': fetch_requirements('requirements/requirements-readthedocs.txt'),\n 'dev': fetch_requirements('requirements/requirements-dev.txt'),\n 'autotuning': fetch_requirements('requirements/requirements-autotuning.txt'),\n 'autotuning_ml': fetch_requirements('requirements/requirements-autotuning-ml.txt'),\n 'sparse_attn': fetch_requirements('requirements/requirements-sparse_attn.txt'),\n 'inf': fetch_requirements('requirements/requirements-inf.txt'),\n 'sd': fetch_requirements('requirements/requirements-sd.txt')\n}\n\n# Add specific cupy version to both onebit extension variants\nif torch_available and torch.cuda.is_available():\n cupy = None\n if is_rocm_pytorch:\n rocm_major, rocm_minor = rocm_version\n # XXX cupy support for rocm 5 is not available yet\n if rocm_major <= 4:\n cupy = f\"cupy-rocm-{rocm_major}-{rocm_minor}\"\n else:\n cupy = f\"cupy-cuda{''.join(map(str,installed_cuda_version()))}\"\n if cupy:\n extras_require['1bit'].append(cupy)\n extras_require['1bit_mpi'].append(cupy)\n\n# Make an [all] extra that installs all needed dependencies\nall_extras = set()\nfor extra in extras_require.items():\n for req in extra[1]:\n all_extras.add(req)\nextras_require['all'] = list(all_extras)\n\ncmdclass = {}\n\n# For any pre-installed ops force disable ninja\nif torch_available:\n from accelerator import get_accelerator\n cmdclass['build_ext'] = get_accelerator().build_extension().with_options(\n use_ninja=False)\n\nif torch_available:\n TORCH_MAJOR = torch.__version__.split('.')[0]\n TORCH_MINOR = torch.__version__.split('.')[1]\nelse:\n TORCH_MAJOR = \"0\"\n TORCH_MINOR = \"0\"\n\nif torch_available and not torch.cuda.is_available():\n # Fix to allow docker builds, similar to https://github.com/NVIDIA/apex/issues/486\n print(\n \"[WARNING] Torch did not find cuda available, if cross-compiling or running with cpu only \"\n \"you can ignore this message. 
Adding compute capability for Pascal, Volta, and Turing \"\n \"(compute capabilities 6.0, 6.1, 6.2)\")\n if os.environ.get(\"TORCH_CUDA_ARCH_LIST\", None) is None:\n os.environ[\"TORCH_CUDA_ARCH_LIST\"] = get_default_compute_capabilities()\n\next_modules = []\n\n# Default to pre-install kernels to false so we rely on JIT on Linux, opposite on Windows.\nBUILD_OP_PLATFORM = 1 if sys.platform == \"win32\" else 0\nBUILD_OP_DEFAULT = int(os.environ.get('DS_BUILD_OPS', BUILD_OP_PLATFORM))\nprint(f\"DS_BUILD_OPS={BUILD_OP_DEFAULT}\")\n\nif BUILD_OP_DEFAULT:\n assert torch_available, \"Unable to pre-compile ops without torch installed. Please install torch before attempting to pre-compile ops.\"\n\n\ndef command_exists(cmd):\n if sys.platform == \"win32\":\n result = subprocess.Popen(f'{cmd}', stdout=subprocess.PIPE, shell=True)\n return result.wait() == 1\n else:\n result = subprocess.Popen(f'type {cmd}', stdout=subprocess.PIPE, shell=True)\n return result.wait() == 0\n\n\ndef op_envvar(op_name):\n assert hasattr(ALL_OPS[op_name], 'BUILD_VAR'), \\\n f\"{op_name} is missing BUILD_VAR field\"\n return ALL_OPS[op_name].BUILD_VAR\n\n\ndef op_enabled(op_name):\n env_var = op_envvar(op_name)\n return int(os.environ.get(env_var, BUILD_OP_DEFAULT))\n\n\ncompatible_ops = dict.fromkeys(ALL_OPS.keys(), False)\ninstall_ops = dict.fromkeys(ALL_OPS.keys(), False)\nfor op_name, builder in ALL_OPS.items():\n op_compatible = builder.is_compatible()\n compatible_ops[op_name] = op_compatible\n\n # If op is requested but not available, throw an error\n if op_enabled(op_name) and not op_compatible:\n env_var = op_envvar(op_name)\n if env_var not in os.environ:\n builder.warning(f\"One can disable {op_name} with {env_var}=0\")\n abort(f\"Unable to pre-compile {op_name}\")\n\n # if op is compatible but install is not enabled (JIT mode)\n if is_rocm_pytorch and op_compatible and not op_enabled(op_name):\n builder.hipify_extension()\n\n # If op install enabled, add builder to extensions\n if op_enabled(op_name) and op_compatible:\n assert torch_available, f\"Unable to pre-compile {op_name}, please first install torch\"\n install_ops[op_name] = op_enabled(op_name)\n ext_modules.append(builder.builder())\n\nprint(f'Install Ops={install_ops}')\n\n# Write out version/git info\ngit_hash_cmd = \"git rev-parse --short HEAD\"\ngit_branch_cmd = \"git rev-parse --abbrev-ref HEAD\"\nif command_exists('git') and 'DS_BUILD_STRING' not in os.environ:\n try:\n result = subprocess.check_output(git_hash_cmd, shell=True)\n git_hash = result.decode('utf-8').strip()\n result = subprocess.check_output(git_branch_cmd, shell=True)\n git_branch = result.decode('utf-8').strip()\n except subprocess.CalledProcessError:\n git_hash = \"unknown\"\n git_branch = \"unknown\"\nelse:\n git_hash = \"unknown\"\n git_branch = \"unknown\"\n\n\ndef create_dir_symlink(src, dest):\n if not os.path.islink(dest):\n if os.path.exists(dest):\n os.remove(dest)\n assert not os.path.exists(dest)\n os.symlink(src, dest)\n\n\nif sys.platform == \"win32\":\n # This creates a symbolic links on Windows.\n # It needs Administrator privilege to create symlinks on Windows.\n create_dir_symlink('..\\\\..\\\\csrc', '.\\\\deepspeed\\\\ops\\\\csrc')\n create_dir_symlink('..\\\\..\\\\op_builder', '.\\\\deepspeed\\\\ops\\\\op_builder')\n create_dir_symlink('..\\\\accelerator', '.\\\\deepspeed\\\\accelerator')\n egg_info.manifest_maker.template = 'MANIFEST_win.in'\n\n# Parse the DeepSpeed version string from version.txt\nversion_str = open('version.txt', 'r').read().strip()\n\n# 
Build specifiers like .devX can be added at install time. Otherwise, add the git hash.\n# example: DS_BUILD_STR=\".dev20201022\" python setup.py sdist bdist_wheel\n\n# Building wheel for distribution, update version file\nif 'DS_BUILD_STRING' in os.environ:\n # Build string env specified, probably building for distribution\n with open('build.txt', 'w') as fd:\n fd.write(os.environ.get('DS_BUILD_STRING'))\n version_str += os.environ.get('DS_BUILD_STRING')\nelif os.path.isfile('build.txt'):\n # build.txt exists, probably installing from distribution\n with open('build.txt', 'r') as fd:\n version_str += fd.read().strip()\nelse:\n # None of the above, probably installing from source\n version_str += f'+{git_hash}'\n\ntorch_version = \".\".join([TORCH_MAJOR, TORCH_MINOR])\nbf16_support = False\n# Set cuda_version to 0.0 if cpu-only\ncuda_version = \"0.0\"\nnccl_version = \"0.0\"\n# Set hip_version to 0.0 if cpu-only\nhip_version = \"0.0\"\nif torch_available and torch.version.cuda is not None:\n cuda_version = \".\".join(torch.version.cuda.split('.')[:2])\n if sys.platform != \"win32\":\n if isinstance(torch.cuda.nccl.version(), int):\n # This will break if minor version > 9\n nccl_version = \".\".join(str(torch.cuda.nccl.version())[:2])\n else:\n nccl_version = \".\".join(map(str, torch.cuda.nccl.version()[:2]))\n if hasattr(torch.cuda, 'is_bf16_supported') and torch.cuda.is_available():\n bf16_support = torch.cuda.is_bf16_supported()\nif torch_available and hasattr(torch.version, 'hip') and torch.version.hip is not None:\n hip_version = \".\".join(torch.version.hip.split('.')[:2])\ntorch_info = {\n \"version\": torch_version,\n \"bf16_support\": bf16_support,\n \"cuda_version\": cuda_version,\n \"nccl_version\": nccl_version,\n \"hip_version\": hip_version\n}\n\nprint(f\"version={version_str}, git_hash={git_hash}, git_branch={git_branch}\")\nwith open('deepspeed/git_version_info_installed.py', 'w') as fd:\n fd.write(f\"version='{version_str}'\\n\")\n fd.write(f\"git_hash='{git_hash}'\\n\")\n fd.write(f\"git_branch='{git_branch}'\\n\")\n fd.write(f\"installed_ops={install_ops}\\n\")\n fd.write(f\"compatible_ops={compatible_ops}\\n\")\n fd.write(f\"torch_info={torch_info}\\n\")\n\nprint(f'install_requires={install_requires}')\nprint(f'compatible_ops={compatible_ops}')\nprint(f'ext_modules={ext_modules}')\n\n# Parse README.md to make long_description for PyPI page.\nthisdir = os.path.abspath(os.path.dirname(__file__))\nwith open(os.path.join(thisdir, 'README.md'), encoding='utf-8') as fin:\n readme_text = fin.read()\n\nstart_time = time.time()\n\nsetup(name='deepspeed',\n version=version_str,\n description='DeepSpeed library',\n long_description=readme_text,\n long_description_content_type='text/markdown',\n author='DeepSpeed Team',\n author_email='[email protected]',\n url='http://deepspeed.ai',\n project_urls={\n 'Documentation': 'https://deepspeed.readthedocs.io',\n 'Source': 'https://github.com/microsoft/DeepSpeed',\n },\n install_requires=install_requires,\n extras_require=extras_require,\n packages=find_packages(exclude=[\n \"azure\",\n \"csrc\",\n \"docker\",\n \"docs\",\n \"examples\",\n \"op_builder\",\n \"release\",\n \"requirements\",\n \"scripts\",\n \"tests\",\n \"benchmarks\",\n \"accelerator\"\n ]),\n include_package_data=True,\n scripts=[\n 'bin/deepspeed',\n 'bin/deepspeed.pt',\n 'bin/ds',\n 'bin/ds_ssh',\n 'bin/ds_report',\n 'bin/ds_bench',\n 'bin/dsr',\n 'bin/ds_elastic'\n ],\n classifiers=[\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 
3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Programming Language :: Python :: 3.10'\n ],\n license='MIT',\n ext_modules=ext_modules,\n cmdclass=cmdclass)\n\nend_time = time.time()\nprint(f'deepspeed build time = {end_time - start_time} secs')\n",
"path": "setup.py"
}
] | diff --git a/.github/workflows/formatting.yml b/.github/workflows/formatting.yml
index 6ebd9c6d1e9f..f05f3056994b 100644
--- a/.github/workflows/formatting.yml
+++ b/.github/workflows/formatting.yml
@@ -22,8 +22,10 @@ jobs:
steps:
- uses: actions/checkout@v2
- - id: setup-venv
- uses: ./.github/workflows/setup-venv
+ - name: environment
+ run: |
+ which python
+ python --version
- name: Install deepspeed
run: |
@@ -32,4 +34,5 @@ jobs:
- name: Formatting checks
run: |
+ pip show pre-commit clang-format
pre-commit run --all-files
diff --git a/setup.py b/setup.py
index ae9ece39e3f1..dd942b7b37c1 100755
--- a/setup.py
+++ b/setup.py
@@ -291,7 +291,9 @@ def create_dir_symlink(src, dest):
"release",
"requirements",
"scripts",
- "tests"
+ "tests",
+ "benchmarks",
+ "accelerator"
]),
include_package_data=True,
scripts=[
|
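As the issue predicted, the fix extends the `exclude` list passed to `find_packages()` (the diff also excludes `accelerator`). One detail worth knowing when applying the same fix elsewhere: exclude patterns are matched against dotted package names, so a bare name only removes the top-level package, and a `name.*` pattern is needed to drop its subpackages as well. A small generic sketch with placeholder directory names:

```python
from setuptools import find_packages, setup

# find_packages() collects every directory tree containing __init__.py files,
# so helper trees such as benchmarks/ end up in the wheel unless excluded.
packages = find_packages(
    exclude=[
        "tests", "tests.*",            # the ".*" form also drops nested subpackages
        "benchmarks", "benchmarks.*",
    ]
)

setup(
    name="mypkg",  # placeholder name
    version="0.1",
    packages=packages,
)
```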
sopel-irc__sopel-555 | Configuration error with python 3.3
On the first run of willie 4.4.1, installed from pip, the configuration file can't be created because of a call to raw_input(), a function that was replaced by input() in Python 3.
Here is the error:
``` bash
Welcome to Willie!
I can't seem to find the configuration file, so let's generate it!
Please answer the following questions to create your configuration file:
Encountered an error while writing the config file. This shouldn't happen. Check permissions.
Traceback (most recent call last):
File "/home/willie/IRC/bin/willie", line 213, in <module>
main()
File "/home/willie/IRC/bin/willie", line 131, in main
create_config(configpath)
File "/home/willie/IRC/lib/python3.3/site-packages/willie/config.py", line 450, in create_config
config._core()
File "/home/willie/IRC/lib/python3.3/site-packages/willie/config.py", line 298, in _core
'Willie')
File "/home/willie/IRC/lib/python3.3/site-packages/willie/config.py", line 228, in interactive_add
value = raw_input(prompt + ' [%s]: ' % default) or default
NameError: global name 'raw_input' is not defined
```
If someone can reproduce the problem I'll submit a pull request
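The traceback shows `raw_input()` being called on Python 3.3, where that builtin no longer exists because it was renamed to `input()`. A generic Python 2/3 compatibility shim of the kind typically used to fix this is sketched below; the names are illustrative and not Willie's actual code:

```python
import sys

# Python 3 renamed the Python 2 builtin raw_input() to input(), so choose
# the right one once and use the alias for every interactive prompt.
if sys.version_info[0] >= 3:
    get_input = input
else:
    get_input = raw_input  # noqa: F821 -- only defined on Python 2

default = 'Willie'
nick = get_input('Enter the nickname for your bot [%s]: ' % default) or default
print('Using nick: %s' % nick)
```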
| [
{
"content": "# coding=utf8\n\"\"\"\n*Availability: 3+ for all functions; attributes may vary.*\n\nThe config class is an abstraction class for accessing the active Willie\nconfiguration file.\n\nThe Willie config file is divided to sections, and each section contains keys\nand values. A section is an attribute of the config class, and is of type\n``ConfigSection``. Each section contains the keys as attributes. For example,\nif you want to access key example from section test, use\n``config.test.example``. Note that the key names are made lower-case by the\nparser, regardless of whether they are upper-case in the file.\n\nThe ``core`` section will always be present, and contains configuration used by\nthe Willie core. Modules are allowed to read those, but must not change them.\n\nThe config file can store strings, booleans and lists. If you need to store a\nnumber, cast it to ``int()`` when reading.\n\nFor backwards compatibility, every key in the core section is an attribute of\nthe config class as well as of config.core. For new code, always specify the\nname of the section, because this behavior might be removed in the future.\n\nRunning the ``config.py`` file directly will give the user an interactive\nseries of dialogs to create the configuration file. This will guide the user\nthrough creating settings for the Willie core, the settings database, and any\nmodules which have a configuration function.\n\nThe configuration function, if used, must be declared with the signature\n``configure(config)``. To add options, use ``interactive_add``, ``add_list``\nand ``add_option``.\n\"\"\"\n#Copyright 2012, Edward Powell, embolalia.net\n#Copyright © 2012, Elad Alfassa <[email protected]>\n#Licensed under the Eiffel Forum License 2.\n\nfrom __future__ import unicode_literals\nfrom __future__ import print_function\nfrom __future__ import absolute_import\n\nimport willie.db as db\nfrom willie.tools import iteritems\nimport os\nimport sys\ntry:\n import ConfigParser\nexcept ImportError:\n import configparser as ConfigParser\nimport getpass\nimport imp\nimport willie.bot\nif sys.version_info.major >= 3:\n unicode = str\n basestring = str\n\nclass ConfigurationError(Exception):\n \"\"\" Exception type for configuration errors \"\"\"\n\n def __init__(self, value):\n self.value = value\n\n def __str__(self):\n return 'ConfigurationError: %s' % self.value\n\n\nclass Config(object):\n def __init__(self, filename, load=True, ignore_errors=False):\n \"\"\"Return a configuration object.\n\n The given filename will be associated with the configuration, and is\n the file which will be written if write() is called. If load is not\n given or True, the configuration object will load the attributes from\n the file at filename.\n\n A few default values will be set here if they are not defined in the\n config file, or a config file is not loaded. 
They are documented below.\n\n \"\"\"\n self.filename = filename\n \"\"\"The config object's associated file, as noted above.\"\"\"\n self.parser = ConfigParser.RawConfigParser(allow_no_value=True)\n if load:\n self.parser.read(self.filename)\n\n if not ignore_errors:\n #Sanity check for the configuration file:\n if not self.parser.has_section('core'):\n raise ConfigurationError('Core section missing!')\n if not self.parser.has_option('core', 'nick'):\n raise ConfigurationError(\n 'Bot IRC nick not defined,'\n ' expected option `nick` in [core] section'\n )\n if not self.parser.has_option('core', 'owner'):\n raise ConfigurationError(\n 'Bot owner not defined,'\n ' expected option `owner` in [core] section'\n )\n if not self.parser.has_option('core', 'host'):\n raise ConfigurationError(\n 'IRC server address not defined,'\n ' expceted option `host` in [core] section'\n )\n\n #Setting defaults:\n if not self.parser.has_option('core', 'port'):\n self.parser.set('core', 'port', '6667')\n if not self.parser.has_option('core', 'user'):\n self.parser.set('core', 'user', 'willie')\n if not self.parser.has_option('core', 'name'):\n self.parser.set('core', 'name',\n 'Willie Embosbot, http://willie.dftba.net')\n if not self.parser.has_option('core', 'prefix'):\n self.parser.set('core', 'prefix', r'\\.')\n if not self.parser.has_option('core', 'admins'):\n self.parser.set('core', 'admins', '')\n if not self.parser.has_option('core', 'verify_ssl'):\n self.parser.set('core', 'verify_ssl', 'True')\n if not self.parser.has_option('core', 'timeout'):\n self.parser.set('core', 'timeout', '120')\n else:\n self.parser.add_section('core')\n\n def save(self):\n \"\"\"Save all changes to the config file.\"\"\"\n cfgfile = open(self.filename, 'w')\n self.parser.write(cfgfile)\n cfgfile.flush()\n cfgfile.close()\n\n def add_section(self, name):\n \"\"\"Add a section to the config file.\n\n Returns ``False`` if already exists.\n\n \"\"\"\n try:\n return self.parser.add_section(name)\n except ConfigParser.DuplicateSectionError:\n return False\n\n def has_option(self, section, name):\n \"\"\"Check if option ``name`` exists under section ``section``.\"\"\"\n return self.parser.has_option(section, name)\n\n def has_section(self, name):\n \"\"\"Check if section ``name`` exists.\"\"\"\n return self.parser.has_section(name)\n\n class ConfigSection(object):\n\n \"\"\"Represents a section of the config file.\n\n Contains all keys in thesection as attributes.\n\n \"\"\"\n\n def __init__(self, name, items, parent):\n object.__setattr__(self, '_name', name)\n object.__setattr__(self, '_parent', parent)\n for item in items:\n value = item[1].strip()\n if not value.lower() == 'none':\n if value.lower() == 'false':\n value = False\n object.__setattr__(self, item[0], value)\n\n def __getattr__(self, name):\n return None\n\n def __setattr__(self, name, value):\n object.__setattr__(self, name, value)\n if type(value) is list:\n value = ','.join(value)\n self._parent.parser.set(self._name, name, value)\n\n def get_list(self, name):\n value = getattr(self, name)\n if not value:\n return []\n if isinstance(value, basestring):\n value = value.split(',')\n # Keep the split value, so we don't have to keep doing this\n setattr(self, name, value)\n return value\n\n def __getattr__(self, name):\n \"\"\"\"\"\"\n if name in self.parser.sections():\n items = self.parser.items(name)\n section = self.ConfigSection(name, items, self) # Return a section\n setattr(self, name, section)\n return section\n elif self.parser.has_option('core', name):\n 
return self.parser.get('core', name) # For backwards compatibility\n else:\n raise AttributeError(\"%r object has no attribute %r\"\n % (type(self).__name__, name))\n\n def interactive_add(self, section, option, prompt, default=None,\n ispass=False):\n \"\"\"Ask for the value to assign to ``option`` under ``section``.\n\n Ask user in terminal for the value to assign to ``option`` under\n ``section``. If ``default`` is passed, it will be shown as the default\n value in the prompt. If ``option`` is already defined in ``section``,\n it will be used instead of ``default``, regardless of wheather\n ``default`` is passed.\n\n \"\"\"\n if not self.parser.has_section(section):\n self.parser.add_section(section)\n if self.parser.has_option(section, option):\n atr = self.parser.get(section, option)\n if ispass:\n value = getpass.getpass(prompt + ' [%s]: ' % atr) or atr\n self.parser.set(section, option, value)\n else:\n value = raw_input(prompt + ' [%s]: ' % atr) or atr\n self.parser.set(section, option, value)\n elif default:\n if ispass:\n value = getpass.getpass(\n prompt + ' [%s]: ' % default\n ) or default\n self.parser.set(section, option, value)\n else:\n value = raw_input(prompt + ' [%s]: ' % default) or default\n self.parser.set(section, option, value)\n else:\n value = ''\n while not value:\n if ispass:\n value = getpass.getpass(prompt + ': ')\n else:\n value = raw_input(prompt + ': ')\n self.parser.set(section, option, value)\n\n def add_list(self, section, option, message, prompt):\n \"\"\"Ask for a list to assign to ``option``.\n\n Ask user in terminal for a list to assign to ``option``. If ``option``\n is already defined under ``section``, show the user the current values\n and ask if the user would like to keep them. If so, additional values\n can be entered.\n\n \"\"\"\n print(message)\n lst = []\n if self.parser.has_option(section, option) and self.parser.get(section,\n option):\n m = \"You currently have \" + self.parser.get(section, option)\n if self.option(m + '. Would you like to keep them', True):\n lst = self.parser.get(section, option).split(',')\n mem = raw_input(prompt + ' ')\n while mem:\n lst.append(mem)\n mem = raw_input(prompt + ' ')\n self.parser.set(section, option, ','.join(lst))\n\n def add_option(self, section, option, question, default=False):\n \"\"\"Ask \"y/n\" and set `option` based in the response.\n\n Show user in terminal a \"y/n\" prompt, and set `option` to True or False\n based on the response. If default is passed as true, the default will\n be shown as ``[y]``, else it will be ``[n]``. ``question`` should be\n phrased as a question, but without a question mark at the end. If\n ``option`` is already defined, it will be used instead of ``default``,\n regardless of wheather ``default`` is passed.\n\n \"\"\"\n if not self.parser.has_section(section):\n self.parser.add_section(section)\n if self.parser.has_option(section, option):\n default = self.parser.getboolean(section, option)\n answer = self.option(question, default)\n self.parser.set(section, option, str(answer))\n\n def option(self, question, default=False):\n \"\"\"Ask \"y/n\" and return the corresponding boolean answer.\n\n Show user in terminal a \"y/n\" prompt, and return true or false based on\n the response. If default is passed as true, the default will be shown\n as ``[y]``, else it will be ``[n]``. ``question`` should be phrased as\n a question, but without a question mark at the end.\n\n \"\"\"\n d = 'n'\n if default:\n d = 'y'\n ans = raw_input(question + ' (y/n)? 
[' + d + '] ')\n if not ans:\n ans = d\n return ans.lower() == 'y'\n\n def _core(self):\n self.interactive_add('core', 'nick', 'Enter the nickname for your bot',\n 'Willie')\n self.interactive_add('core', 'host', 'Enter the server to connect to',\n 'irc.dftba.net')\n self.add_option('core', 'use_ssl', 'Should the bot connect with SSL')\n if self.use_ssl == 'True':\n default_port = '6697'\n else:\n default_port = '6667'\n self.interactive_add('core', 'port', 'Enter the port to connect on',\n default_port)\n self.interactive_add(\n 'core', 'owner',\n \"Enter your own IRC name (or that of the bot's owner)\"\n )\n c = 'Enter the channels to connect to by default, one at a time.' + \\\n ' When done, hit enter again.'\n self.add_list('core', 'channels', c, 'Channel:')\n\n def _db(self):\n db.configure(self)\n self.save()\n\n def _modules(self):\n home = os.getcwd()\n modules_dir = os.path.join(home, 'modules')\n filenames = self.enumerate_modules()\n os.sys.path.insert(0, modules_dir)\n for name, filename in iteritems(filenames):\n try:\n module = imp.load_source(name, filename)\n except Exception as e:\n print(\"Error loading %s: %s (in config.py)\"\n % (name, e), file=sys.stderr)\n else:\n if hasattr(module, 'configure'):\n module.configure(self)\n self.save()\n\n def enumerate_modules(self, show_all=False):\n \"\"\"Map the names of modules to the location of their file.\n\n *Availability: 4.0+*\n\n Return a dict mapping the names of modules to the location of their\n file. This searches the regular modules directory and all directories\n specified in the `core.extra` attribute of the `config` object. If two\n modules have the same name, the last one to be found will be returned\n and the rest will be ignored. Modules are found starting in the regular\n directory, followed by `~/.willie/modules`, and then through the extra\n directories in the order that the are specified.\n\n If `show_all` is given as `True`, the `enable` and `exclude`\n configuration options will be ignored, and all modules will be shown\n (though duplicates will still be ignored as above).\n\n \"\"\"\n modules = {}\n\n # First, add modules from the regular modules directory\n this_dir = os.path.dirname(os.path.abspath(__file__))\n modules_dir = os.path.join(this_dir, 'modules')\n for fn in os.listdir(modules_dir):\n if fn.endswith('.py') and not fn.startswith('_'):\n modules[fn[:-3]] = os.path.join(modules_dir, fn)\n # Next, look in ~/.willie/modules\n if self.core.homedir is not None:\n home_modules_dir = os.path.join(self.core.homedir, 'modules')\n else:\n home_modules_dir = os.path.join(os.path.expanduser('~'), '.willie',\n 'modules')\n if not os.path.isdir(home_modules_dir):\n os.makedirs(home_modules_dir)\n for fn in os.listdir(home_modules_dir):\n if fn.endswith('.py') and not fn.startswith('_'):\n modules[fn[:-3]] = os.path.join(home_modules_dir, fn)\n\n # Last, look at all the extra directories. 
(get_list returns [] if\n # there are none or the option isn't defined, so it'll just skip this\n # bit)\n for directory in self.core.get_list('extra'):\n for fn in os.listdir(directory):\n if fn.endswith('.py') and not fn.startswith('_'):\n modules[fn[:-3]] = os.path.join(directory, fn)\n\n # If caller wants all of them, don't apply white and blacklists\n if show_all:\n return modules\n\n # Apply whitelist, if present\n enable = self.core.get_list('enable')\n if enable:\n enabled_modules = {}\n for module in enable:\n if module in modules:\n enabled_modules[module] = modules[module]\n modules = enabled_modules\n\n # Apply blacklist, if present\n exclude = self.core.get_list('exclude')\n for module in exclude:\n if module in modules:\n del modules[module]\n\n return modules\n\n\ndef wizard(section, config=None):\n dotdir = os.path.expanduser('~/.willie')\n configpath = os.path.join(dotdir, (config or 'default') + '.cfg')\n if section == 'all':\n create_config(configpath)\n elif section == 'db':\n check_dir(False)\n if not os.path.isfile(configpath):\n print(\"No config file found.\" + \\\n \" Please make one before configuring these options.\")\n sys.exit(1)\n config = Config(configpath, True)\n config._db()\n elif section == 'mod':\n check_dir(False)\n if not os.path.isfile(configpath):\n print(\"No config file found.\" + \\\n \" Please make one before configuring these options.\")\n sys.exit(1)\n config = Config(configpath, True)\n config._modules()\n\n\ndef check_dir(create=True):\n dotdir = os.path.join(os.path.expanduser('~'), '.willie')\n if not os.path.isdir(dotdir):\n if create:\n print('Creating a config directory at ~/.willie...')\n try:\n os.makedirs(dotdir)\n except Exception as e:\n print('There was a problem creating %s:' % dotdir, file=sys.stderr)\n print('%s, %s' % (e.__class__, str(e)), file=sys.stderr)\n print('Please fix this and then run Willie again.', file=sys.stderr)\n sys.exit(1)\n else:\n print(\"No config file found. Please make one before configuring these options.\")\n sys.exit(1)\n\n\ndef create_config(configpath):\n check_dir()\n print(\"Please answer the following questions\" + \\\n \" to create your configuration file:\\n\")\n try:\n config = Config(configpath, os.path.isfile(configpath))\n config._core()\n if config.option(\"Would you like to set up a settings database now\"):\n config._db()\n if config.option(\n 'Would you like to see if there are any modules'\n ' that need configuring'\n ):\n config._modules()\n config.save()\n except Exception as e:\n print(\"Encountered an error while writing the config file.\" + \\\n \" This shouldn't happen. Check permissions.\")\n raise\n sys.exit(1)\n print(\"Config file written sucessfully!\")\n",
"path": "willie/config.py"
}
] | [
{
"content": "# coding=utf8\n\"\"\"\n*Availability: 3+ for all functions; attributes may vary.*\n\nThe config class is an abstraction class for accessing the active Willie\nconfiguration file.\n\nThe Willie config file is divided to sections, and each section contains keys\nand values. A section is an attribute of the config class, and is of type\n``ConfigSection``. Each section contains the keys as attributes. For example,\nif you want to access key example from section test, use\n``config.test.example``. Note that the key names are made lower-case by the\nparser, regardless of whether they are upper-case in the file.\n\nThe ``core`` section will always be present, and contains configuration used by\nthe Willie core. Modules are allowed to read those, but must not change them.\n\nThe config file can store strings, booleans and lists. If you need to store a\nnumber, cast it to ``int()`` when reading.\n\nFor backwards compatibility, every key in the core section is an attribute of\nthe config class as well as of config.core. For new code, always specify the\nname of the section, because this behavior might be removed in the future.\n\nRunning the ``config.py`` file directly will give the user an interactive\nseries of dialogs to create the configuration file. This will guide the user\nthrough creating settings for the Willie core, the settings database, and any\nmodules which have a configuration function.\n\nThe configuration function, if used, must be declared with the signature\n``configure(config)``. To add options, use ``interactive_add``, ``add_list``\nand ``add_option``.\n\"\"\"\n#Copyright 2012, Edward Powell, embolalia.net\n#Copyright © 2012, Elad Alfassa <[email protected]>\n#Licensed under the Eiffel Forum License 2.\n\nfrom __future__ import unicode_literals\nfrom __future__ import print_function\nfrom __future__ import absolute_import\n\nimport willie.db as db\nfrom willie.tools import iteritems\nimport os\nimport sys\ntry:\n import ConfigParser\nexcept ImportError:\n import configparser as ConfigParser\nimport getpass\nimport imp\nimport willie.bot\nif sys.version_info.major >= 3:\n unicode = str\n basestring = str\n raw_input = input\n\nclass ConfigurationError(Exception):\n \"\"\" Exception type for configuration errors \"\"\"\n\n def __init__(self, value):\n self.value = value\n\n def __str__(self):\n return 'ConfigurationError: %s' % self.value\n\n\nclass Config(object):\n def __init__(self, filename, load=True, ignore_errors=False):\n \"\"\"Return a configuration object.\n\n The given filename will be associated with the configuration, and is\n the file which will be written if write() is called. If load is not\n given or True, the configuration object will load the attributes from\n the file at filename.\n\n A few default values will be set here if they are not defined in the\n config file, or a config file is not loaded. 
They are documented below.\n\n \"\"\"\n self.filename = filename\n \"\"\"The config object's associated file, as noted above.\"\"\"\n self.parser = ConfigParser.RawConfigParser(allow_no_value=True)\n if load:\n self.parser.read(self.filename)\n\n if not ignore_errors:\n #Sanity check for the configuration file:\n if not self.parser.has_section('core'):\n raise ConfigurationError('Core section missing!')\n if not self.parser.has_option('core', 'nick'):\n raise ConfigurationError(\n 'Bot IRC nick not defined,'\n ' expected option `nick` in [core] section'\n )\n if not self.parser.has_option('core', 'owner'):\n raise ConfigurationError(\n 'Bot owner not defined,'\n ' expected option `owner` in [core] section'\n )\n if not self.parser.has_option('core', 'host'):\n raise ConfigurationError(\n 'IRC server address not defined,'\n ' expceted option `host` in [core] section'\n )\n\n #Setting defaults:\n if not self.parser.has_option('core', 'port'):\n self.parser.set('core', 'port', '6667')\n if not self.parser.has_option('core', 'user'):\n self.parser.set('core', 'user', 'willie')\n if not self.parser.has_option('core', 'name'):\n self.parser.set('core', 'name',\n 'Willie Embosbot, http://willie.dftba.net')\n if not self.parser.has_option('core', 'prefix'):\n self.parser.set('core', 'prefix', r'\\.')\n if not self.parser.has_option('core', 'admins'):\n self.parser.set('core', 'admins', '')\n if not self.parser.has_option('core', 'verify_ssl'):\n self.parser.set('core', 'verify_ssl', 'True')\n if not self.parser.has_option('core', 'timeout'):\n self.parser.set('core', 'timeout', '120')\n else:\n self.parser.add_section('core')\n\n def save(self):\n \"\"\"Save all changes to the config file.\"\"\"\n cfgfile = open(self.filename, 'w')\n self.parser.write(cfgfile)\n cfgfile.flush()\n cfgfile.close()\n\n def add_section(self, name):\n \"\"\"Add a section to the config file.\n\n Returns ``False`` if already exists.\n\n \"\"\"\n try:\n return self.parser.add_section(name)\n except ConfigParser.DuplicateSectionError:\n return False\n\n def has_option(self, section, name):\n \"\"\"Check if option ``name`` exists under section ``section``.\"\"\"\n return self.parser.has_option(section, name)\n\n def has_section(self, name):\n \"\"\"Check if section ``name`` exists.\"\"\"\n return self.parser.has_section(name)\n\n class ConfigSection(object):\n\n \"\"\"Represents a section of the config file.\n\n Contains all keys in thesection as attributes.\n\n \"\"\"\n\n def __init__(self, name, items, parent):\n object.__setattr__(self, '_name', name)\n object.__setattr__(self, '_parent', parent)\n for item in items:\n value = item[1].strip()\n if not value.lower() == 'none':\n if value.lower() == 'false':\n value = False\n object.__setattr__(self, item[0], value)\n\n def __getattr__(self, name):\n return None\n\n def __setattr__(self, name, value):\n object.__setattr__(self, name, value)\n if type(value) is list:\n value = ','.join(value)\n self._parent.parser.set(self._name, name, value)\n\n def get_list(self, name):\n value = getattr(self, name)\n if not value:\n return []\n if isinstance(value, basestring):\n value = value.split(',')\n # Keep the split value, so we don't have to keep doing this\n setattr(self, name, value)\n return value\n\n def __getattr__(self, name):\n \"\"\"\"\"\"\n if name in self.parser.sections():\n items = self.parser.items(name)\n section = self.ConfigSection(name, items, self) # Return a section\n setattr(self, name, section)\n return section\n elif self.parser.has_option('core', name):\n 
return self.parser.get('core', name) # For backwards compatibility\n else:\n raise AttributeError(\"%r object has no attribute %r\"\n % (type(self).__name__, name))\n\n def interactive_add(self, section, option, prompt, default=None,\n ispass=False):\n \"\"\"Ask for the value to assign to ``option`` under ``section``.\n\n Ask user in terminal for the value to assign to ``option`` under\n ``section``. If ``default`` is passed, it will be shown as the default\n value in the prompt. If ``option`` is already defined in ``section``,\n it will be used instead of ``default``, regardless of wheather\n ``default`` is passed.\n\n \"\"\"\n if not self.parser.has_section(section):\n self.parser.add_section(section)\n if self.parser.has_option(section, option):\n atr = self.parser.get(section, option)\n if ispass:\n value = getpass.getpass(prompt + ' [%s]: ' % atr) or atr\n self.parser.set(section, option, value)\n else:\n value = raw_input(prompt + ' [%s]: ' % atr) or atr\n self.parser.set(section, option, value)\n elif default:\n if ispass:\n value = getpass.getpass(\n prompt + ' [%s]: ' % default\n ) or default\n self.parser.set(section, option, value)\n else:\n value = raw_input(prompt + ' [%s]: ' % default) or default\n self.parser.set(section, option, value)\n else:\n value = ''\n while not value:\n if ispass:\n value = getpass.getpass(prompt + ': ')\n else:\n value = raw_input(prompt + ': ')\n self.parser.set(section, option, value)\n\n def add_list(self, section, option, message, prompt):\n \"\"\"Ask for a list to assign to ``option``.\n\n Ask user in terminal for a list to assign to ``option``. If ``option``\n is already defined under ``section``, show the user the current values\n and ask if the user would like to keep them. If so, additional values\n can be entered.\n\n \"\"\"\n print(message)\n lst = []\n if self.parser.has_option(section, option) and self.parser.get(section,\n option):\n m = \"You currently have \" + self.parser.get(section, option)\n if self.option(m + '. Would you like to keep them', True):\n lst = self.parser.get(section, option).split(',')\n mem = raw_input(prompt + ' ')\n while mem:\n lst.append(mem)\n mem = raw_input(prompt + ' ')\n self.parser.set(section, option, ','.join(lst))\n\n def add_option(self, section, option, question, default=False):\n \"\"\"Ask \"y/n\" and set `option` based in the response.\n\n Show user in terminal a \"y/n\" prompt, and set `option` to True or False\n based on the response. If default is passed as true, the default will\n be shown as ``[y]``, else it will be ``[n]``. ``question`` should be\n phrased as a question, but without a question mark at the end. If\n ``option`` is already defined, it will be used instead of ``default``,\n regardless of wheather ``default`` is passed.\n\n \"\"\"\n if not self.parser.has_section(section):\n self.parser.add_section(section)\n if self.parser.has_option(section, option):\n default = self.parser.getboolean(section, option)\n answer = self.option(question, default)\n self.parser.set(section, option, str(answer))\n\n def option(self, question, default=False):\n \"\"\"Ask \"y/n\" and return the corresponding boolean answer.\n\n Show user in terminal a \"y/n\" prompt, and return true or false based on\n the response. If default is passed as true, the default will be shown\n as ``[y]``, else it will be ``[n]``. ``question`` should be phrased as\n a question, but without a question mark at the end.\n\n \"\"\"\n d = 'n'\n if default:\n d = 'y'\n ans = raw_input(question + ' (y/n)? 
[' + d + '] ')\n if not ans:\n ans = d\n return ans.lower() == 'y'\n\n def _core(self):\n self.interactive_add('core', 'nick', 'Enter the nickname for your bot',\n 'Willie')\n self.interactive_add('core', 'host', 'Enter the server to connect to',\n 'irc.dftba.net')\n self.add_option('core', 'use_ssl', 'Should the bot connect with SSL')\n if self.use_ssl == 'True':\n default_port = '6697'\n else:\n default_port = '6667'\n self.interactive_add('core', 'port', 'Enter the port to connect on',\n default_port)\n self.interactive_add(\n 'core', 'owner',\n \"Enter your own IRC name (or that of the bot's owner)\"\n )\n c = 'Enter the channels to connect to by default, one at a time.' + \\\n ' When done, hit enter again.'\n self.add_list('core', 'channels', c, 'Channel:')\n\n def _db(self):\n db.configure(self)\n self.save()\n\n def _modules(self):\n home = os.getcwd()\n modules_dir = os.path.join(home, 'modules')\n filenames = self.enumerate_modules()\n os.sys.path.insert(0, modules_dir)\n for name, filename in iteritems(filenames):\n try:\n module = imp.load_source(name, filename)\n except Exception as e:\n print(\"Error loading %s: %s (in config.py)\"\n % (name, e), file=sys.stderr)\n else:\n if hasattr(module, 'configure'):\n module.configure(self)\n self.save()\n\n def enumerate_modules(self, show_all=False):\n \"\"\"Map the names of modules to the location of their file.\n\n *Availability: 4.0+*\n\n Return a dict mapping the names of modules to the location of their\n file. This searches the regular modules directory and all directories\n specified in the `core.extra` attribute of the `config` object. If two\n modules have the same name, the last one to be found will be returned\n and the rest will be ignored. Modules are found starting in the regular\n directory, followed by `~/.willie/modules`, and then through the extra\n directories in the order that the are specified.\n\n If `show_all` is given as `True`, the `enable` and `exclude`\n configuration options will be ignored, and all modules will be shown\n (though duplicates will still be ignored as above).\n\n \"\"\"\n modules = {}\n\n # First, add modules from the regular modules directory\n this_dir = os.path.dirname(os.path.abspath(__file__))\n modules_dir = os.path.join(this_dir, 'modules')\n for fn in os.listdir(modules_dir):\n if fn.endswith('.py') and not fn.startswith('_'):\n modules[fn[:-3]] = os.path.join(modules_dir, fn)\n # Next, look in ~/.willie/modules\n if self.core.homedir is not None:\n home_modules_dir = os.path.join(self.core.homedir, 'modules')\n else:\n home_modules_dir = os.path.join(os.path.expanduser('~'), '.willie',\n 'modules')\n if not os.path.isdir(home_modules_dir):\n os.makedirs(home_modules_dir)\n for fn in os.listdir(home_modules_dir):\n if fn.endswith('.py') and not fn.startswith('_'):\n modules[fn[:-3]] = os.path.join(home_modules_dir, fn)\n\n # Last, look at all the extra directories. 
(get_list returns [] if\n # there are none or the option isn't defined, so it'll just skip this\n # bit)\n for directory in self.core.get_list('extra'):\n for fn in os.listdir(directory):\n if fn.endswith('.py') and not fn.startswith('_'):\n modules[fn[:-3]] = os.path.join(directory, fn)\n\n # If caller wants all of them, don't apply white and blacklists\n if show_all:\n return modules\n\n # Apply whitelist, if present\n enable = self.core.get_list('enable')\n if enable:\n enabled_modules = {}\n for module in enable:\n if module in modules:\n enabled_modules[module] = modules[module]\n modules = enabled_modules\n\n # Apply blacklist, if present\n exclude = self.core.get_list('exclude')\n for module in exclude:\n if module in modules:\n del modules[module]\n\n return modules\n\n\ndef wizard(section, config=None):\n dotdir = os.path.expanduser('~/.willie')\n configpath = os.path.join(dotdir, (config or 'default') + '.cfg')\n if section == 'all':\n create_config(configpath)\n elif section == 'db':\n check_dir(False)\n if not os.path.isfile(configpath):\n print(\"No config file found.\" + \\\n \" Please make one before configuring these options.\")\n sys.exit(1)\n config = Config(configpath, True)\n config._db()\n elif section == 'mod':\n check_dir(False)\n if not os.path.isfile(configpath):\n print(\"No config file found.\" + \\\n \" Please make one before configuring these options.\")\n sys.exit(1)\n config = Config(configpath, True)\n config._modules()\n\n\ndef check_dir(create=True):\n dotdir = os.path.join(os.path.expanduser('~'), '.willie')\n if not os.path.isdir(dotdir):\n if create:\n print('Creating a config directory at ~/.willie...')\n try:\n os.makedirs(dotdir)\n except Exception as e:\n print('There was a problem creating %s:' % dotdir, file=sys.stderr)\n print('%s, %s' % (e.__class__, str(e)), file=sys.stderr)\n print('Please fix this and then run Willie again.', file=sys.stderr)\n sys.exit(1)\n else:\n print(\"No config file found. Please make one before configuring these options.\")\n sys.exit(1)\n\n\ndef create_config(configpath):\n check_dir()\n print(\"Please answer the following questions\" + \\\n \" to create your configuration file:\\n\")\n try:\n config = Config(configpath, os.path.isfile(configpath))\n config._core()\n if config.option(\"Would you like to set up a settings database now\"):\n config._db()\n if config.option(\n 'Would you like to see if there are any modules'\n ' that need configuring'\n ):\n config._modules()\n config.save()\n except Exception as e:\n print(\"Encountered an error while writing the config file.\" + \\\n \" This shouldn't happen. Check permissions.\")\n raise\n sys.exit(1)\n print(\"Config file written sucessfully!\")\n",
"path": "willie/config.py"
}
] | diff --git a/willie/config.py b/willie/config.py
index 092e674e0f..9db78041ff 100644
--- a/willie/config.py
+++ b/willie/config.py
@@ -53,6 +53,7 @@
if sys.version_info.major >= 3:
unicode = str
basestring = str
+ raw_input = input
class ConfigurationError(Exception):
""" Exception type for configuration errors """
|
xonsh__xonsh-4631 | Interactive printing fails for objects that implement hasattr in a non-standard way
Interactive printing fails for objects that implement `hasattr` in a non-standard way.
For example, in the popular [BeautifulSoup](https://pypi.org/project/beautifulsoup4/) library, some objects implement `__getattr__` so that attribute lookups never raise; as a result, `hasattr` reports True for any attribute name, even when the value it resolves to is only `None`.
This causes
```python
if hasattr(obj, "xonsh_display"):
return obj.xonsh_display()
```
in `pretty.py` to fail.
```console
$ import bs4
$ bs4.BeautifulSoup("<html></html>", 'html.parser')
$ ... traceback: TypeError: 'NoneType' object is not callable
```
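The mechanism is easy to reproduce without bs4. Below is a minimal sketch (the class `Permissive` is hypothetical, not part of BeautifulSoup) showing why the `hasattr` check passes while the subsequent call fails:
```python
class Permissive:
    """Hypothetical stand-in for a bs4-style object: __getattr__ never raises."""

    def __getattr__(self, name):
        # Any unknown attribute resolves to None instead of raising AttributeError.
        return None


obj = Permissive()
print(hasattr(obj, "xonsh_display"))  # True, because the lookup did not raise
print(getattr(obj, "xonsh_display"))  # None, there is no real method behind it

try:
    obj.xonsh_display()
except TypeError as exc:
    print(exc)  # 'NoneType' object is not callable
```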
## xonfig
<details>
```
$ xonfig
+------------------+---------------------+
| xonsh | 0.11.0.dev40.dev40 |
| Git SHA | 70dd8bf2 |
| Commit Date | Jan 7 22:34:52 2022 |
| Python | 3.6.15 |
| PLY | 3.11 |
| have readline | True |
| prompt toolkit | 3.0.7 |
| shell type | prompt_toolkit |
| history backend | json |
| pygments | 2.11.2 |
| on posix | True |
| on linux | True |
| distro | unknown |
| on wsl | False |
| on darwin | False |
| on windows | False |
| on cygwin | False |
| on msys2 | False |
| is superuser | True |
| default encoding | utf-8 |
| xonsh encoding | utf-8 |
| encoding errors | surrogateescape |
| on jupyter | False |
| jupyter kernel | None |
| xontrib | [] |
| RC file | [] |
+------------------+---------------------+
```
</details>
## Expected Behavior
The value gets printed.
## Current Behavior
A `TypeError: 'NoneType' object is not callable` is raised instead of the object's representation being printed (see the traceback below).
### Traceback (if applicable)
```
Traceback (most recent call last):
File "/usr/local/lib/python3.9/dist-packages/xonsh/__amalgam__.py", line 16776, in default
run_compiled_code(code, self.ctx, None, "single")
File "/usr/local/lib/python3.9/dist-packages/xonsh/__amalgam__.py", line 3563, in run_compiled_code
func(code, glb, loc)
File "<xonsh-code>", line 1, in <module>
File "/usr/local/lib/python3.9/dist-packages/xonsh/__amalgam__.py", line 21612, in _pprint_displayhook
printed_val = pretty(value)
File "/usr/local/lib/python3.9/dist-packages/xonsh/__amalgam__.py", line 2034, in pretty
return obj.xonsh_display()
TypeError: 'NoneType' object is not callable
```
## Steps to Reproduce
Described in first section.
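
For reference, the patch attached to this record replaces the `hasattr` test in `pretty()` with the module's existing `_safe_getattr` helper, so a lookup that resolves to `None` (or raises) no longer counts as having a display hook. A rough sketch of that style of check follows; the `display` wrapper is only illustrative, since in xonsh the check lives at the top of `pretty()`:
```python
def _safe_getattr(obj, attr, default=None):
    """Like getattr, but returns ``default`` on any exception instead of raising."""
    try:
        return getattr(obj, attr, default)
    except Exception:
        return default


def display(obj):
    hook = _safe_getattr(obj, "xonsh_display")
    if hook:  # None or any other falsy lookup result means "no display hook"
        return hook()
    return repr(obj)  # fall back to a plain repr in this sketch
```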
## For community
⬇️ **Please click the 👍 reaction instead of leaving a `+1` or 👍 comment**
| [
{
"content": "\"\"\"\nPython advanced pretty printer. This pretty printer is intended to\nreplace the old `pprint` python module which does not allow developers\nto provide their own pretty print callbacks.\n\nThis module is based on ruby's `prettyprint.rb` library by `Tanaka Akira`.\n\nThe following implementations were forked from the IPython project:\n* Copyright (c) 2008-2014, IPython Development Team\n* Copyright (C) 2001-2007 Fernando Perez <[email protected]>\n* Copyright (c) 2001, Janko Hauser <[email protected]>\n* Copyright (c) 2001, Nathaniel Gray <[email protected]>\n\nExample Usage\n-------------\n\nTo directly print the representation of an object use `pprint`::\n\n from pretty import pretty_print\n pretty_pprint(complex_object)\n\nTo get a string of the output use `pretty`::\n\n from pretty import pretty\n string = pretty(complex_object)\n\n\nExtending\n---------\n\nThe pretty library allows developers to add pretty printing rules for their\nown objects. This process is straightforward. All you have to do is to\nadd a `_repr_pretty_` method to your object and call the methods on the\npretty printer passed::\n\n class MyObject(object):\n\n def _repr_pretty_(self, p, cycle):\n ...\n\nHere is an example implementation of a `_repr_pretty_` method for a list\nsubclass::\n\n class MyList(list):\n\n def _repr_pretty_(self, p, cycle):\n if cycle:\n p.text('MyList(...)')\n else:\n with p.group(8, 'MyList([', '])'):\n for idx, item in enumerate(self):\n if idx:\n p.text(',')\n p.breakable()\n p.pretty(item)\n\nThe `cycle` parameter is `True` if pretty detected a cycle. You *have* to\nreact to that or the result is an infinite loop. `p.text()` just adds\nnon breaking text to the output, `p.breakable()` either adds a whitespace\nor breaks here. If you pass it an argument it's used instead of the\ndefault space. `p.pretty` prettyprints another object using the pretty print\nmethod.\n\nThe first parameter to the `group` function specifies the extra indentation\nof the next line. In this example the next item will either be on the same\nline (if the items are short enough) or aligned with the right edge of the\nopening bracket of `MyList`.\n\nIf you just want to indent something you can use the group function\nwithout open / close parameters. 
You can also use this code::\n\n with p.indent(2):\n ...\n\n\n:copyright: 2007 by Armin Ronacher.\n Portions (c) 2009 by Robert Kern.\n:license: BSD License.\n\"\"\"\nimport io\nimport re\nimport sys\nimport types\nimport datetime\nimport contextlib\nimport collections\n\nfrom xonsh.lazyasd import LazyObject, lazyobject\n\n__all__ = [\n \"pretty\",\n \"pretty_print\",\n \"PrettyPrinter\",\n \"RepresentationPrinter\",\n \"for_type\",\n \"for_type_by_name\",\n]\n\n\nMAX_SEQ_LENGTH = 1000\n\n\ndef _safe_getattr(obj, attr, default=None):\n \"\"\"Safe version of getattr.\n\n Same as getattr, but will return ``default`` on any Exception,\n rather than raising.\n \"\"\"\n try:\n return getattr(obj, attr, default)\n except Exception:\n return default\n\n\ndef pretty(\n obj, verbose=False, max_width=79, newline=\"\\n\", max_seq_length=MAX_SEQ_LENGTH\n):\n \"\"\"\n Pretty print the object's representation.\n \"\"\"\n if hasattr(obj, \"xonsh_display\"):\n return obj.xonsh_display()\n\n stream = io.StringIO()\n printer = RepresentationPrinter(\n stream, verbose, max_width, newline, max_seq_length=max_seq_length\n )\n printer.pretty(obj)\n printer.flush()\n return stream.getvalue()\n\n\ndef pretty_print(\n obj, verbose=False, max_width=79, newline=\"\\n\", max_seq_length=MAX_SEQ_LENGTH\n):\n \"\"\"\n Like pretty() but print to stdout.\n \"\"\"\n printer = RepresentationPrinter(\n sys.stdout, verbose, max_width, newline, max_seq_length=max_seq_length\n )\n printer.pretty(obj)\n printer.flush()\n sys.stdout.write(newline)\n sys.stdout.flush()\n\n\nclass _PrettyPrinterBase:\n @contextlib.contextmanager\n def indent(self, indent):\n \"\"\"with statement support for indenting/dedenting.\"\"\"\n self.indentation += indent\n try:\n yield\n finally:\n self.indentation -= indent\n\n @contextlib.contextmanager\n def group(self, indent=0, open=\"\", close=\"\"):\n \"\"\"like begin_group / end_group but for the with statement.\"\"\"\n self.begin_group(indent, open)\n try:\n yield\n finally:\n self.end_group(indent, close)\n\n\nclass PrettyPrinter(_PrettyPrinterBase):\n \"\"\"\n Baseclass for the `RepresentationPrinter` prettyprinter that is used to\n generate pretty reprs of objects. 
Contrary to the `RepresentationPrinter`\n this printer knows nothing about the default pprinters or the `_repr_pretty_`\n callback method.\n \"\"\"\n\n def __init__(\n self, output, max_width=79, newline=\"\\n\", max_seq_length=MAX_SEQ_LENGTH\n ):\n self.output = output\n self.max_width = max_width\n self.newline = newline\n self.max_seq_length = max_seq_length\n self.output_width = 0\n self.buffer_width = 0\n self.buffer = collections.deque()\n\n root_group = Group(0)\n self.group_stack = [root_group]\n self.group_queue = GroupQueue(root_group)\n self.indentation = 0\n\n def _break_outer_groups(self):\n while self.max_width < self.output_width + self.buffer_width:\n group = self.group_queue.deq()\n if not group:\n return\n while group.breakables:\n x = self.buffer.popleft()\n self.output_width = x.output(self.output, self.output_width)\n self.buffer_width -= x.width\n while self.buffer and isinstance(self.buffer[0], Text):\n x = self.buffer.popleft()\n self.output_width = x.output(self.output, self.output_width)\n self.buffer_width -= x.width\n\n def text(self, obj):\n \"\"\"Add literal text to the output.\"\"\"\n width = len(obj)\n if self.buffer:\n text = self.buffer[-1]\n if not isinstance(text, Text):\n text = Text()\n self.buffer.append(text)\n text.add(obj, width)\n self.buffer_width += width\n self._break_outer_groups()\n else:\n self.output.write(obj)\n self.output_width += width\n\n def breakable(self, sep=\" \"):\n \"\"\"\n Add a breakable separator to the output. This does not mean that it\n will automatically break here. If no breaking on this position takes\n place the `sep` is inserted which default to one space.\n \"\"\"\n width = len(sep)\n group = self.group_stack[-1]\n if group.want_break:\n self.flush()\n self.output.write(self.newline)\n self.output.write(\" \" * self.indentation)\n self.output_width = self.indentation\n self.buffer_width = 0\n else:\n self.buffer.append(Breakable(sep, width, self))\n self.buffer_width += width\n self._break_outer_groups()\n\n def break_(self):\n \"\"\"\n Explicitly insert a newline into the output, maintaining correct indentation.\n \"\"\"\n self.flush()\n self.output.write(self.newline)\n self.output.write(\" \" * self.indentation)\n self.output_width = self.indentation\n self.buffer_width = 0\n\n def begin_group(self, indent=0, open=\"\"):\n \"\"\"\n Begin a group. If you want support for python < 2.5 which doesn't has\n the with statement this is the preferred way:\n\n p.begin_group(1, '{')\n ...\n p.end_group(1, '}')\n\n The python 2.5 expression would be this:\n\n with p.group(1, '{', '}'):\n ...\n\n The first parameter specifies the indentation for the next line (usually\n the width of the opening text), the second the opening text. All\n parameters are optional.\n \"\"\"\n if open:\n self.text(open)\n group = Group(self.group_stack[-1].depth + 1)\n self.group_stack.append(group)\n self.group_queue.enq(group)\n self.indentation += indent\n\n def _enumerate(self, seq):\n \"\"\"like enumerate, but with an upper limit on the number of items\"\"\"\n for idx, x in enumerate(seq):\n if self.max_seq_length and idx >= self.max_seq_length:\n self.text(\",\")\n self.breakable()\n self.text(\"...\")\n return\n yield idx, x\n\n def end_group(self, dedent=0, close=\"\"):\n \"\"\"End a group. 
See `begin_group` for more details.\"\"\"\n self.indentation -= dedent\n group = self.group_stack.pop()\n if not group.breakables:\n self.group_queue.remove(group)\n if close:\n self.text(close)\n\n def flush(self):\n \"\"\"Flush data that is left in the buffer.\"\"\"\n for data in self.buffer:\n self.output_width += data.output(self.output, self.output_width)\n self.buffer.clear()\n self.buffer_width = 0\n\n\ndef _get_mro(obj_class):\n \"\"\"Get a reasonable method resolution order of a class and its superclasses\n for both old-style and new-style classes.\n \"\"\"\n if not hasattr(obj_class, \"__mro__\"):\n # Old-style class. Mix in object to make a fake new-style class.\n try:\n obj_class = type(obj_class.__name__, (obj_class, object), {})\n except TypeError:\n # Old-style extension type that does not descend from object.\n # FIXME: try to construct a more thorough MRO.\n mro = [obj_class]\n else:\n mro = obj_class.__mro__[1:-1]\n else:\n mro = obj_class.__mro__\n return mro\n\n\nclass RepresentationPrinter(PrettyPrinter):\n \"\"\"\n Special pretty printer that has a `pretty` method that calls the pretty\n printer for a python object.\n\n This class stores processing data on `self` so you must *never* use\n this class in a threaded environment. Always lock it or reinstantiate\n it.\n\n Instances also have a verbose flag callbacks can access to control their\n output. For example the default instance repr prints all attributes and\n methods that are not prefixed by an underscore if the printer is in\n verbose mode.\n \"\"\"\n\n def __init__(\n self,\n output,\n verbose=False,\n max_width=79,\n newline=\"\\n\",\n singleton_pprinters=None,\n type_pprinters=None,\n deferred_pprinters=None,\n max_seq_length=MAX_SEQ_LENGTH,\n ):\n\n PrettyPrinter.__init__(\n self, output, max_width, newline, max_seq_length=max_seq_length\n )\n self.verbose = verbose\n self.stack = []\n if singleton_pprinters is None:\n singleton_pprinters = _singleton_pprinters.copy()\n self.singleton_pprinters = singleton_pprinters\n if type_pprinters is None:\n type_pprinters = _type_pprinters.copy()\n self.type_pprinters = type_pprinters\n if deferred_pprinters is None:\n deferred_pprinters = _deferred_type_pprinters.copy()\n self.deferred_pprinters = deferred_pprinters\n\n def pretty(self, obj):\n \"\"\"Pretty print the given object.\"\"\"\n obj_id = id(obj)\n cycle = obj_id in self.stack\n self.stack.append(obj_id)\n self.begin_group()\n try:\n obj_class = _safe_getattr(obj, \"__class__\", None) or type(obj)\n # First try to find registered singleton printers for the type.\n try:\n printer = self.singleton_pprinters[obj_id]\n except (TypeError, KeyError):\n pass\n else:\n return printer(obj, self, cycle)\n # Next walk the mro and check for either:\n # 1) a registered printer\n # 2) a _repr_pretty_ method\n for cls in _get_mro(obj_class):\n if cls in self.type_pprinters:\n # printer registered in self.type_pprinters\n return self.type_pprinters[cls](obj, self, cycle)\n else:\n # deferred printer\n printer = self._in_deferred_types(cls)\n if printer is not None:\n return printer(obj, self, cycle)\n else:\n # Finally look for special method names.\n # Some objects automatically create any requested\n # attribute. 
Try to ignore most of them by checking for\n # callability.\n if \"_repr_pretty_\" in cls.__dict__:\n meth = cls._repr_pretty_\n if callable(meth):\n return meth(obj, self, cycle)\n return _default_pprint(obj, self, cycle)\n finally:\n self.end_group()\n self.stack.pop()\n\n def _in_deferred_types(self, cls):\n \"\"\"\n Check if the given class is specified in the deferred type registry.\n\n Returns the printer from the registry if it exists, and None if the\n class is not in the registry. Successful matches will be moved to the\n regular type registry for future use.\n \"\"\"\n mod = _safe_getattr(cls, \"__module__\", None)\n name = _safe_getattr(cls, \"__name__\", None)\n key = (mod, name)\n printer = None\n if key in self.deferred_pprinters:\n # Move the printer over to the regular registry.\n printer = self.deferred_pprinters.pop(key)\n self.type_pprinters[cls] = printer\n return printer\n\n\nclass Printable:\n def output(self, stream, output_width):\n return output_width\n\n\nclass Text(Printable):\n def __init__(self):\n self.objs = []\n self.width = 0\n\n def output(self, stream, output_width):\n for obj in self.objs:\n stream.write(obj)\n return output_width + self.width\n\n def add(self, obj, width):\n self.objs.append(obj)\n self.width += width\n\n\nclass Breakable(Printable):\n def __init__(self, seq, width, pretty):\n self.obj = seq\n self.width = width\n self.pretty = pretty\n self.indentation = pretty.indentation\n self.group = pretty.group_stack[-1]\n self.group.breakables.append(self)\n\n def output(self, stream, output_width):\n self.group.breakables.popleft()\n if self.group.want_break:\n stream.write(self.pretty.newline)\n stream.write(\" \" * self.indentation)\n return self.indentation\n if not self.group.breakables:\n self.pretty.group_queue.remove(self.group)\n stream.write(self.obj)\n return output_width + self.width\n\n\nclass Group(Printable):\n def __init__(self, depth):\n self.depth = depth\n self.breakables = collections.deque()\n self.want_break = False\n\n\nclass GroupQueue:\n def __init__(self, *groups):\n self.queue = []\n for group in groups:\n self.enq(group)\n\n def enq(self, group):\n depth = group.depth\n while depth > len(self.queue) - 1:\n self.queue.append([])\n self.queue[depth].append(group)\n\n def deq(self):\n for stack in self.queue:\n for idx, group in enumerate(reversed(stack)):\n if group.breakables:\n del stack[idx]\n group.want_break = True\n return group\n for group in stack:\n group.want_break = True\n del stack[:]\n\n def remove(self, group):\n try:\n self.queue[group.depth].remove(group)\n except ValueError:\n pass\n\n\n@lazyobject\ndef _baseclass_reprs():\n try:\n br = (object.__repr__, types.InstanceType.__repr__)\n except AttributeError: # Python 3\n br = (object.__repr__,)\n return br\n\n\ndef _default_pprint(obj, p, cycle):\n \"\"\"\n The default print function. Used if an object does not provide one and\n it's none of the builtin objects.\n \"\"\"\n klass = _safe_getattr(obj, \"__class__\", None) or type(obj)\n if _safe_getattr(klass, \"__repr__\", None) not in _baseclass_reprs:\n # A user-provided repr. 
Find newlines and replace them with p.break_()\n _repr_pprint(obj, p, cycle)\n return\n p.begin_group(1, \"<\")\n p.pretty(klass)\n p.text(\" at 0x%x\" % id(obj))\n if cycle:\n p.text(\" ...\")\n elif p.verbose:\n first = True\n for key in dir(obj):\n if not key.startswith(\"_\"):\n try:\n value = getattr(obj, key)\n except AttributeError:\n continue\n if isinstance(value, types.MethodType):\n continue\n if not first:\n p.text(\",\")\n p.breakable()\n p.text(key)\n p.text(\"=\")\n step = len(key) + 1\n p.indentation += step\n p.pretty(value)\n p.indentation -= step\n first = False\n p.end_group(1, \">\")\n\n\ndef _seq_pprinter_factory(start, end, basetype):\n \"\"\"\n Factory that returns a pprint function useful for sequences. Used by\n the default pprint for tuples, dicts, and lists.\n \"\"\"\n\n def inner(obj, p, cycle):\n typ = type(obj)\n if (\n basetype is not None\n and typ is not basetype\n and typ.__repr__ != basetype.__repr__\n ):\n # If the subclass provides its own repr, use it instead.\n return p.text(typ.__repr__(obj))\n\n if cycle:\n return p.text(start + \"...\" + end)\n step = len(start)\n p.begin_group(step, start)\n for idx, x in p._enumerate(obj):\n if idx:\n p.text(\",\")\n p.breakable()\n p.pretty(x)\n if len(obj) == 1 and type(obj) is tuple:\n # Special case for 1-item tuples.\n p.text(\",\")\n p.end_group(step, end)\n\n return inner\n\n\ndef _set_pprinter_factory(start, end, basetype):\n \"\"\"\n Factory that returns a pprint function useful for sets and frozensets.\n \"\"\"\n\n def inner(obj, p, cycle):\n typ = type(obj)\n if (\n basetype is not None\n and typ is not basetype\n and typ.__repr__ != basetype.__repr__\n ):\n # If the subclass provides its own repr, use it instead.\n return p.text(typ.__repr__(obj))\n\n if cycle:\n return p.text(start + \"...\" + end)\n if len(obj) == 0:\n # Special case.\n p.text(basetype.__name__ + \"()\")\n else:\n step = len(start)\n p.begin_group(step, start)\n # Like dictionary keys, we will try to sort the items if there aren't too many\n items = obj\n if not (p.max_seq_length and len(obj) >= p.max_seq_length):\n try:\n items = sorted(obj)\n except Exception:\n # Sometimes the items don't sort.\n pass\n for idx, x in p._enumerate(items):\n if idx:\n p.text(\",\")\n p.breakable()\n p.pretty(x)\n p.end_group(step, end)\n\n return inner\n\n\ndef _dict_pprinter_factory(start, end, basetype=None):\n \"\"\"\n Factory that returns a pprint function used by the default pprint of\n dicts and dict proxies.\n \"\"\"\n\n def inner(obj, p, cycle):\n typ = type(obj)\n if (\n basetype is not None\n and typ is not basetype\n and typ.__repr__ != basetype.__repr__\n ):\n # If the subclass provides its own repr, use it instead.\n return p.text(typ.__repr__(obj))\n\n if cycle:\n return p.text(\"{...}\")\n p.begin_group(1, start)\n keys = obj.keys()\n # if dict isn't large enough to be truncated, sort keys before displaying\n if not (p.max_seq_length and len(obj) >= p.max_seq_length):\n try:\n keys = sorted(keys)\n except Exception:\n # Sometimes the keys don't sort.\n pass\n for idx, key in p._enumerate(keys):\n if idx:\n p.text(\",\")\n p.breakable()\n p.pretty(key)\n p.text(\": \")\n p.pretty(obj[key])\n p.end_group(1, end)\n\n return inner\n\n\ndef _super_pprint(obj, p, cycle):\n \"\"\"The pprint for the super type.\"\"\"\n p.begin_group(8, \"<super: \")\n p.pretty(obj.__thisclass__)\n p.text(\",\")\n p.breakable()\n p.pretty(obj.__self__)\n p.end_group(8, \">\")\n\n\ndef _re_pattern_pprint(obj, p, cycle):\n \"\"\"The pprint function for regular 
expression patterns.\"\"\"\n p.text(\"re.compile(\")\n pattern = repr(obj.pattern)\n if pattern[:1] in \"uU\":\n pattern = pattern[1:]\n prefix = \"ur\"\n else:\n prefix = \"r\"\n pattern = prefix + pattern.replace(\"\\\\\\\\\", \"\\\\\")\n p.text(pattern)\n if obj.flags:\n p.text(\",\")\n p.breakable()\n done_one = False\n for flag in (\n \"TEMPLATE\",\n \"IGNORECASE\",\n \"LOCALE\",\n \"MULTILINE\",\n \"DOTALL\",\n \"UNICODE\",\n \"VERBOSE\",\n \"DEBUG\",\n ):\n if obj.flags & getattr(re, flag):\n if done_one:\n p.text(\"|\")\n p.text(\"re.\" + flag)\n done_one = True\n p.text(\")\")\n\n\ndef _type_pprint(obj, p, cycle):\n \"\"\"The pprint for classes and types.\"\"\"\n # Heap allocated types might not have the module attribute,\n # and others may set it to None.\n\n # Checks for a __repr__ override in the metaclass\n if type(obj).__repr__ is not type.__repr__:\n _repr_pprint(obj, p, cycle)\n return\n\n mod = _safe_getattr(obj, \"__module__\", None)\n try:\n name = obj.__qualname__\n if not isinstance(name, str):\n # This can happen if the type implements __qualname__ as a property\n # or other descriptor in Python 2.\n raise Exception(\"Try __name__\")\n except Exception:\n name = obj.__name__\n if not isinstance(name, str):\n name = \"<unknown type>\"\n\n if mod in (None, \"__builtin__\", \"builtins\", \"exceptions\"):\n p.text(name)\n else:\n p.text(mod + \".\" + name)\n\n\ndef _repr_pprint(obj, p, cycle):\n \"\"\"A pprint that just redirects to the normal repr function.\"\"\"\n # Find newlines and replace them with p.break_()\n output = repr(obj)\n for idx, output_line in enumerate(output.splitlines()):\n if idx:\n p.break_()\n p.text(output_line)\n\n\ndef _function_pprint(obj, p, cycle):\n \"\"\"Base pprint for all functions and builtin functions.\"\"\"\n name = _safe_getattr(obj, \"__qualname__\", obj.__name__)\n mod = obj.__module__\n if mod and mod not in (\"__builtin__\", \"builtins\", \"exceptions\"):\n name = mod + \".\" + name\n p.text(\"<function %s>\" % name)\n\n\ndef _exception_pprint(obj, p, cycle):\n \"\"\"Base pprint for all exceptions.\"\"\"\n name = getattr(obj.__class__, \"__qualname__\", obj.__class__.__name__)\n if obj.__class__.__module__ not in (\"exceptions\", \"builtins\"):\n name = f\"{obj.__class__.__module__}.{name}\"\n step = len(name) + 1\n p.begin_group(step, name + \"(\")\n for idx, arg in enumerate(getattr(obj, \"args\", ())):\n if idx:\n p.text(\",\")\n p.breakable()\n p.pretty(arg)\n p.end_group(step, \")\")\n\n\n@lazyobject\ndef _type_pprinters():\n #: printers for builtin types\n tp = {\n int: _repr_pprint,\n float: _repr_pprint,\n str: _repr_pprint,\n tuple: _seq_pprinter_factory(\"(\", \")\", tuple),\n list: _seq_pprinter_factory(\"[\", \"]\", list),\n dict: _dict_pprinter_factory(\"{\", \"}\", dict),\n set: _set_pprinter_factory(\"{\", \"}\", set),\n frozenset: _set_pprinter_factory(\"frozenset({\", \"})\", frozenset),\n super: _super_pprint,\n type(re.compile(\"\")): _re_pattern_pprint,\n type: _type_pprint,\n types.FunctionType: _function_pprint,\n types.BuiltinFunctionType: _function_pprint,\n types.MethodType: _repr_pprint,\n datetime.datetime: _repr_pprint,\n datetime.timedelta: _repr_pprint,\n }\n #: the exception base\n try:\n _exception_base = BaseException\n except NameError:\n _exception_base = Exception\n tp[_exception_base] = _exception_pprint\n try:\n tp[types.DictProxyType] = _dict_pprinter_factory(\"<dictproxy {\", \"}>\")\n tp[types.ClassType] = _type_pprint\n tp[types.SliceType] = _repr_pprint\n except AttributeError: # Python 
3\n tp[slice] = _repr_pprint\n try:\n tp[xrange] = _repr_pprint\n tp[long] = _repr_pprint\n tp[unicode] = _repr_pprint\n except NameError:\n tp[range] = _repr_pprint\n tp[bytes] = _repr_pprint\n return tp\n\n\n#: printers for types specified by name\n@lazyobject\ndef _deferred_type_pprinters():\n dtp = {}\n for_type_by_name(\"collections\", \"defaultdict\", _defaultdict_pprint, dtp=dtp)\n for_type_by_name(\"collections\", \"OrderedDict\", _ordereddict_pprint, dtp=dtp)\n for_type_by_name(\"collections\", \"deque\", _deque_pprint, dtp=dtp)\n for_type_by_name(\"collections\", \"Counter\", _counter_pprint, dtp=dtp)\n return dtp\n\n\ndef for_type(typ, func):\n \"\"\"\n Add a pretty printer for a given type.\n \"\"\"\n oldfunc = _type_pprinters.get(typ, None)\n if func is not None:\n # To support easy restoration of old pprinters, we need to ignore Nones.\n _type_pprinters[typ] = func\n return oldfunc\n\n\ndef for_type_by_name(type_module, type_name, func, dtp=None):\n \"\"\"\n Add a pretty printer for a type specified by the module and name of a type\n rather than the type object itself.\n \"\"\"\n if dtp is None:\n dtp = _deferred_type_pprinters\n key = (type_module, type_name)\n oldfunc = dtp.get(key, None)\n if func is not None:\n # To support easy restoration of old pprinters, we need to ignore Nones.\n dtp[key] = func\n return oldfunc\n\n\n#: printers for the default singletons\n_singleton_pprinters = LazyObject(\n lambda: dict.fromkeys(\n map(id, [None, True, False, Ellipsis, NotImplemented]), _repr_pprint\n ),\n globals(),\n \"_singleton_pprinters\",\n)\n\n\ndef _defaultdict_pprint(obj, p, cycle):\n name = obj.__class__.__name__\n with p.group(len(name) + 1, name + \"(\", \")\"):\n if cycle:\n p.text(\"...\")\n else:\n p.pretty(obj.default_factory)\n p.text(\",\")\n p.breakable()\n p.pretty(dict(obj))\n\n\ndef _ordereddict_pprint(obj, p, cycle):\n name = obj.__class__.__name__\n with p.group(len(name) + 1, name + \"(\", \")\"):\n if cycle:\n p.text(\"...\")\n elif len(obj):\n p.pretty(list(obj.items()))\n\n\ndef _deque_pprint(obj, p, cycle):\n name = obj.__class__.__name__\n with p.group(len(name) + 1, name + \"(\", \")\"):\n if cycle:\n p.text(\"...\")\n else:\n p.pretty(list(obj))\n\n\ndef _counter_pprint(obj, p, cycle):\n name = obj.__class__.__name__\n with p.group(len(name) + 1, name + \"(\", \")\"):\n if cycle:\n p.text(\"...\")\n elif len(obj):\n p.pretty(dict(obj))\n",
"path": "xonsh/pretty.py"
}
] | [
{
"content": "\"\"\"\nPython advanced pretty printer. This pretty printer is intended to\nreplace the old `pprint` python module which does not allow developers\nto provide their own pretty print callbacks.\n\nThis module is based on ruby's `prettyprint.rb` library by `Tanaka Akira`.\n\nThe following implementations were forked from the IPython project:\n* Copyright (c) 2008-2014, IPython Development Team\n* Copyright (C) 2001-2007 Fernando Perez <[email protected]>\n* Copyright (c) 2001, Janko Hauser <[email protected]>\n* Copyright (c) 2001, Nathaniel Gray <[email protected]>\n\nExample Usage\n-------------\n\nTo directly print the representation of an object use `pprint`::\n\n from pretty import pretty_print\n pretty_pprint(complex_object)\n\nTo get a string of the output use `pretty`::\n\n from pretty import pretty\n string = pretty(complex_object)\n\n\nExtending\n---------\n\nThe pretty library allows developers to add pretty printing rules for their\nown objects. This process is straightforward. All you have to do is to\nadd a `_repr_pretty_` method to your object and call the methods on the\npretty printer passed::\n\n class MyObject(object):\n\n def _repr_pretty_(self, p, cycle):\n ...\n\nHere is an example implementation of a `_repr_pretty_` method for a list\nsubclass::\n\n class MyList(list):\n\n def _repr_pretty_(self, p, cycle):\n if cycle:\n p.text('MyList(...)')\n else:\n with p.group(8, 'MyList([', '])'):\n for idx, item in enumerate(self):\n if idx:\n p.text(',')\n p.breakable()\n p.pretty(item)\n\nThe `cycle` parameter is `True` if pretty detected a cycle. You *have* to\nreact to that or the result is an infinite loop. `p.text()` just adds\nnon breaking text to the output, `p.breakable()` either adds a whitespace\nor breaks here. If you pass it an argument it's used instead of the\ndefault space. `p.pretty` prettyprints another object using the pretty print\nmethod.\n\nThe first parameter to the `group` function specifies the extra indentation\nof the next line. In this example the next item will either be on the same\nline (if the items are short enough) or aligned with the right edge of the\nopening bracket of `MyList`.\n\nIf you just want to indent something you can use the group function\nwithout open / close parameters. 
You can also use this code::\n\n with p.indent(2):\n ...\n\n\n:copyright: 2007 by Armin Ronacher.\n Portions (c) 2009 by Robert Kern.\n:license: BSD License.\n\"\"\"\nimport io\nimport re\nimport sys\nimport types\nimport datetime\nimport contextlib\nimport collections\n\nfrom xonsh.lazyasd import LazyObject, lazyobject\n\n__all__ = [\n \"pretty\",\n \"pretty_print\",\n \"PrettyPrinter\",\n \"RepresentationPrinter\",\n \"for_type\",\n \"for_type_by_name\",\n]\n\n\nMAX_SEQ_LENGTH = 1000\n\n\ndef _safe_getattr(obj, attr, default=None):\n \"\"\"Safe version of getattr.\n\n Same as getattr, but will return ``default`` on any Exception,\n rather than raising.\n \"\"\"\n try:\n return getattr(obj, attr, default)\n except Exception:\n return default\n\n\ndef pretty(\n obj, verbose=False, max_width=79, newline=\"\\n\", max_seq_length=MAX_SEQ_LENGTH\n):\n \"\"\"\n Pretty print the object's representation.\n \"\"\"\n if _safe_getattr(obj, \"xonsh_display\"):\n return obj.xonsh_display()\n\n stream = io.StringIO()\n printer = RepresentationPrinter(\n stream, verbose, max_width, newline, max_seq_length=max_seq_length\n )\n printer.pretty(obj)\n printer.flush()\n return stream.getvalue()\n\n\ndef pretty_print(\n obj, verbose=False, max_width=79, newline=\"\\n\", max_seq_length=MAX_SEQ_LENGTH\n):\n \"\"\"\n Like pretty() but print to stdout.\n \"\"\"\n printer = RepresentationPrinter(\n sys.stdout, verbose, max_width, newline, max_seq_length=max_seq_length\n )\n printer.pretty(obj)\n printer.flush()\n sys.stdout.write(newline)\n sys.stdout.flush()\n\n\nclass _PrettyPrinterBase:\n @contextlib.contextmanager\n def indent(self, indent):\n \"\"\"with statement support for indenting/dedenting.\"\"\"\n self.indentation += indent\n try:\n yield\n finally:\n self.indentation -= indent\n\n @contextlib.contextmanager\n def group(self, indent=0, open=\"\", close=\"\"):\n \"\"\"like begin_group / end_group but for the with statement.\"\"\"\n self.begin_group(indent, open)\n try:\n yield\n finally:\n self.end_group(indent, close)\n\n\nclass PrettyPrinter(_PrettyPrinterBase):\n \"\"\"\n Baseclass for the `RepresentationPrinter` prettyprinter that is used to\n generate pretty reprs of objects. 
Contrary to the `RepresentationPrinter`\n this printer knows nothing about the default pprinters or the `_repr_pretty_`\n callback method.\n \"\"\"\n\n def __init__(\n self, output, max_width=79, newline=\"\\n\", max_seq_length=MAX_SEQ_LENGTH\n ):\n self.output = output\n self.max_width = max_width\n self.newline = newline\n self.max_seq_length = max_seq_length\n self.output_width = 0\n self.buffer_width = 0\n self.buffer = collections.deque()\n\n root_group = Group(0)\n self.group_stack = [root_group]\n self.group_queue = GroupQueue(root_group)\n self.indentation = 0\n\n def _break_outer_groups(self):\n while self.max_width < self.output_width + self.buffer_width:\n group = self.group_queue.deq()\n if not group:\n return\n while group.breakables:\n x = self.buffer.popleft()\n self.output_width = x.output(self.output, self.output_width)\n self.buffer_width -= x.width\n while self.buffer and isinstance(self.buffer[0], Text):\n x = self.buffer.popleft()\n self.output_width = x.output(self.output, self.output_width)\n self.buffer_width -= x.width\n\n def text(self, obj):\n \"\"\"Add literal text to the output.\"\"\"\n width = len(obj)\n if self.buffer:\n text = self.buffer[-1]\n if not isinstance(text, Text):\n text = Text()\n self.buffer.append(text)\n text.add(obj, width)\n self.buffer_width += width\n self._break_outer_groups()\n else:\n self.output.write(obj)\n self.output_width += width\n\n def breakable(self, sep=\" \"):\n \"\"\"\n Add a breakable separator to the output. This does not mean that it\n will automatically break here. If no breaking on this position takes\n place the `sep` is inserted which default to one space.\n \"\"\"\n width = len(sep)\n group = self.group_stack[-1]\n if group.want_break:\n self.flush()\n self.output.write(self.newline)\n self.output.write(\" \" * self.indentation)\n self.output_width = self.indentation\n self.buffer_width = 0\n else:\n self.buffer.append(Breakable(sep, width, self))\n self.buffer_width += width\n self._break_outer_groups()\n\n def break_(self):\n \"\"\"\n Explicitly insert a newline into the output, maintaining correct indentation.\n \"\"\"\n self.flush()\n self.output.write(self.newline)\n self.output.write(\" \" * self.indentation)\n self.output_width = self.indentation\n self.buffer_width = 0\n\n def begin_group(self, indent=0, open=\"\"):\n \"\"\"\n Begin a group. If you want support for python < 2.5 which doesn't has\n the with statement this is the preferred way:\n\n p.begin_group(1, '{')\n ...\n p.end_group(1, '}')\n\n The python 2.5 expression would be this:\n\n with p.group(1, '{', '}'):\n ...\n\n The first parameter specifies the indentation for the next line (usually\n the width of the opening text), the second the opening text. All\n parameters are optional.\n \"\"\"\n if open:\n self.text(open)\n group = Group(self.group_stack[-1].depth + 1)\n self.group_stack.append(group)\n self.group_queue.enq(group)\n self.indentation += indent\n\n def _enumerate(self, seq):\n \"\"\"like enumerate, but with an upper limit on the number of items\"\"\"\n for idx, x in enumerate(seq):\n if self.max_seq_length and idx >= self.max_seq_length:\n self.text(\",\")\n self.breakable()\n self.text(\"...\")\n return\n yield idx, x\n\n def end_group(self, dedent=0, close=\"\"):\n \"\"\"End a group. 
See `begin_group` for more details.\"\"\"\n self.indentation -= dedent\n group = self.group_stack.pop()\n if not group.breakables:\n self.group_queue.remove(group)\n if close:\n self.text(close)\n\n def flush(self):\n \"\"\"Flush data that is left in the buffer.\"\"\"\n for data in self.buffer:\n self.output_width += data.output(self.output, self.output_width)\n self.buffer.clear()\n self.buffer_width = 0\n\n\ndef _get_mro(obj_class):\n \"\"\"Get a reasonable method resolution order of a class and its superclasses\n for both old-style and new-style classes.\n \"\"\"\n if not hasattr(obj_class, \"__mro__\"):\n # Old-style class. Mix in object to make a fake new-style class.\n try:\n obj_class = type(obj_class.__name__, (obj_class, object), {})\n except TypeError:\n # Old-style extension type that does not descend from object.\n # FIXME: try to construct a more thorough MRO.\n mro = [obj_class]\n else:\n mro = obj_class.__mro__[1:-1]\n else:\n mro = obj_class.__mro__\n return mro\n\n\nclass RepresentationPrinter(PrettyPrinter):\n \"\"\"\n Special pretty printer that has a `pretty` method that calls the pretty\n printer for a python object.\n\n This class stores processing data on `self` so you must *never* use\n this class in a threaded environment. Always lock it or reinstantiate\n it.\n\n Instances also have a verbose flag callbacks can access to control their\n output. For example the default instance repr prints all attributes and\n methods that are not prefixed by an underscore if the printer is in\n verbose mode.\n \"\"\"\n\n def __init__(\n self,\n output,\n verbose=False,\n max_width=79,\n newline=\"\\n\",\n singleton_pprinters=None,\n type_pprinters=None,\n deferred_pprinters=None,\n max_seq_length=MAX_SEQ_LENGTH,\n ):\n\n PrettyPrinter.__init__(\n self, output, max_width, newline, max_seq_length=max_seq_length\n )\n self.verbose = verbose\n self.stack = []\n if singleton_pprinters is None:\n singleton_pprinters = _singleton_pprinters.copy()\n self.singleton_pprinters = singleton_pprinters\n if type_pprinters is None:\n type_pprinters = _type_pprinters.copy()\n self.type_pprinters = type_pprinters\n if deferred_pprinters is None:\n deferred_pprinters = _deferred_type_pprinters.copy()\n self.deferred_pprinters = deferred_pprinters\n\n def pretty(self, obj):\n \"\"\"Pretty print the given object.\"\"\"\n obj_id = id(obj)\n cycle = obj_id in self.stack\n self.stack.append(obj_id)\n self.begin_group()\n try:\n obj_class = _safe_getattr(obj, \"__class__\", None) or type(obj)\n # First try to find registered singleton printers for the type.\n try:\n printer = self.singleton_pprinters[obj_id]\n except (TypeError, KeyError):\n pass\n else:\n return printer(obj, self, cycle)\n # Next walk the mro and check for either:\n # 1) a registered printer\n # 2) a _repr_pretty_ method\n for cls in _get_mro(obj_class):\n if cls in self.type_pprinters:\n # printer registered in self.type_pprinters\n return self.type_pprinters[cls](obj, self, cycle)\n else:\n # deferred printer\n printer = self._in_deferred_types(cls)\n if printer is not None:\n return printer(obj, self, cycle)\n else:\n # Finally look for special method names.\n # Some objects automatically create any requested\n # attribute. 
Try to ignore most of them by checking for\n # callability.\n if \"_repr_pretty_\" in cls.__dict__:\n meth = cls._repr_pretty_\n if callable(meth):\n return meth(obj, self, cycle)\n return _default_pprint(obj, self, cycle)\n finally:\n self.end_group()\n self.stack.pop()\n\n def _in_deferred_types(self, cls):\n \"\"\"\n Check if the given class is specified in the deferred type registry.\n\n Returns the printer from the registry if it exists, and None if the\n class is not in the registry. Successful matches will be moved to the\n regular type registry for future use.\n \"\"\"\n mod = _safe_getattr(cls, \"__module__\", None)\n name = _safe_getattr(cls, \"__name__\", None)\n key = (mod, name)\n printer = None\n if key in self.deferred_pprinters:\n # Move the printer over to the regular registry.\n printer = self.deferred_pprinters.pop(key)\n self.type_pprinters[cls] = printer\n return printer\n\n\nclass Printable:\n def output(self, stream, output_width):\n return output_width\n\n\nclass Text(Printable):\n def __init__(self):\n self.objs = []\n self.width = 0\n\n def output(self, stream, output_width):\n for obj in self.objs:\n stream.write(obj)\n return output_width + self.width\n\n def add(self, obj, width):\n self.objs.append(obj)\n self.width += width\n\n\nclass Breakable(Printable):\n def __init__(self, seq, width, pretty):\n self.obj = seq\n self.width = width\n self.pretty = pretty\n self.indentation = pretty.indentation\n self.group = pretty.group_stack[-1]\n self.group.breakables.append(self)\n\n def output(self, stream, output_width):\n self.group.breakables.popleft()\n if self.group.want_break:\n stream.write(self.pretty.newline)\n stream.write(\" \" * self.indentation)\n return self.indentation\n if not self.group.breakables:\n self.pretty.group_queue.remove(self.group)\n stream.write(self.obj)\n return output_width + self.width\n\n\nclass Group(Printable):\n def __init__(self, depth):\n self.depth = depth\n self.breakables = collections.deque()\n self.want_break = False\n\n\nclass GroupQueue:\n def __init__(self, *groups):\n self.queue = []\n for group in groups:\n self.enq(group)\n\n def enq(self, group):\n depth = group.depth\n while depth > len(self.queue) - 1:\n self.queue.append([])\n self.queue[depth].append(group)\n\n def deq(self):\n for stack in self.queue:\n for idx, group in enumerate(reversed(stack)):\n if group.breakables:\n del stack[idx]\n group.want_break = True\n return group\n for group in stack:\n group.want_break = True\n del stack[:]\n\n def remove(self, group):\n try:\n self.queue[group.depth].remove(group)\n except ValueError:\n pass\n\n\n@lazyobject\ndef _baseclass_reprs():\n try:\n br = (object.__repr__, types.InstanceType.__repr__)\n except AttributeError: # Python 3\n br = (object.__repr__,)\n return br\n\n\ndef _default_pprint(obj, p, cycle):\n \"\"\"\n The default print function. Used if an object does not provide one and\n it's none of the builtin objects.\n \"\"\"\n klass = _safe_getattr(obj, \"__class__\", None) or type(obj)\n if _safe_getattr(klass, \"__repr__\", None) not in _baseclass_reprs:\n # A user-provided repr. 
Find newlines and replace them with p.break_()\n _repr_pprint(obj, p, cycle)\n return\n p.begin_group(1, \"<\")\n p.pretty(klass)\n p.text(\" at 0x%x\" % id(obj))\n if cycle:\n p.text(\" ...\")\n elif p.verbose:\n first = True\n for key in dir(obj):\n if not key.startswith(\"_\"):\n try:\n value = getattr(obj, key)\n except AttributeError:\n continue\n if isinstance(value, types.MethodType):\n continue\n if not first:\n p.text(\",\")\n p.breakable()\n p.text(key)\n p.text(\"=\")\n step = len(key) + 1\n p.indentation += step\n p.pretty(value)\n p.indentation -= step\n first = False\n p.end_group(1, \">\")\n\n\ndef _seq_pprinter_factory(start, end, basetype):\n \"\"\"\n Factory that returns a pprint function useful for sequences. Used by\n the default pprint for tuples, dicts, and lists.\n \"\"\"\n\n def inner(obj, p, cycle):\n typ = type(obj)\n if (\n basetype is not None\n and typ is not basetype\n and typ.__repr__ != basetype.__repr__\n ):\n # If the subclass provides its own repr, use it instead.\n return p.text(typ.__repr__(obj))\n\n if cycle:\n return p.text(start + \"...\" + end)\n step = len(start)\n p.begin_group(step, start)\n for idx, x in p._enumerate(obj):\n if idx:\n p.text(\",\")\n p.breakable()\n p.pretty(x)\n if len(obj) == 1 and type(obj) is tuple:\n # Special case for 1-item tuples.\n p.text(\",\")\n p.end_group(step, end)\n\n return inner\n\n\ndef _set_pprinter_factory(start, end, basetype):\n \"\"\"\n Factory that returns a pprint function useful for sets and frozensets.\n \"\"\"\n\n def inner(obj, p, cycle):\n typ = type(obj)\n if (\n basetype is not None\n and typ is not basetype\n and typ.__repr__ != basetype.__repr__\n ):\n # If the subclass provides its own repr, use it instead.\n return p.text(typ.__repr__(obj))\n\n if cycle:\n return p.text(start + \"...\" + end)\n if len(obj) == 0:\n # Special case.\n p.text(basetype.__name__ + \"()\")\n else:\n step = len(start)\n p.begin_group(step, start)\n # Like dictionary keys, we will try to sort the items if there aren't too many\n items = obj\n if not (p.max_seq_length and len(obj) >= p.max_seq_length):\n try:\n items = sorted(obj)\n except Exception:\n # Sometimes the items don't sort.\n pass\n for idx, x in p._enumerate(items):\n if idx:\n p.text(\",\")\n p.breakable()\n p.pretty(x)\n p.end_group(step, end)\n\n return inner\n\n\ndef _dict_pprinter_factory(start, end, basetype=None):\n \"\"\"\n Factory that returns a pprint function used by the default pprint of\n dicts and dict proxies.\n \"\"\"\n\n def inner(obj, p, cycle):\n typ = type(obj)\n if (\n basetype is not None\n and typ is not basetype\n and typ.__repr__ != basetype.__repr__\n ):\n # If the subclass provides its own repr, use it instead.\n return p.text(typ.__repr__(obj))\n\n if cycle:\n return p.text(\"{...}\")\n p.begin_group(1, start)\n keys = obj.keys()\n # if dict isn't large enough to be truncated, sort keys before displaying\n if not (p.max_seq_length and len(obj) >= p.max_seq_length):\n try:\n keys = sorted(keys)\n except Exception:\n # Sometimes the keys don't sort.\n pass\n for idx, key in p._enumerate(keys):\n if idx:\n p.text(\",\")\n p.breakable()\n p.pretty(key)\n p.text(\": \")\n p.pretty(obj[key])\n p.end_group(1, end)\n\n return inner\n\n\ndef _super_pprint(obj, p, cycle):\n \"\"\"The pprint for the super type.\"\"\"\n p.begin_group(8, \"<super: \")\n p.pretty(obj.__thisclass__)\n p.text(\",\")\n p.breakable()\n p.pretty(obj.__self__)\n p.end_group(8, \">\")\n\n\ndef _re_pattern_pprint(obj, p, cycle):\n \"\"\"The pprint function for regular 
expression patterns.\"\"\"\n p.text(\"re.compile(\")\n pattern = repr(obj.pattern)\n if pattern[:1] in \"uU\":\n pattern = pattern[1:]\n prefix = \"ur\"\n else:\n prefix = \"r\"\n pattern = prefix + pattern.replace(\"\\\\\\\\\", \"\\\\\")\n p.text(pattern)\n if obj.flags:\n p.text(\",\")\n p.breakable()\n done_one = False\n for flag in (\n \"TEMPLATE\",\n \"IGNORECASE\",\n \"LOCALE\",\n \"MULTILINE\",\n \"DOTALL\",\n \"UNICODE\",\n \"VERBOSE\",\n \"DEBUG\",\n ):\n if obj.flags & getattr(re, flag):\n if done_one:\n p.text(\"|\")\n p.text(\"re.\" + flag)\n done_one = True\n p.text(\")\")\n\n\ndef _type_pprint(obj, p, cycle):\n \"\"\"The pprint for classes and types.\"\"\"\n # Heap allocated types might not have the module attribute,\n # and others may set it to None.\n\n # Checks for a __repr__ override in the metaclass\n if type(obj).__repr__ is not type.__repr__:\n _repr_pprint(obj, p, cycle)\n return\n\n mod = _safe_getattr(obj, \"__module__\", None)\n try:\n name = obj.__qualname__\n if not isinstance(name, str):\n # This can happen if the type implements __qualname__ as a property\n # or other descriptor in Python 2.\n raise Exception(\"Try __name__\")\n except Exception:\n name = obj.__name__\n if not isinstance(name, str):\n name = \"<unknown type>\"\n\n if mod in (None, \"__builtin__\", \"builtins\", \"exceptions\"):\n p.text(name)\n else:\n p.text(mod + \".\" + name)\n\n\ndef _repr_pprint(obj, p, cycle):\n \"\"\"A pprint that just redirects to the normal repr function.\"\"\"\n # Find newlines and replace them with p.break_()\n output = repr(obj)\n for idx, output_line in enumerate(output.splitlines()):\n if idx:\n p.break_()\n p.text(output_line)\n\n\ndef _function_pprint(obj, p, cycle):\n \"\"\"Base pprint for all functions and builtin functions.\"\"\"\n name = _safe_getattr(obj, \"__qualname__\", obj.__name__)\n mod = obj.__module__\n if mod and mod not in (\"__builtin__\", \"builtins\", \"exceptions\"):\n name = mod + \".\" + name\n p.text(\"<function %s>\" % name)\n\n\ndef _exception_pprint(obj, p, cycle):\n \"\"\"Base pprint for all exceptions.\"\"\"\n name = getattr(obj.__class__, \"__qualname__\", obj.__class__.__name__)\n if obj.__class__.__module__ not in (\"exceptions\", \"builtins\"):\n name = f\"{obj.__class__.__module__}.{name}\"\n step = len(name) + 1\n p.begin_group(step, name + \"(\")\n for idx, arg in enumerate(getattr(obj, \"args\", ())):\n if idx:\n p.text(\",\")\n p.breakable()\n p.pretty(arg)\n p.end_group(step, \")\")\n\n\n@lazyobject\ndef _type_pprinters():\n #: printers for builtin types\n tp = {\n int: _repr_pprint,\n float: _repr_pprint,\n str: _repr_pprint,\n tuple: _seq_pprinter_factory(\"(\", \")\", tuple),\n list: _seq_pprinter_factory(\"[\", \"]\", list),\n dict: _dict_pprinter_factory(\"{\", \"}\", dict),\n set: _set_pprinter_factory(\"{\", \"}\", set),\n frozenset: _set_pprinter_factory(\"frozenset({\", \"})\", frozenset),\n super: _super_pprint,\n type(re.compile(\"\")): _re_pattern_pprint,\n type: _type_pprint,\n types.FunctionType: _function_pprint,\n types.BuiltinFunctionType: _function_pprint,\n types.MethodType: _repr_pprint,\n datetime.datetime: _repr_pprint,\n datetime.timedelta: _repr_pprint,\n }\n #: the exception base\n try:\n _exception_base = BaseException\n except NameError:\n _exception_base = Exception\n tp[_exception_base] = _exception_pprint\n try:\n tp[types.DictProxyType] = _dict_pprinter_factory(\"<dictproxy {\", \"}>\")\n tp[types.ClassType] = _type_pprint\n tp[types.SliceType] = _repr_pprint\n except AttributeError: # Python 
3\n tp[slice] = _repr_pprint\n try:\n tp[xrange] = _repr_pprint\n tp[long] = _repr_pprint\n tp[unicode] = _repr_pprint\n except NameError:\n tp[range] = _repr_pprint\n tp[bytes] = _repr_pprint\n return tp\n\n\n#: printers for types specified by name\n@lazyobject\ndef _deferred_type_pprinters():\n dtp = {}\n for_type_by_name(\"collections\", \"defaultdict\", _defaultdict_pprint, dtp=dtp)\n for_type_by_name(\"collections\", \"OrderedDict\", _ordereddict_pprint, dtp=dtp)\n for_type_by_name(\"collections\", \"deque\", _deque_pprint, dtp=dtp)\n for_type_by_name(\"collections\", \"Counter\", _counter_pprint, dtp=dtp)\n return dtp\n\n\ndef for_type(typ, func):\n \"\"\"\n Add a pretty printer for a given type.\n \"\"\"\n oldfunc = _type_pprinters.get(typ, None)\n if func is not None:\n # To support easy restoration of old pprinters, we need to ignore Nones.\n _type_pprinters[typ] = func\n return oldfunc\n\n\ndef for_type_by_name(type_module, type_name, func, dtp=None):\n \"\"\"\n Add a pretty printer for a type specified by the module and name of a type\n rather than the type object itself.\n \"\"\"\n if dtp is None:\n dtp = _deferred_type_pprinters\n key = (type_module, type_name)\n oldfunc = dtp.get(key, None)\n if func is not None:\n # To support easy restoration of old pprinters, we need to ignore Nones.\n dtp[key] = func\n return oldfunc\n\n\n#: printers for the default singletons\n_singleton_pprinters = LazyObject(\n lambda: dict.fromkeys(\n map(id, [None, True, False, Ellipsis, NotImplemented]), _repr_pprint\n ),\n globals(),\n \"_singleton_pprinters\",\n)\n\n\ndef _defaultdict_pprint(obj, p, cycle):\n name = obj.__class__.__name__\n with p.group(len(name) + 1, name + \"(\", \")\"):\n if cycle:\n p.text(\"...\")\n else:\n p.pretty(obj.default_factory)\n p.text(\",\")\n p.breakable()\n p.pretty(dict(obj))\n\n\ndef _ordereddict_pprint(obj, p, cycle):\n name = obj.__class__.__name__\n with p.group(len(name) + 1, name + \"(\", \")\"):\n if cycle:\n p.text(\"...\")\n elif len(obj):\n p.pretty(list(obj.items()))\n\n\ndef _deque_pprint(obj, p, cycle):\n name = obj.__class__.__name__\n with p.group(len(name) + 1, name + \"(\", \")\"):\n if cycle:\n p.text(\"...\")\n else:\n p.pretty(list(obj))\n\n\ndef _counter_pprint(obj, p, cycle):\n name = obj.__class__.__name__\n with p.group(len(name) + 1, name + \"(\", \")\"):\n if cycle:\n p.text(\"...\")\n elif len(obj):\n p.pretty(dict(obj))\n",
"path": "xonsh/pretty.py"
}
] | diff --git a/xonsh/pretty.py b/xonsh/pretty.py
index 23013a9df2..f0e1536f57 100644
--- a/xonsh/pretty.py
+++ b/xonsh/pretty.py
@@ -118,7 +118,7 @@ def pretty(
"""
Pretty print the object's representation.
"""
- if hasattr(obj, "xonsh_display"):
+ if _safe_getattr(obj, "xonsh_display"):
return obj.xonsh_display()
stream = io.StringIO()
|
sktime__sktime-556 | [DOC] SlidingWindowSplitter start_with_window default value not consistent
#### Describe the issue linked to the documentation
https://github.com/alan-turing-institute/sktime/blob/139b9291fb634cce367f714a6132212b0172e199/sktime/forecasting/model_selection/_split.py#L174
It looks like the default value is `start_with_window=False`, but the documentation states that it is `True`. Not sure which one was intended.
https://github.com/alan-turing-institute/sktime/blob/139b9291fb634cce367f714a6132212b0172e199/sktime/forecasting/model_selection/_split.py#L183
#### Suggest a potential alternative/fix
Change either the documentation or the default argument to match the other one
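For reference, a quick runtime check of which default is actually in effect (a minimal sketch; it assumes sktime is installed and that `SlidingWindowSplitter` is importable from `sktime.forecasting.model_selection`):

```python
import inspect

from sktime.forecasting.model_selection import SlidingWindowSplitter

# Read the default straight off the constructor signature rather than the docstring
sig = inspect.signature(SlidingWindowSplitter.__init__)
print(sig.parameters["start_with_window"].default)  # prints False with the code linked above
```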
| [
{
"content": "#!/usr/bin/env python3 -u\n# -*- coding: utf-8 -*-\n# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)\n\n__all__ = [\n \"SlidingWindowSplitter\",\n \"CutoffSplitter\",\n \"SingleWindowSplitter\",\n \"temporal_train_test_split\",\n]\n__author__ = [\"Markus Löning\"]\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split as _train_test_split\n\nfrom sktime.utils.validation import check_window_length\nfrom sktime.utils.validation.forecasting import check_cutoffs\nfrom sktime.utils.validation.forecasting import check_fh\nfrom sktime.utils.validation.forecasting import check_step_length\nfrom sktime.utils.validation.series import check_equal_time_index\nfrom sktime.utils.validation.series import check_time_index\n\nDEFAULT_STEP_LENGTH = 1\nDEFAULT_WINDOW_LENGTH = 10\nDEFAULT_FH = 1\n\n\nclass BaseSplitter:\n \"\"\"Base class for splitting time series during temporal cross-validation\n\n Parameters\n ----------\n window_length : int\n Length of rolling window\n fh : array-like or int, optional, (default=None)\n Single step ahead or array of steps ahead to forecast.\n \"\"\"\n\n def __init__(self, fh=DEFAULT_FH, window_length=DEFAULT_WINDOW_LENGTH):\n self.window_length = window_length\n self.fh = fh\n\n def split(self, y):\n \"\"\"Split y into windows.\n\n Parameters\n ----------\n y : pd.Series or pd.Index\n Time series to split\n\n Yields\n ------\n training_window : np.array\n Training window indices\n test_window : np.array\n Test window indices\n \"\"\"\n y = self._check_y(y)\n for training_window, test_window in self._split_windows(y):\n yield training_window[training_window >= 0], test_window[test_window >= 0]\n\n def _split_windows(self, y):\n \"\"\"Internal split method\"\"\"\n raise NotImplementedError(\"abstract method\")\n\n def get_n_splits(self, y=None):\n \"\"\"Return the number of splits.\"\"\"\n raise NotImplementedError(\"abstract method\")\n\n def get_cutoffs(self, y=None):\n \"\"\"Return the cutoff points in time at which y is split.\"\"\"\n raise NotImplementedError(\"abstract method\")\n\n def get_fh(self):\n \"\"\"Return the forecasting horizon\"\"\"\n return check_fh(self.fh)\n\n @staticmethod\n def _check_y(y):\n # allow for pd.Series\n if isinstance(y, pd.Series):\n y = y.index\n return check_time_index(y)\n\n def _check_fh(self):\n return check_fh(self.fh, enforce_relative=True).to_numpy()\n\n\nclass CutoffSplitter(BaseSplitter):\n \"\"\"Manual window splitter to split time series at given cutoff points.\n\n Parameters\n ----------\n cutoffs : np.array\n cutoff points, positive and integer-index like, usable with pandas\n .iloc[] indexing\n fh : int, list or np.array\n window_length : int\n \"\"\"\n\n def __init__(self, cutoffs, fh=DEFAULT_FH, window_length=DEFAULT_WINDOW_LENGTH):\n self.cutoffs = cutoffs\n super(CutoffSplitter, self).__init__(fh, window_length)\n\n def _split_windows(self, y):\n # cutoffs\n cutoffs = check_cutoffs(self.cutoffs)\n if not np.max(cutoffs) < len(y):\n raise ValueError(\"`cutoffs` are out-of-bounds for given `y`.\")\n\n fh = self._check_fh()\n\n if np.max(cutoffs) + np.max(fh) > len(y):\n raise ValueError(\"`fh` is out-of-bounds for given `cutoffs` and `y`.\")\n window_length = check_window_length(self.window_length)\n\n for cutoff in cutoffs:\n training_window = np.arange(cutoff - window_length, cutoff) + 1\n test_window = cutoff + fh\n yield training_window, test_window\n\n def get_n_splits(self, y=None):\n \"\"\"Return the number of splits\"\"\"\n return 
len(self.cutoffs)\n\n def get_cutoffs(self, y=None):\n \"\"\"Return the cutoff points\"\"\"\n return check_cutoffs(self.cutoffs)\n\n\nclass BaseWindowSplitter(BaseSplitter):\n \"\"\"Base class for window splits\"\"\"\n\n def __init__(self, fh=None, window_length=None):\n super(BaseWindowSplitter, self).__init__(fh=fh, window_length=window_length)\n\n def split_initial(self, y):\n raise NotImplementedError(\"abstract method\")\n\n def _get_end(self, y):\n \"\"\"Helper function to compute the end of the last window\"\"\"\n n_timepoints = len(y)\n fh = self._check_fh()\n window_length = check_window_length(self.window_length)\n\n # end point is end of last window\n is_in_sample = np.all(fh <= 0)\n if is_in_sample:\n end = n_timepoints + 1\n else:\n fh_max = fh[-1]\n end = n_timepoints - fh_max + 1 # non-inclusive end indexing\n\n # check if computed values are feasible with the provided index\n if window_length is not None:\n if window_length + fh_max > n_timepoints:\n raise ValueError(\n \"The window length and forecasting horizon are \"\n \"incompatible with the length of `y`\"\n )\n return end\n\n\nclass SlidingWindowSplitter(BaseWindowSplitter):\n \"\"\"Sliding window splitter\n\n Parameters\n ----------\n fh : int, list or np.array\n Forecasting horizon\n window_length : int\n step_length : int\n initial_window : int\n start_with_window : bool, optional (default=True)\n\n Examples\n --------\n For example for `window_length = 5`, `step_length = 1` and `fh = 3`\n here is a representation of the folds::\n\n |-----------------------|\n | * * * * * x x x - - - |\n | - * * * * * x x x - - |\n | - - * * * * * x x x - |\n | - - - * * * * * x x x |\n\n\n ``*`` = training fold.\n\n ``x`` = test fold.\n \"\"\"\n\n def __init__(\n self,\n fh=DEFAULT_FH,\n window_length=DEFAULT_WINDOW_LENGTH,\n step_length=DEFAULT_STEP_LENGTH,\n initial_window=None,\n start_with_window=False,\n ):\n\n self.step_length = step_length\n self.start_with_window = start_with_window\n self.initial_window = initial_window\n super(SlidingWindowSplitter, self).__init__(fh=fh, window_length=window_length)\n\n def _split_windows(self, y):\n step_length = check_step_length(self.step_length)\n window_length = check_window_length(self.window_length)\n fh = self._check_fh()\n\n end = self._get_end(y)\n start = self._get_start()\n for split_point in range(start, end, step_length):\n training_window = np.arange(split_point - window_length, split_point)\n test_window = split_point + fh - 1\n yield training_window, test_window\n\n def split_initial(self, y):\n \"\"\"Split initial window\n\n This is useful during forecasting model selection where we want to\n fit the forecaster on some part of the\n data first before doing temporal cross-validation\n\n Parameters\n ----------\n y : pd.Series\n\n Returns\n -------\n intial_training_window : np.array\n initial_test_window : np.array\n \"\"\"\n if self.initial_window is None:\n raise ValueError(\n \"Please specify initial window, found: `initial_window`=None\"\n )\n\n initial = check_window_length(self.initial_window)\n initial_training_window = np.arange(initial)\n initial_test_window = np.arange(initial, len(y))\n return initial_training_window, initial_test_window\n\n def get_n_splits(self, y=None):\n \"\"\"Return number of splits\n\n Parameters\n ----------\n y : pd.Series or pd.Index, optional (default=None)\n\n Returns\n -------\n n_splits : int\n \"\"\"\n if y is None:\n raise ValueError(\n f\"{self.__class__.__name__} requires `y` to compute the \"\n f\"number of splits.\"\n )\n 
return len(self.get_cutoffs(y))\n\n def get_cutoffs(self, y=None):\n \"\"\"Get the cutoff time points.\n\n Parameters\n ----------\n y : pd.Series or pd.Index, optional (default=None)\n\n Returns\n -------\n cutoffs : np.array\n \"\"\"\n if y is None:\n raise ValueError(\n f\"{self.__class__.__name__} requires `y` to compute the \" f\"cutoffs.\"\n )\n y = self._check_y(y)\n end = self._get_end(y)\n start = self._get_start()\n step_length = check_step_length(self.step_length)\n return np.arange(start, end, step_length) - 1\n\n def _get_start(self):\n window_length = check_window_length(self.window_length)\n if self.start_with_window:\n return window_length\n else:\n return 0\n\n\nclass SingleWindowSplitter(BaseWindowSplitter):\n \"\"\"Single window splitter\n\n Split time series once into a training and test window.\n\n Parameters\n ----------\n fh : int, list or np.array\n window_length : int\n \"\"\"\n\n def __init__(self, fh, window_length=None):\n super(SingleWindowSplitter, self).__init__(fh, window_length)\n\n def _split_windows(self, y):\n window_length = check_window_length(self.window_length)\n fh = self._check_fh()\n\n end = self._get_end(y) - 1\n start = 0 if window_length is None else end - window_length\n training_window = np.arange(start, end)\n test_window = end + fh - 1\n yield training_window, test_window\n\n def get_n_splits(self, y=None):\n \"\"\"Return number of splits\n\n Parameters\n ----------\n y : pd.Series, optional (default=None)\n\n Returns\n -------\n n_splits : int\n \"\"\"\n return 1\n\n def get_cutoffs(self, y=None):\n \"\"\"Get the cutoff time points.\n\n Parameters\n ----------\n y : pd.Series or pd.Index, optional (default=None)\n\n Returns\n -------\n cutoffs : np.array\n \"\"\"\n if y is None:\n raise ValueError(\n f\"{self.__class__.__name__} requires `y` to compute the \" f\"cutoffs.\"\n )\n training_window, _ = next(self._split_windows(y))\n return training_window[-1:] # array outpu\n\n def split_initial(self, y):\n \"\"\"Split initial window\n\n This is useful during forecasting model selection where we want to\n fit the forecaster on some part of the\n data first before doing temporal cross-validation\n\n Parameters\n ----------\n y : pd.Series\n\n Returns\n -------\n intial_training_window : np.array\n initial_test_window : np.array\n \"\"\"\n # the single window splitter simply returns the single split\n training_window, _ = next(self._split_windows(y))\n test_window = np.arange(training_window[-1] + 1, len(y))\n return training_window, test_window\n\n\ndef temporal_train_test_split(y, X=None, test_size=None, train_size=None, fh=None):\n \"\"\"Split arrays or matrices into sequential train and test subsets\n Creates train/test splits over endogenous arrays an optional exogenous\n arrays. This is a wrapper of scikit-learn's ``train_test_split`` that\n does not shuffle.\n\n Parameters\n ----------\n *series : sequence of pd.Series with same length / shape[0]\n test_size : float, int or None, optional (default=None)\n If float, should be between 0.0 and 1.0 and represent the proportion\n of the dataset to include in the test split. If int, represents the\n relative number of test samples. If None, the value is set to the\n complement of the train size. If ``train_size`` is also None, it will\n be set to 0.25.\n train_size : float, int, or None, (default=None)\n If float, should be between 0.0 and 1.0 and represent the\n proportion of the dataset to include in the train split. If\n int, represents the relative number of train samples. 
If None,\n the value is automatically set to the complement of the test size.\n fh : ForecastingHorizon\n\n Returns\n -------\n splitting : list, length=2 * len(arrays)\n List containing train-test split of inputs.\n\n References\n ----------\n ..[1] adapted from https://github.com/alkaline-ml/pmdarima/\n \"\"\"\n if fh is not None:\n if test_size is not None or train_size is not None:\n raise ValueError(\n \"If `fh` is given, `test_size` and `train_size` cannot \"\n \"also be specified.\"\n )\n return _split_by_fh(y, fh, X=X)\n else:\n series = (y,) if X is None else (y, X)\n return _train_test_split(\n *series,\n shuffle=False,\n stratify=None,\n test_size=test_size,\n train_size=train_size,\n )\n\n\ndef _split_by_fh(y, fh, X=None):\n if X is not None:\n check_equal_time_index(y, X)\n fh = check_fh(fh)\n idx = fh.to_pandas()\n index = y.index\n\n if fh.is_relative:\n if not fh.is_all_out_of_sample():\n raise ValueError(\"`fh` must only contain out-of-sample values\")\n max_step = idx.max()\n steps = fh.to_indexer()\n train = index[:-max_step]\n test = index[-max_step:]\n\n y_test = y.loc[test[steps]]\n\n else:\n min_step, max_step = idx.min(), idx.max()\n train = index[index < min_step]\n test = index[(index <= max_step) & (min_step <= index)]\n\n y_test = y.loc[idx]\n\n y_train = y.loc[train]\n if X is None:\n return y_train, y_test\n\n else:\n X_train = X.loc[train]\n X_test = X.loc[test]\n return y_train, y_test, X_train, X_test\n",
"path": "sktime/forecasting/model_selection/_split.py"
}
] | [
{
"content": "#!/usr/bin/env python3 -u\n# -*- coding: utf-8 -*-\n# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)\n\n__all__ = [\n \"SlidingWindowSplitter\",\n \"CutoffSplitter\",\n \"SingleWindowSplitter\",\n \"temporal_train_test_split\",\n]\n__author__ = [\"Markus Löning\"]\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split as _train_test_split\n\nfrom sktime.utils.validation import check_window_length\nfrom sktime.utils.validation.forecasting import check_cutoffs\nfrom sktime.utils.validation.forecasting import check_fh\nfrom sktime.utils.validation.forecasting import check_step_length\nfrom sktime.utils.validation.series import check_equal_time_index\nfrom sktime.utils.validation.series import check_time_index\n\nDEFAULT_STEP_LENGTH = 1\nDEFAULT_WINDOW_LENGTH = 10\nDEFAULT_FH = 1\n\n\nclass BaseSplitter:\n \"\"\"Base class for splitting time series during temporal cross-validation\n\n Parameters\n ----------\n window_length : int\n Length of rolling window\n fh : array-like or int, optional, (default=None)\n Single step ahead or array of steps ahead to forecast.\n \"\"\"\n\n def __init__(self, fh=DEFAULT_FH, window_length=DEFAULT_WINDOW_LENGTH):\n self.window_length = window_length\n self.fh = fh\n\n def split(self, y):\n \"\"\"Split y into windows.\n\n Parameters\n ----------\n y : pd.Series or pd.Index\n Time series to split\n\n Yields\n ------\n training_window : np.array\n Training window indices\n test_window : np.array\n Test window indices\n \"\"\"\n y = self._check_y(y)\n for training_window, test_window in self._split_windows(y):\n yield training_window[training_window >= 0], test_window[test_window >= 0]\n\n def _split_windows(self, y):\n \"\"\"Internal split method\"\"\"\n raise NotImplementedError(\"abstract method\")\n\n def get_n_splits(self, y=None):\n \"\"\"Return the number of splits.\"\"\"\n raise NotImplementedError(\"abstract method\")\n\n def get_cutoffs(self, y=None):\n \"\"\"Return the cutoff points in time at which y is split.\"\"\"\n raise NotImplementedError(\"abstract method\")\n\n def get_fh(self):\n \"\"\"Return the forecasting horizon\"\"\"\n return check_fh(self.fh)\n\n @staticmethod\n def _check_y(y):\n # allow for pd.Series\n if isinstance(y, pd.Series):\n y = y.index\n return check_time_index(y)\n\n def _check_fh(self):\n return check_fh(self.fh, enforce_relative=True).to_numpy()\n\n\nclass CutoffSplitter(BaseSplitter):\n \"\"\"Manual window splitter to split time series at given cutoff points.\n\n Parameters\n ----------\n cutoffs : np.array\n cutoff points, positive and integer-index like, usable with pandas\n .iloc[] indexing\n fh : int, list or np.array\n window_length : int\n \"\"\"\n\n def __init__(self, cutoffs, fh=DEFAULT_FH, window_length=DEFAULT_WINDOW_LENGTH):\n self.cutoffs = cutoffs\n super(CutoffSplitter, self).__init__(fh, window_length)\n\n def _split_windows(self, y):\n # cutoffs\n cutoffs = check_cutoffs(self.cutoffs)\n if not np.max(cutoffs) < len(y):\n raise ValueError(\"`cutoffs` are out-of-bounds for given `y`.\")\n\n fh = self._check_fh()\n\n if np.max(cutoffs) + np.max(fh) > len(y):\n raise ValueError(\"`fh` is out-of-bounds for given `cutoffs` and `y`.\")\n window_length = check_window_length(self.window_length)\n\n for cutoff in cutoffs:\n training_window = np.arange(cutoff - window_length, cutoff) + 1\n test_window = cutoff + fh\n yield training_window, test_window\n\n def get_n_splits(self, y=None):\n \"\"\"Return the number of splits\"\"\"\n return 
len(self.cutoffs)\n\n def get_cutoffs(self, y=None):\n \"\"\"Return the cutoff points\"\"\"\n return check_cutoffs(self.cutoffs)\n\n\nclass BaseWindowSplitter(BaseSplitter):\n \"\"\"Base class for window splits\"\"\"\n\n def __init__(self, fh=None, window_length=None):\n super(BaseWindowSplitter, self).__init__(fh=fh, window_length=window_length)\n\n def split_initial(self, y):\n raise NotImplementedError(\"abstract method\")\n\n def _get_end(self, y):\n \"\"\"Helper function to compute the end of the last window\"\"\"\n n_timepoints = len(y)\n fh = self._check_fh()\n window_length = check_window_length(self.window_length)\n\n # end point is end of last window\n is_in_sample = np.all(fh <= 0)\n if is_in_sample:\n end = n_timepoints + 1\n else:\n fh_max = fh[-1]\n end = n_timepoints - fh_max + 1 # non-inclusive end indexing\n\n # check if computed values are feasible with the provided index\n if window_length is not None:\n if window_length + fh_max > n_timepoints:\n raise ValueError(\n \"The window length and forecasting horizon are \"\n \"incompatible with the length of `y`\"\n )\n return end\n\n\nclass SlidingWindowSplitter(BaseWindowSplitter):\n \"\"\"Sliding window splitter\n\n Parameters\n ----------\n fh : int, list or np.array\n Forecasting horizon\n window_length : int\n step_length : int\n initial_window : int\n start_with_window : bool, optional (default=False)\n\n Examples\n --------\n For example for `window_length = 5`, `step_length = 1` and `fh = 3`\n here is a representation of the folds::\n\n |-----------------------|\n | * * * * * x x x - - - |\n | - * * * * * x x x - - |\n | - - * * * * * x x x - |\n | - - - * * * * * x x x |\n\n\n ``*`` = training fold.\n\n ``x`` = test fold.\n \"\"\"\n\n def __init__(\n self,\n fh=DEFAULT_FH,\n window_length=DEFAULT_WINDOW_LENGTH,\n step_length=DEFAULT_STEP_LENGTH,\n initial_window=None,\n start_with_window=False,\n ):\n\n self.step_length = step_length\n self.start_with_window = start_with_window\n self.initial_window = initial_window\n super(SlidingWindowSplitter, self).__init__(fh=fh, window_length=window_length)\n\n def _split_windows(self, y):\n step_length = check_step_length(self.step_length)\n window_length = check_window_length(self.window_length)\n fh = self._check_fh()\n\n end = self._get_end(y)\n start = self._get_start()\n for split_point in range(start, end, step_length):\n training_window = np.arange(split_point - window_length, split_point)\n test_window = split_point + fh - 1\n yield training_window, test_window\n\n def split_initial(self, y):\n \"\"\"Split initial window\n\n This is useful during forecasting model selection where we want to\n fit the forecaster on some part of the\n data first before doing temporal cross-validation\n\n Parameters\n ----------\n y : pd.Series\n\n Returns\n -------\n intial_training_window : np.array\n initial_test_window : np.array\n \"\"\"\n if self.initial_window is None:\n raise ValueError(\n \"Please specify initial window, found: `initial_window`=None\"\n )\n\n initial = check_window_length(self.initial_window)\n initial_training_window = np.arange(initial)\n initial_test_window = np.arange(initial, len(y))\n return initial_training_window, initial_test_window\n\n def get_n_splits(self, y=None):\n \"\"\"Return number of splits\n\n Parameters\n ----------\n y : pd.Series or pd.Index, optional (default=None)\n\n Returns\n -------\n n_splits : int\n \"\"\"\n if y is None:\n raise ValueError(\n f\"{self.__class__.__name__} requires `y` to compute the \"\n f\"number of splits.\"\n )\n 
return len(self.get_cutoffs(y))\n\n def get_cutoffs(self, y=None):\n \"\"\"Get the cutoff time points.\n\n Parameters\n ----------\n y : pd.Series or pd.Index, optional (default=None)\n\n Returns\n -------\n cutoffs : np.array\n \"\"\"\n if y is None:\n raise ValueError(\n f\"{self.__class__.__name__} requires `y` to compute the \" f\"cutoffs.\"\n )\n y = self._check_y(y)\n end = self._get_end(y)\n start = self._get_start()\n step_length = check_step_length(self.step_length)\n return np.arange(start, end, step_length) - 1\n\n def _get_start(self):\n window_length = check_window_length(self.window_length)\n if self.start_with_window:\n return window_length\n else:\n return 0\n\n\nclass SingleWindowSplitter(BaseWindowSplitter):\n \"\"\"Single window splitter\n\n Split time series once into a training and test window.\n\n Parameters\n ----------\n fh : int, list or np.array\n window_length : int\n \"\"\"\n\n def __init__(self, fh, window_length=None):\n super(SingleWindowSplitter, self).__init__(fh, window_length)\n\n def _split_windows(self, y):\n window_length = check_window_length(self.window_length)\n fh = self._check_fh()\n\n end = self._get_end(y) - 1\n start = 0 if window_length is None else end - window_length\n training_window = np.arange(start, end)\n test_window = end + fh - 1\n yield training_window, test_window\n\n def get_n_splits(self, y=None):\n \"\"\"Return number of splits\n\n Parameters\n ----------\n y : pd.Series, optional (default=None)\n\n Returns\n -------\n n_splits : int\n \"\"\"\n return 1\n\n def get_cutoffs(self, y=None):\n \"\"\"Get the cutoff time points.\n\n Parameters\n ----------\n y : pd.Series or pd.Index, optional (default=None)\n\n Returns\n -------\n cutoffs : np.array\n \"\"\"\n if y is None:\n raise ValueError(\n f\"{self.__class__.__name__} requires `y` to compute the \" f\"cutoffs.\"\n )\n training_window, _ = next(self._split_windows(y))\n return training_window[-1:] # array outpu\n\n def split_initial(self, y):\n \"\"\"Split initial window\n\n This is useful during forecasting model selection where we want to\n fit the forecaster on some part of the\n data first before doing temporal cross-validation\n\n Parameters\n ----------\n y : pd.Series\n\n Returns\n -------\n intial_training_window : np.array\n initial_test_window : np.array\n \"\"\"\n # the single window splitter simply returns the single split\n training_window, _ = next(self._split_windows(y))\n test_window = np.arange(training_window[-1] + 1, len(y))\n return training_window, test_window\n\n\ndef temporal_train_test_split(y, X=None, test_size=None, train_size=None, fh=None):\n \"\"\"Split arrays or matrices into sequential train and test subsets\n Creates train/test splits over endogenous arrays an optional exogenous\n arrays. This is a wrapper of scikit-learn's ``train_test_split`` that\n does not shuffle.\n\n Parameters\n ----------\n *series : sequence of pd.Series with same length / shape[0]\n test_size : float, int or None, optional (default=None)\n If float, should be between 0.0 and 1.0 and represent the proportion\n of the dataset to include in the test split. If int, represents the\n relative number of test samples. If None, the value is set to the\n complement of the train size. If ``train_size`` is also None, it will\n be set to 0.25.\n train_size : float, int, or None, (default=None)\n If float, should be between 0.0 and 1.0 and represent the\n proportion of the dataset to include in the train split. If\n int, represents the relative number of train samples. 
If None,\n the value is automatically set to the complement of the test size.\n fh : ForecastingHorizon\n\n Returns\n -------\n splitting : list, length=2 * len(arrays)\n List containing train-test split of inputs.\n\n References\n ----------\n ..[1] adapted from https://github.com/alkaline-ml/pmdarima/\n \"\"\"\n if fh is not None:\n if test_size is not None or train_size is not None:\n raise ValueError(\n \"If `fh` is given, `test_size` and `train_size` cannot \"\n \"also be specified.\"\n )\n return _split_by_fh(y, fh, X=X)\n else:\n series = (y,) if X is None else (y, X)\n return _train_test_split(\n *series,\n shuffle=False,\n stratify=None,\n test_size=test_size,\n train_size=train_size,\n )\n\n\ndef _split_by_fh(y, fh, X=None):\n if X is not None:\n check_equal_time_index(y, X)\n fh = check_fh(fh)\n idx = fh.to_pandas()\n index = y.index\n\n if fh.is_relative:\n if not fh.is_all_out_of_sample():\n raise ValueError(\"`fh` must only contain out-of-sample values\")\n max_step = idx.max()\n steps = fh.to_indexer()\n train = index[:-max_step]\n test = index[-max_step:]\n\n y_test = y.loc[test[steps]]\n\n else:\n min_step, max_step = idx.min(), idx.max()\n train = index[index < min_step]\n test = index[(index <= max_step) & (min_step <= index)]\n\n y_test = y.loc[idx]\n\n y_train = y.loc[train]\n if X is None:\n return y_train, y_test\n\n else:\n X_train = X.loc[train]\n X_test = X.loc[test]\n return y_train, y_test, X_train, X_test\n",
"path": "sktime/forecasting/model_selection/_split.py"
}
] | diff --git a/.all-contributorsrc b/.all-contributorsrc
index 3cdf7415418..cd1c417af59 100644
--- a/.all-contributorsrc
+++ b/.all-contributorsrc
@@ -717,7 +717,8 @@
"profile": "https://github.com/ngupta23",
"contributions": [
"code",
- "bug"
+ "bug",
+ "doc"
]
},
{
diff --git a/sktime/forecasting/model_selection/_split.py b/sktime/forecasting/model_selection/_split.py
index 7082fcf92fd..d1598c108f5 100644
--- a/sktime/forecasting/model_selection/_split.py
+++ b/sktime/forecasting/model_selection/_split.py
@@ -172,7 +172,7 @@ class SlidingWindowSplitter(BaseWindowSplitter):
window_length : int
step_length : int
initial_window : int
- start_with_window : bool, optional (default=True)
+ start_with_window : bool, optional (default=False)
Examples
--------
|
scikit-hep__pyhf-1790 | Guard SCHEMA_VERSION from version bumps
I don't think it is going to be possible to guard the `SCHEMA_VERSION` from `bump2version`, so we might need to look for a replacement for `bump2version` that offers guard support.
This is going to be a problem when
https://github.com/scikit-hep/pyhf/blob/6b0a9317b14da2a452f51d089cb9e493c8f19347/.bumpversion.cfg#L1-L2
hits `1.0.0` and conflicts with
https://github.com/scikit-hep/pyhf/blob/f824afe77d9e48e90651931700ccfc3d3c268c18/src/pyhf/utils.py#L13
and also has to properly pick up the multiple correct instances in
https://github.com/scikit-hep/pyhf/blob/f824afe77d9e48e90651931700ccfc3d3c268c18/src/pyhf/utils.py#L145
_Originally posted by @matthewfeickert in https://github.com/scikit-hep/pyhf/issues/1218#issuecomment-744590434_
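As a toy illustration of the collision (the file contents and the `__version__` variable below are made up, not the real `utils.py`): once the package version string equals the schema version string, an unguarded search-and-replace of the old version, which is essentially what `bump2version` performs, rewrites both.

```python
import re

# Hypothetical file contents once the package version reaches 1.0.0
utils_src = 'SCHEMA_VERSION = "1.0.0"\n__version__ = "1.0.0"\n'

# An unguarded substitution of the old version string also clobbers SCHEMA_VERSION
print(re.sub(r"1\.0\.0", "1.0.1", utils_src))
```

The diff below sidesteps this by moving to `tbump`, whose per-file `search` option restricts which occurrences of the version string get rewritten.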
| [
{
"content": "from setuptools import setup\n\nextras_require = {\n 'shellcomplete': ['click_completion'],\n 'tensorflow': [\n 'tensorflow>=2.3.1', # c.f. https://github.com/tensorflow/tensorflow/pull/40789\n 'tensorflow-probability>=0.11.0', # c.f. PR #1657\n ],\n 'torch': ['torch>=1.10.0'], # c.f. PR #1657\n 'jax': ['jax>=0.2.10', 'jaxlib>=0.1.60,!=0.1.68'], # c.f. Issue 1501\n 'xmlio': ['uproot>=4.1.1'], # c.f. PR #1567\n 'minuit': ['iminuit>=2.4.0'], # c.f. PR #1306\n}\nextras_require['backends'] = sorted(\n set(\n extras_require['tensorflow']\n + extras_require['torch']\n + extras_require['jax']\n + extras_require['minuit']\n )\n)\nextras_require['contrib'] = sorted({'matplotlib', 'requests'})\nextras_require['lint'] = sorted({'flake8', 'black>=22.1.0'})\n\nextras_require['test'] = sorted(\n set(\n extras_require['backends']\n + extras_require['xmlio']\n + extras_require['contrib']\n + extras_require['shellcomplete']\n + [\n 'scikit-hep-testdata>=0.4.11',\n 'pytest>=6.0',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'requests-mock>=1.9.0',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'pytest-mpl',\n 'pydocstyle',\n 'papermill~=2.0',\n 'nteract-scrapbook~=0.2',\n 'jupyter',\n 'graphviz',\n ]\n )\n)\nextras_require['docs'] = sorted(\n set(\n extras_require['xmlio']\n + extras_require['contrib']\n + [\n 'sphinx>=4.0.0',\n 'sphinxcontrib-bibtex~=2.1',\n 'sphinx-click',\n 'sphinx_rtd_theme',\n 'nbsphinx!=0.8.8', # c.f. https://github.com/spatialaudio/nbsphinx/issues/620\n 'ipywidgets',\n 'sphinx-issues',\n 'sphinx-copybutton>=0.3.2',\n ]\n )\n)\nextras_require['develop'] = sorted(\n set(\n extras_require['docs']\n + extras_require['lint']\n + extras_require['test']\n + [\n 'nbdime',\n 'bump2version',\n 'ipython',\n 'pre-commit',\n 'check-manifest',\n 'codemetapy>=0.3.4',\n 'twine',\n ]\n )\n)\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\n\nsetup(\n extras_require=extras_require,\n use_scm_version=lambda: {'local_scheme': lambda version: ''},\n)\n",
"path": "setup.py"
}
] | [
{
"content": "from setuptools import setup\n\nextras_require = {\n 'shellcomplete': ['click_completion'],\n 'tensorflow': [\n 'tensorflow>=2.3.1', # c.f. https://github.com/tensorflow/tensorflow/pull/40789\n 'tensorflow-probability>=0.11.0', # c.f. PR #1657\n ],\n 'torch': ['torch>=1.10.0'], # c.f. PR #1657\n 'jax': ['jax>=0.2.10', 'jaxlib>=0.1.60,!=0.1.68'], # c.f. Issue 1501\n 'xmlio': ['uproot>=4.1.1'], # c.f. PR #1567\n 'minuit': ['iminuit>=2.4.0'], # c.f. PR #1306\n}\nextras_require['backends'] = sorted(\n set(\n extras_require['tensorflow']\n + extras_require['torch']\n + extras_require['jax']\n + extras_require['minuit']\n )\n)\nextras_require['contrib'] = sorted({'matplotlib', 'requests'})\nextras_require['lint'] = sorted({'flake8', 'black>=22.1.0'})\n\nextras_require['test'] = sorted(\n set(\n extras_require['backends']\n + extras_require['xmlio']\n + extras_require['contrib']\n + extras_require['shellcomplete']\n + [\n 'scikit-hep-testdata>=0.4.11',\n 'pytest>=6.0',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'requests-mock>=1.9.0',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'pytest-mpl',\n 'pydocstyle',\n 'papermill~=2.0',\n 'nteract-scrapbook~=0.2',\n 'jupyter',\n 'graphviz',\n ]\n )\n)\nextras_require['docs'] = sorted(\n set(\n extras_require['xmlio']\n + extras_require['contrib']\n + [\n 'sphinx>=4.0.0',\n 'sphinxcontrib-bibtex~=2.1',\n 'sphinx-click',\n 'sphinx_rtd_theme',\n 'nbsphinx!=0.8.8', # c.f. https://github.com/spatialaudio/nbsphinx/issues/620\n 'ipywidgets',\n 'sphinx-issues',\n 'sphinx-copybutton>=0.3.2',\n ]\n )\n)\nextras_require['develop'] = sorted(\n set(\n extras_require['docs']\n + extras_require['lint']\n + extras_require['test']\n + [\n 'nbdime',\n 'tbump>=6.7.0',\n 'ipython',\n 'pre-commit',\n 'check-manifest',\n 'codemetapy>=0.3.4',\n 'twine',\n ]\n )\n)\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\n\nsetup(\n extras_require=extras_require,\n use_scm_version=lambda: {'local_scheme': lambda version: ''},\n)\n",
"path": "setup.py"
}
] | diff --git a/setup.py b/setup.py
index 52dbd81742..9fb006aeb5 100644
--- a/setup.py
+++ b/setup.py
@@ -68,7 +68,7 @@
+ extras_require['test']
+ [
'nbdime',
- 'bump2version',
+ 'tbump>=6.7.0',
'ipython',
'pre-commit',
'check-manifest',
diff --git a/tbump.toml b/tbump.toml
new file mode 100644
index 0000000000..e03aeb71c4
--- /dev/null
+++ b/tbump.toml
@@ -0,0 +1,61 @@
+github_url = "https://github.com/scikit-hep/pyhf/"
+
+[version]
+current = "0.6.3"
+
+# Example of a semver regexp.
+# Make sure this matches current_version before
+# using tbump
+regex = '''
+ (?P<major>\d+)
+ \.
+ (?P<minor>\d+)
+ \.
+ (?P<patch>\d+)
+ (rc
+ (?P<candidate>\d+)
+ )?
+ '''
+
+[git]
+# The current version will get updated when tbump is run
+message_template = "Bump version: 0.6.3 → {new_version}"
+tag_template = "v{new_version}"
+
+# For each file to patch, add a [[file]] config
+# section containing the path of the file, relative to the
+# tbump.toml location.
+[[file]]
+src = "tbump.toml"
+# Restrict search to make it explicit why tbump.toml
+# is even included as a file to bump, as it will get
+# its version.current attribute bumped anyway.
+search = "Bump version: {current_version} → "
+
+[[file]]
+src = "src/pyhf/utils.py"
+# Guard SCHEMA_VERSION
+# This search is just identifying the line to restrict the
+# regex to, but all matches in the line will get bumped.
+search = "pyhf: v{current_version}"
+
+[[file]]
+src = "README.rst"
+
+[[file]]
+src = "src/pyhf/data/citation.bib"
+
+[[file]]
+src = ".zenodo.json"
+
+[[file]]
+src = "codemeta.json"
+
+[[file]]
+src = "CITATION.cff"
+
+[[field]]
+# the name of the field
+name = "candidate"
+# the default value to use, if there is no match
+default = ""
|
joke2k__faker-512 | Using É, é (e-acute) in emails.
It looks like É, é (e-acute) symbols are not valid in an email address. I used https://pypi.python.org/pypi/robotframework-faker/, which uses this library, and the following email was returned:
andré[email protected]
But email verification failed for this email.
Could you remove É, é, and other such letters from email generation if they are present?
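For context, a generic way to fold accented characters to ASCII before building the local part of an address (just a sketch of the desired behaviour; the `ascii_fold` helper is hypothetical, and as the diff below shows the locale provider actually handles this with an explicit `replacements` table):

```python
import unicodedata


def ascii_fold(text: str) -> str:
    """Drop accents so that e.g. 'André' becomes 'Andre'."""
    decomposed = unicodedata.normalize("NFKD", text)
    return decomposed.encode("ascii", "ignore").decode("ascii")


print(ascii_fold("André") + "@example.com")  # Andre@example.com
```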
| [
{
"content": "# coding=utf-8\n\nfrom __future__ import unicode_literals\nfrom .. import Provider as InternetProvider\n\nclass Provider(InternetProvider):\n\n free_email_domains = (\n 'aol.de', 'gmail.com', 'gmx.de', 'googlemail.com', 'hotmail.de',\n 'web.de', 'yahoo.de',\n )\n tlds = ('com', 'com', 'com', 'net', 'org', 'de', 'de', 'de', )\n\n replacements = (\n ('ä', 'ae'), ('Ä', 'Ae'),\n ('ö', 'oe'), ('Ö', 'Oe'),\n ('ü', 'ue'), ('Ü', 'Ue'),\n ('ß', 'ss'),\n )\n",
"path": "faker/providers/internet/de_DE/__init__.py"
}
] | [
{
"content": "# coding=utf-8\n\nfrom __future__ import unicode_literals\nfrom .. import Provider as InternetProvider\n\nclass Provider(InternetProvider):\n\n free_email_domains = (\n 'aol.de', 'gmail.com', 'gmx.de', 'googlemail.com', 'hotmail.de',\n 'web.de', 'yahoo.de',\n )\n tlds = ('com', 'com', 'com', 'net', 'org', 'de', 'de', 'de', )\n\n replacements = (\n ('ä', 'ae'), ('Ä', 'Ae'),\n ('ö', 'oe'), ('Ö', 'Oe'),\n ('ü', 'ue'), ('Ü', 'Ue'),\n ('é', 'e'), ('É', 'E'),\n ('à', 'a'), ('À', 'A'),\n ('ß', 'ss'),\n )\n",
"path": "faker/providers/internet/de_DE/__init__.py"
}
] | diff --git a/faker/providers/internet/de_DE/__init__.py b/faker/providers/internet/de_DE/__init__.py
index 76aaec7ddf..231d57aa0f 100644
--- a/faker/providers/internet/de_DE/__init__.py
+++ b/faker/providers/internet/de_DE/__init__.py
@@ -15,5 +15,7 @@ class Provider(InternetProvider):
('ä', 'ae'), ('Ä', 'Ae'),
('ö', 'oe'), ('Ö', 'Oe'),
('ü', 'ue'), ('Ü', 'Ue'),
+ ('é', 'e'), ('É', 'E'),
+ ('à', 'a'), ('À', 'A'),
('ß', 'ss'),
)
|
openstates__openstates-scrapers-2982 | OR failing since at least 2019-06-09
OR has been failing since 2019-06-09
Based on automated runs it appears that OR has not run successfully in 2 days (2019-06-09).
```
loaded Open States pupa settings...
or (scrape, import)
bills: {}
votes: {}
08:01:13 CRITICAL pupa: Session(s) 2019-2020 Interim were reported by Oregon.get_session_list() but were not found in Oregon.legislative_sessions or Oregon.ignored_scraped_sessions.
```
Visit http://bobsled.openstates.org for more info.
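Judging from that error, the likely fix is simply to acknowledge the newly reported interim session. A sketch of the relevant attribute in `openstates/or/__init__.py` (this mirrors the change in the files below; the trailing entries are elided here):

```python
# Every session reported by get_session_list() must appear either in
# legislative_sessions or in ignored_scraped_sessions, otherwise pupa aborts.
ignored_scraped_sessions = [
    "Today",
    "2019-2020 Interim",  # newly reported by the Oregon API
    "2017-2018 Interim",
    "2015-2016 Interim",
    # ... remaining entries unchanged
]
```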
| [
{
"content": "from pupa.scrape import Jurisdiction, Organization\nfrom .people import ORPersonScraper\n# from .committees import ORCommitteeScraper\nfrom .bills import ORBillScraper\nfrom .votes import ORVoteScraper\n\n\nclass Oregon(Jurisdiction):\n division_id = \"ocd-division/country:us/state:or\"\n classification = \"government\"\n name = \"Oregon\"\n url = \"https://olis.leg.state.or.us\"\n scrapers = {\n 'people': ORPersonScraper,\n # 'committees': ORCommitteeScraper,\n 'bills': ORBillScraper,\n 'votes': ORVoteScraper\n }\n legislative_sessions = [\n {\n \"_scraped_name\": \"2007 Regular Session\",\n \"identifier\": \"2007 Regular Session\",\n \"name\": \"2007 Regular Session\"\n },\n {\n \"_scraped_name\": \"2008 Special Session\",\n \"identifier\": \"2008 Special Session\",\n \"name\": \"2008 Special Session\"\n },\n {\n \"_scraped_name\": \"2009 Regular Session\",\n \"identifier\": \"2009 Regular Session\",\n \"name\": \"2009 Regular Session\"\n },\n {\n \"_scraped_name\": \"2010 Special Session\",\n \"identifier\": \"2012 Special Session\",\n \"name\": \"2010 Special Session\"\n },\n {\n \"_scraped_name\": \"2011 Regular Session\",\n \"identifier\": \"2011 Regular Session\",\n \"name\": \"2011 Regular Session\"\n },\n {\n \"_scraped_name\": \"2012 Regular Session\",\n \"identifier\": \"2012 Regular Session\",\n \"name\": \"2012 Regular Session\"\n },\n {\n \"_scraped_name\": \"2012 Special Session\",\n \"identifier\": \"2012 Special Session\",\n \"name\": \"2012 Speical Session\"\n },\n {\n \"_scraped_name\": \"2013 Regular Session\",\n \"identifier\": \"2013 Regular Session\",\n \"name\": \"2013 Regular Session\"\n },\n {\n \"_scraped_name\": \"2013 Special Session\",\n \"identifier\": \"2013 Special Session\",\n \"name\": \"2013 Special Session\"\n },\n {\n \"_scraped_name\": \"2014 Regular Session\",\n \"identifier\": \"2014 Regular Session\",\n \"name\": \"2014 Regular Session\"\n },\n {\n \"_scraped_name\": \"2015 Regular Session\",\n \"identifier\": \"2015 Regular Session\",\n \"name\": \"2015 Regular Session\"\n },\n {\n \"_scraped_name\": \"2016 Regular Session\",\n \"identifier\": \"2016 Regular Session\",\n \"name\": \"2016 Regular Session\"\n },\n {\n \"_scraped_name\": \"2017 Regular Session\",\n \"end_date\": \"2017-07-10\",\n \"identifier\": \"2017 Regular Session\",\n \"name\": \"2017 Regular Session\",\n \"start_date\": \"2017-02-01\"\n },\n {\n \"_scraped_name\": \"2018 Regular Session\",\n \"identifier\": \"2018 Regular Session\",\n \"name\": \"2018 Regular Session\",\n \"start_date\": \"2018-02-05\",\n \"end_date\": \"2018-03-09\",\n },\n {\n \"_scraped_name\": \"2018 1st Special Session\",\n \"identifier\": \"2018 Special Session\",\n \"name\": \"2018 Special Session\",\n \"start_date\": \"2018-05-21\",\n \"end_date\": \"2018-05-21\",\n },\n {\n \"_scraped_name\": \"2019 Regular Session\",\n \"identifier\": \"2019 Regular Session\",\n \"name\": \"2019 Regular Session\",\n \"start_date\": \"2019-01-22\",\n \"end_date\": \"2019-06-30\",\n },\n ]\n ignored_scraped_sessions = [\n \"Today\",\n \"2017-2018 Interim\",\n \"2015-2016 Interim\",\n \"2013 1st Special Session\",\n \"2012 1st Special Session\",\n \"2013 - 2014 Interim\",\n \"2011 - 2012 Interim\",\n \"2009 - 2010 Interim\",\n \"2007 - 2008 Interim\"\n ]\n\n def get_organizations(self):\n legislature_name = \"Oregon Legislative Assembly\"\n\n legislature = Organization(name=legislature_name,\n classification=\"legislature\")\n upper = Organization('Senate', classification='upper',\n 
parent_id=legislature._id)\n lower = Organization('House', classification='lower',\n parent_id=legislature._id)\n\n yield legislature\n yield upper\n yield lower\n\n def get_session_list(self):\n from .apiclient import OregonLegislatorODataClient\n sessions = OregonLegislatorODataClient(None).all_sessions()\n sessions = [s['SessionName'] for s in sessions]\n return sessions\n",
"path": "openstates/or/__init__.py"
}
] | [
{
"content": "from pupa.scrape import Jurisdiction, Organization\nfrom .people import ORPersonScraper\n# from .committees import ORCommitteeScraper\nfrom .bills import ORBillScraper\nfrom .votes import ORVoteScraper\n\n\nclass Oregon(Jurisdiction):\n division_id = \"ocd-division/country:us/state:or\"\n classification = \"government\"\n name = \"Oregon\"\n url = \"https://olis.leg.state.or.us\"\n scrapers = {\n 'people': ORPersonScraper,\n # 'committees': ORCommitteeScraper,\n 'bills': ORBillScraper,\n 'votes': ORVoteScraper\n }\n legislative_sessions = [\n {\n \"_scraped_name\": \"2007 Regular Session\",\n \"identifier\": \"2007 Regular Session\",\n \"name\": \"2007 Regular Session\"\n },\n {\n \"_scraped_name\": \"2008 Special Session\",\n \"identifier\": \"2008 Special Session\",\n \"name\": \"2008 Special Session\"\n },\n {\n \"_scraped_name\": \"2009 Regular Session\",\n \"identifier\": \"2009 Regular Session\",\n \"name\": \"2009 Regular Session\"\n },\n {\n \"_scraped_name\": \"2010 Special Session\",\n \"identifier\": \"2012 Special Session\",\n \"name\": \"2010 Special Session\"\n },\n {\n \"_scraped_name\": \"2011 Regular Session\",\n \"identifier\": \"2011 Regular Session\",\n \"name\": \"2011 Regular Session\"\n },\n {\n \"_scraped_name\": \"2012 Regular Session\",\n \"identifier\": \"2012 Regular Session\",\n \"name\": \"2012 Regular Session\"\n },\n {\n \"_scraped_name\": \"2012 Special Session\",\n \"identifier\": \"2012 Special Session\",\n \"name\": \"2012 Speical Session\"\n },\n {\n \"_scraped_name\": \"2013 Regular Session\",\n \"identifier\": \"2013 Regular Session\",\n \"name\": \"2013 Regular Session\"\n },\n {\n \"_scraped_name\": \"2013 Special Session\",\n \"identifier\": \"2013 Special Session\",\n \"name\": \"2013 Special Session\"\n },\n {\n \"_scraped_name\": \"2014 Regular Session\",\n \"identifier\": \"2014 Regular Session\",\n \"name\": \"2014 Regular Session\"\n },\n {\n \"_scraped_name\": \"2015 Regular Session\",\n \"identifier\": \"2015 Regular Session\",\n \"name\": \"2015 Regular Session\"\n },\n {\n \"_scraped_name\": \"2016 Regular Session\",\n \"identifier\": \"2016 Regular Session\",\n \"name\": \"2016 Regular Session\"\n },\n {\n \"_scraped_name\": \"2017 Regular Session\",\n \"end_date\": \"2017-07-10\",\n \"identifier\": \"2017 Regular Session\",\n \"name\": \"2017 Regular Session\",\n \"start_date\": \"2017-02-01\"\n },\n {\n \"_scraped_name\": \"2018 Regular Session\",\n \"identifier\": \"2018 Regular Session\",\n \"name\": \"2018 Regular Session\",\n \"start_date\": \"2018-02-05\",\n \"end_date\": \"2018-03-09\",\n },\n {\n \"_scraped_name\": \"2018 1st Special Session\",\n \"identifier\": \"2018 Special Session\",\n \"name\": \"2018 Special Session\",\n \"start_date\": \"2018-05-21\",\n \"end_date\": \"2018-05-21\",\n },\n {\n \"_scraped_name\": \"2019 Regular Session\",\n \"identifier\": \"2019 Regular Session\",\n \"name\": \"2019 Regular Session\",\n \"start_date\": \"2019-01-22\",\n \"end_date\": \"2019-06-30\",\n },\n ]\n ignored_scraped_sessions = [\n \"Today\",\n \"2019-2020 Interim\",\n \"2017-2018 Interim\",\n \"2015-2016 Interim\",\n \"2013 1st Special Session\",\n \"2012 1st Special Session\",\n \"2013 - 2014 Interim\",\n \"2011 - 2012 Interim\",\n \"2009 - 2010 Interim\",\n \"2007 - 2008 Interim\"\n ]\n\n def get_organizations(self):\n legislature_name = \"Oregon Legislative Assembly\"\n\n legislature = Organization(name=legislature_name,\n classification=\"legislature\")\n upper = Organization('Senate', classification='upper',\n 
parent_id=legislature._id)\n lower = Organization('House', classification='lower',\n parent_id=legislature._id)\n\n yield legislature\n yield upper\n yield lower\n\n def get_session_list(self):\n from .apiclient import OregonLegislatorODataClient\n sessions = OregonLegislatorODataClient(None).all_sessions()\n sessions = [s['SessionName'] for s in sessions]\n return sessions\n",
"path": "openstates/or/__init__.py"
}
] | diff --git a/openstates/or/__init__.py b/openstates/or/__init__.py
index 2630876cc0..18c4f1eaa6 100644
--- a/openstates/or/__init__.py
+++ b/openstates/or/__init__.py
@@ -108,6 +108,7 @@ class Oregon(Jurisdiction):
]
ignored_scraped_sessions = [
"Today",
+ "2019-2020 Interim",
"2017-2018 Interim",
"2015-2016 Interim",
"2013 1st Special Session",
|
chainer__chainer-8219 | pytest is causing an error in Jenkins
Example:
https://jenkins.preferred.jp/job/chainer/job/chainer_pr/2162/TEST=CHAINERX_chainer-py3,label=mn1-p100/console
```
14:33:27 + pytest -rfEX --showlocals -m 'not slow and not ideep' /repo/tests/chainer_tests
14:33:28 Traceback (most recent call last):
14:33:28 File "/workspace/conda/envs/testenv/bin/pytest", line 10, in <module>
14:33:28 sys.exit(main())
14:33:28 File "/workspace/conda/envs/testenv/lib/python3.6/site-packages/_pytest/config/__init__.py", line 61, in main
14:33:28 config = _prepareconfig(args, plugins)
14:33:28 File "/workspace/conda/envs/testenv/lib/python3.6/site-packages/_pytest/config/__init__.py", line 182, in _prepareconfig
14:33:28 config = get_config()
14:33:28 File "/workspace/conda/envs/testenv/lib/python3.6/site-packages/_pytest/config/__init__.py", line 156, in get_config
14:33:28 pluginmanager.import_plugin(spec)
14:33:28 File "/workspace/conda/envs/testenv/lib/python3.6/site-packages/_pytest/config/__init__.py", line 530, in import_plugin
14:33:28 __import__(importspec)
14:33:28 File "/workspace/conda/envs/testenv/lib/python3.6/site-packages/_pytest/tmpdir.py", line 25, in <module>
14:33:28 class TempPathFactory(object):
14:33:28 File "/workspace/conda/envs/testenv/lib/python3.6/site-packages/_pytest/tmpdir.py", line 35, in TempPathFactory
14:33:28 lambda p: Path(os.path.abspath(six.text_type(p)))
14:33:28 TypeError: attrib() got an unexpected keyword argument 'convert'
```
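The traceback above comes from pytest's own `tmpdir.py` calling `attr.ib(convert=...)`, a keyword that newer attrs releases no longer accept, so collection fails before any Chainer test runs; this is why the fix below pins `attrs<19.2.0` alongside `pytest<4.2.0`. As a rough illustration only (the helper below is hypothetical and not part of Chainer or its CI scripts), a pre-flight check for the mismatch could look like this:

```python
# Hypothetical pre-flight check (not part of Chainer): verify that the
# installed attrs release predates 19.2.0, the version that dropped the
# deprecated `convert` keyword still used by pytest<4.2.
import pkg_resources


def attrs_supports_old_pytest():
    installed = pkg_resources.get_distribution('attrs').parsed_version
    return installed < pkg_resources.parse_version('19.2.0')


if __name__ == '__main__':
    print('attrs compatible with pytest<4.2:', attrs_supports_old_pytest())
```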
| [
{
"content": "#!/usr/bin/env python\n\nimport os\nimport pkg_resources\nimport sys\n\nfrom setuptools import setup\n\nimport chainerx_build_helper\n\n\nif sys.version_info[:3] == (3, 5, 0):\n if not int(os.getenv('CHAINER_PYTHON_350_FORCE', '0')):\n msg = \"\"\"\nChainer does not work with Python 3.5.0.\n\nWe strongly recommend to use another version of Python.\nIf you want to use Chainer with Python 3.5.0 at your own risk,\nset CHAINER_PYTHON_350_FORCE environment variable to 1.\"\"\"\n print(msg)\n sys.exit(1)\n\n\nrequirements = {\n 'install': [\n 'setuptools',\n # typing==3.7.4 causes error \"TypeError: Instance and class checks can\n # only be used with @runtime_checkable protocols\" only with Python 2.\n # https://github.com/chainer/chainer/pull/7562\n 'typing' + ('<=3.6.6' if sys.version_info[0] <= 2 else ''),\n 'typing_extensions' + ('<=3.6.6' if sys.version_info[0] <= 2 else ''),\n 'filelock',\n 'numpy>=1.9.0',\n # protobuf 3.8.0rc1 causes CI errors.\n # TODO(niboshi): Probably we should always use pip in CIs for\n # installing chainer. It avoids pre-release dependencies by default.\n # See also: https://github.com/pypa/setuptools/issues/855\n 'protobuf>=3.0.0,<3.8.0rc1',\n 'six>=1.9.0',\n ],\n 'stylecheck': [\n 'autopep8>=1.4.1,<1.5',\n 'flake8>=3.7,<3.8',\n 'pycodestyle>=2.5,<2.6',\n ],\n 'test': [\n 'pytest<4.2.0', # 4.2.0 is slow collecting tests and times out on CI.\n 'mock',\n ],\n 'doctest': [\n 'sphinx==1.8.2',\n 'matplotlib',\n 'theano',\n ],\n 'docs': [\n 'sphinx==1.8.2',\n 'sphinx_rtd_theme',\n ],\n 'appveyor': [\n '-r test',\n # pytest-timeout>=1.3.0 requires pytest>=3.6.\n # TODO(niboshi): Consider upgrading pytest to >=3.6\n 'pytest-timeout<1.3.0',\n ],\n}\n\n\ndef reduce_requirements(key):\n # Resolve recursive requirements notation (-r)\n reqs = requirements[key]\n resolved_reqs = []\n for req in reqs:\n if req.startswith('-r'):\n depend_key = req[2:].lstrip()\n reduce_requirements(depend_key)\n resolved_reqs += requirements[depend_key]\n else:\n resolved_reqs.append(req)\n requirements[key] = resolved_reqs\n\n\nfor k in requirements.keys():\n reduce_requirements(k)\n\n\nextras_require = {k: v for k, v in requirements.items() if k != 'install'}\nsetup_requires = []\ninstall_requires = requirements['install']\ntests_require = requirements['test']\n\n\ndef find_any_distribution(pkgs):\n for pkg in pkgs:\n try:\n return pkg_resources.get_distribution(pkg)\n except pkg_resources.DistributionNotFound:\n pass\n return None\n\n\nmn_pkg = find_any_distribution(['chainermn'])\nif mn_pkg is not None:\n msg = \"\"\"\nWe detected that ChainerMN is installed in your environment.\nChainerMN has been integrated to Chainer and no separate installation\nis necessary. 
Please uninstall the old ChainerMN in advance.\n\"\"\"\n print(msg)\n exit(1)\n\nhere = os.path.abspath(os.path.dirname(__file__))\n# Get __version__ variable\nexec(open(os.path.join(here, 'chainer', '_version.py')).read())\n\n\nsetup_kwargs = dict(\n name='chainer',\n version=__version__, # NOQA\n description='A flexible framework of neural networks',\n long_description=open('README.md').read(),\n long_description_content_type='text/markdown',\n author='Seiya Tokui',\n author_email='[email protected]',\n url='https://chainer.org/',\n license='MIT License',\n packages=['chainer',\n 'chainer.backends',\n 'chainer.dataset',\n 'chainer.dataset.tabular',\n 'chainer.datasets',\n 'chainer.distributions',\n 'chainer.exporters',\n 'chainer.functions',\n 'chainer.functions.activation',\n 'chainer.functions.array',\n 'chainer.functions.connection',\n 'chainer.functions.evaluation',\n 'chainer.functions.loss',\n 'chainer.functions.math',\n 'chainer.functions.noise',\n 'chainer.functions.normalization',\n 'chainer.functions.pooling',\n 'chainer.functions.rnn',\n 'chainer.functions.theano',\n 'chainer.functions.util',\n 'chainer.function_hooks',\n 'chainer.iterators',\n 'chainer.initializers',\n 'chainer.links',\n 'chainer.links.activation',\n 'chainer.links.caffe',\n 'chainer.links.caffe.protobuf3',\n 'chainer.links.connection',\n 'chainer.links.loss',\n 'chainer.links.model',\n 'chainer.links.model.vision',\n 'chainer.links.normalization',\n 'chainer.links.rnn',\n 'chainer.links.theano',\n 'chainer.link_hooks',\n 'chainer.graph_optimizations',\n 'chainer.optimizers',\n 'chainer.optimizer_hooks',\n 'chainer.serializers',\n 'chainer.testing',\n 'chainer.training',\n 'chainer.training.extensions',\n 'chainer.training.triggers',\n 'chainer.training.updaters',\n 'chainer.utils',\n 'chainermn',\n 'chainermn.communicators',\n 'chainermn.datasets',\n 'chainermn.extensions',\n 'chainermn.functions',\n 'chainermn.iterators',\n 'chainermn.links'],\n package_data={\n 'chainer': ['py.typed'],\n },\n zip_safe=False,\n setup_requires=setup_requires,\n install_requires=install_requires,\n tests_require=tests_require,\n extras_require=extras_require,\n)\n\n\nbuild_chainerx = 0 != int(os.getenv('CHAINER_BUILD_CHAINERX', '0'))\nif (os.getenv('READTHEDOCS', None) == 'True'\n and os.getenv('READTHEDOCS_PROJECT', None) == 'chainer'):\n os.environ['MAKEFLAGS'] = '-j2'\n build_chainerx = True\n\nchainerx_build_helper.config_setup_kwargs(setup_kwargs, build_chainerx)\n\n\nsetup(**setup_kwargs)\n",
"path": "setup.py"
}
] | [
{
"content": "#!/usr/bin/env python\n\nimport os\nimport pkg_resources\nimport sys\n\nfrom setuptools import setup\n\nimport chainerx_build_helper\n\n\nif sys.version_info[:3] == (3, 5, 0):\n if not int(os.getenv('CHAINER_PYTHON_350_FORCE', '0')):\n msg = \"\"\"\nChainer does not work with Python 3.5.0.\n\nWe strongly recommend to use another version of Python.\nIf you want to use Chainer with Python 3.5.0 at your own risk,\nset CHAINER_PYTHON_350_FORCE environment variable to 1.\"\"\"\n print(msg)\n sys.exit(1)\n\n\nrequirements = {\n 'install': [\n 'setuptools',\n # typing==3.7.4 causes error \"TypeError: Instance and class checks can\n # only be used with @runtime_checkable protocols\" only with Python 2.\n # https://github.com/chainer/chainer/pull/7562\n 'typing' + ('<=3.6.6' if sys.version_info[0] <= 2 else ''),\n 'typing_extensions' + ('<=3.6.6' if sys.version_info[0] <= 2 else ''),\n 'filelock',\n 'numpy>=1.9.0',\n # protobuf 3.8.0rc1 causes CI errors.\n # TODO(niboshi): Probably we should always use pip in CIs for\n # installing chainer. It avoids pre-release dependencies by default.\n # See also: https://github.com/pypa/setuptools/issues/855\n 'protobuf>=3.0.0,<3.8.0rc1',\n 'six>=1.9.0',\n ],\n 'stylecheck': [\n 'autopep8>=1.4.1,<1.5',\n 'flake8>=3.7,<3.8',\n 'pycodestyle>=2.5,<2.6',\n ],\n 'test': [\n 'pytest<4.2.0', # 4.2.0 is slow collecting tests and times out on CI.\n 'attrs<19.2.0', # pytest 4.1.1 does not run with attrs==19.2.0\n 'mock',\n ],\n 'doctest': [\n 'sphinx==1.8.2',\n 'matplotlib',\n 'theano',\n ],\n 'docs': [\n 'sphinx==1.8.2',\n 'sphinx_rtd_theme',\n ],\n 'appveyor': [\n '-r test',\n # pytest-timeout>=1.3.0 requires pytest>=3.6.\n # TODO(niboshi): Consider upgrading pytest to >=3.6\n 'pytest-timeout<1.3.0',\n ],\n}\n\n\ndef reduce_requirements(key):\n # Resolve recursive requirements notation (-r)\n reqs = requirements[key]\n resolved_reqs = []\n for req in reqs:\n if req.startswith('-r'):\n depend_key = req[2:].lstrip()\n reduce_requirements(depend_key)\n resolved_reqs += requirements[depend_key]\n else:\n resolved_reqs.append(req)\n requirements[key] = resolved_reqs\n\n\nfor k in requirements.keys():\n reduce_requirements(k)\n\n\nextras_require = {k: v for k, v in requirements.items() if k != 'install'}\nsetup_requires = []\ninstall_requires = requirements['install']\ntests_require = requirements['test']\n\n\ndef find_any_distribution(pkgs):\n for pkg in pkgs:\n try:\n return pkg_resources.get_distribution(pkg)\n except pkg_resources.DistributionNotFound:\n pass\n return None\n\n\nmn_pkg = find_any_distribution(['chainermn'])\nif mn_pkg is not None:\n msg = \"\"\"\nWe detected that ChainerMN is installed in your environment.\nChainerMN has been integrated to Chainer and no separate installation\nis necessary. 
Please uninstall the old ChainerMN in advance.\n\"\"\"\n print(msg)\n exit(1)\n\nhere = os.path.abspath(os.path.dirname(__file__))\n# Get __version__ variable\nexec(open(os.path.join(here, 'chainer', '_version.py')).read())\n\n\nsetup_kwargs = dict(\n name='chainer',\n version=__version__, # NOQA\n description='A flexible framework of neural networks',\n long_description=open('README.md').read(),\n long_description_content_type='text/markdown',\n author='Seiya Tokui',\n author_email='[email protected]',\n url='https://chainer.org/',\n license='MIT License',\n packages=['chainer',\n 'chainer.backends',\n 'chainer.dataset',\n 'chainer.dataset.tabular',\n 'chainer.datasets',\n 'chainer.distributions',\n 'chainer.exporters',\n 'chainer.functions',\n 'chainer.functions.activation',\n 'chainer.functions.array',\n 'chainer.functions.connection',\n 'chainer.functions.evaluation',\n 'chainer.functions.loss',\n 'chainer.functions.math',\n 'chainer.functions.noise',\n 'chainer.functions.normalization',\n 'chainer.functions.pooling',\n 'chainer.functions.rnn',\n 'chainer.functions.theano',\n 'chainer.functions.util',\n 'chainer.function_hooks',\n 'chainer.iterators',\n 'chainer.initializers',\n 'chainer.links',\n 'chainer.links.activation',\n 'chainer.links.caffe',\n 'chainer.links.caffe.protobuf3',\n 'chainer.links.connection',\n 'chainer.links.loss',\n 'chainer.links.model',\n 'chainer.links.model.vision',\n 'chainer.links.normalization',\n 'chainer.links.rnn',\n 'chainer.links.theano',\n 'chainer.link_hooks',\n 'chainer.graph_optimizations',\n 'chainer.optimizers',\n 'chainer.optimizer_hooks',\n 'chainer.serializers',\n 'chainer.testing',\n 'chainer.training',\n 'chainer.training.extensions',\n 'chainer.training.triggers',\n 'chainer.training.updaters',\n 'chainer.utils',\n 'chainermn',\n 'chainermn.communicators',\n 'chainermn.datasets',\n 'chainermn.extensions',\n 'chainermn.functions',\n 'chainermn.iterators',\n 'chainermn.links'],\n package_data={\n 'chainer': ['py.typed'],\n },\n zip_safe=False,\n setup_requires=setup_requires,\n install_requires=install_requires,\n tests_require=tests_require,\n extras_require=extras_require,\n)\n\n\nbuild_chainerx = 0 != int(os.getenv('CHAINER_BUILD_CHAINERX', '0'))\nif (os.getenv('READTHEDOCS', None) == 'True'\n and os.getenv('READTHEDOCS_PROJECT', None) == 'chainer'):\n os.environ['MAKEFLAGS'] = '-j2'\n build_chainerx = True\n\nchainerx_build_helper.config_setup_kwargs(setup_kwargs, build_chainerx)\n\n\nsetup(**setup_kwargs)\n",
"path": "setup.py"
}
] | diff --git a/setup.py b/setup.py
index 61bce8169532..2fabb8e706f1 100644
--- a/setup.py
+++ b/setup.py
@@ -45,6 +45,7 @@
],
'test': [
'pytest<4.2.0', # 4.2.0 is slow collecting tests and times out on CI.
+ 'attrs<19.2.0', # pytest 4.1.1 does not run with attrs==19.2.0
'mock',
],
'doctest': [
|
redis__redis-py-1069 | AttributeError: 'UnixDomainSocketConnection' object has no attribute '_buffer_cutoff'
Since version 3.0, the redis client seems broken when connecting over a unix socket: even basic calls such as get, set, or keys() fail.
I reproduced this in a docker container running CentOS 7.5.1804 (core) with python3.6.
Steps to reproduce:
Install redis 3.0, then run:
```python
import redis
red = redis.Redis(unix_socket_path = "/path/to/socket")
red.keys()
```
Throws this:
```
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/lib/python3.6/site-packages/redis/client.py", line 1262, in keys
return self.execute_command('KEYS', pattern)
File "/usr/lib/python3.6/site-packages/redis/client.py", line 754, in execute_command
connection.send_command(*args)
File "/usr/lib/python3.6/site-packages/redis/connection.py", line 619, in send_command
self.send_packed_command(self.pack_command(*args))
File "/usr/lib/python3.6/site-packages/redis/connection.py", line 658, in pack_command
buffer_cutoff = self._buffer_cutoff
AttributeError: 'UnixDomainSocketConnection' object has no attribute '_buffer_cutoff'
```
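The root cause is visible in the `redis/connection.py` source quoted below: `Connection.__init__` sets `self._buffer_cutoff = 6000`, but `UnixDomainSocketConnection.__init__` re-implements the initializer without delegating to the parent, so the attribute never exists and the first `pack_command` call fails. A stripped-down sketch of that failure mode (simplified stand-in classes, not the actual redis-py code) is:

```python
# Minimal reproduction of the failure pattern: the subclass overrides
# __init__ without calling the parent initializer, so an attribute that
# only the parent sets (_buffer_cutoff) never exists on the instance.
class Connection(object):
    def __init__(self):
        self._buffer_cutoff = 6000   # defined only by the parent

class UnixDomainSocketConnection(Connection):
    def __init__(self, path=''):
        self.path = path             # parent __init__ is skipped

    def pack_command(self):
        return self._buffer_cutoff   # AttributeError at call time

conn = UnixDomainSocketConnection('/path/to/socket')
try:
    conn.pack_command()
except AttributeError as exc:
    print(exc)
```

One plausible fix, assuming nothing about the eventual patch, is for the subclass to delegate shared attribute setup to `Connection.__init__` (or to set `_buffer_cutoff` itself).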
| [
{
"content": "from __future__ import unicode_literals\nfrom distutils.version import StrictVersion\nfrom itertools import chain\nimport io\nimport os\nimport socket\nimport sys\nimport threading\nimport warnings\n\ntry:\n import ssl\n ssl_available = True\nexcept ImportError:\n ssl_available = False\n\nfrom redis._compat import (xrange, imap, byte_to_chr, unicode, bytes, long,\n nativestr, basestring, iteritems,\n LifoQueue, Empty, Full, urlparse, parse_qs,\n recv, recv_into, select, unquote)\nfrom redis.exceptions import (\n DataError,\n RedisError,\n ConnectionError,\n TimeoutError,\n BusyLoadingError,\n ResponseError,\n InvalidResponse,\n AuthenticationError,\n NoScriptError,\n ExecAbortError,\n ReadOnlyError\n)\nfrom redis.utils import HIREDIS_AVAILABLE\nif HIREDIS_AVAILABLE:\n import hiredis\n\n hiredis_version = StrictVersion(hiredis.__version__)\n HIREDIS_SUPPORTS_CALLABLE_ERRORS = \\\n hiredis_version >= StrictVersion('0.1.3')\n HIREDIS_SUPPORTS_BYTE_BUFFER = \\\n hiredis_version >= StrictVersion('0.1.4')\n\n if not HIREDIS_SUPPORTS_BYTE_BUFFER:\n msg = (\"redis-py works best with hiredis >= 0.1.4. You're running \"\n \"hiredis %s. Please consider upgrading.\" % hiredis.__version__)\n warnings.warn(msg)\n\n HIREDIS_USE_BYTE_BUFFER = True\n # only use byte buffer if hiredis supports it\n if not HIREDIS_SUPPORTS_BYTE_BUFFER:\n HIREDIS_USE_BYTE_BUFFER = False\n\nSYM_STAR = b'*'\nSYM_DOLLAR = b'$'\nSYM_CRLF = b'\\r\\n'\nSYM_EMPTY = b''\n\nSERVER_CLOSED_CONNECTION_ERROR = \"Connection closed by server.\"\n\n\nclass Token(object):\n \"\"\"\n Literal strings in Redis commands, such as the command names and any\n hard-coded arguments are wrapped in this class so we know not to apply\n and encoding rules on them.\n \"\"\"\n\n _cache = {}\n\n @classmethod\n def get_token(cls, value):\n \"Gets a cached token object or creates a new one if not already cached\"\n\n # Use try/except because after running for a short time most tokens\n # should already be cached\n try:\n return cls._cache[value]\n except KeyError:\n token = Token(value)\n cls._cache[value] = token\n return token\n\n def __init__(self, value):\n if isinstance(value, Token):\n value = value.value\n self.value = value\n self.encoded_value = value.encode()\n\n def __repr__(self):\n return self.value\n\n def __str__(self):\n return self.value\n\n\nclass Encoder(object):\n \"Encode strings to bytes and decode bytes to strings\"\n\n def __init__(self, encoding, encoding_errors, decode_responses):\n self.encoding = encoding\n self.encoding_errors = encoding_errors\n self.decode_responses = decode_responses\n\n def encode(self, value):\n \"Return a bytestring representation of the value\"\n if isinstance(value, Token):\n return value.encoded_value\n elif isinstance(value, bytes):\n return value\n elif isinstance(value, bool):\n # special case bool since it is a subclass of int\n raise DataError(\"Invalid input of type: 'bool'. Convert to a \"\n \"byte, string or number first.\")\n elif isinstance(value, float):\n value = repr(value).encode()\n elif isinstance(value, (int, long)):\n # python 2 repr() on longs is '123L', so use str() instead\n value = str(value).encode()\n elif not isinstance(value, basestring):\n # a value we don't know how to deal with. throw an error\n typename = type(value).__name__\n raise DataError(\"Invalid input of type: '%s'. 
Convert to a \"\n \"byte, string or number first.\" % typename)\n if isinstance(value, unicode):\n value = value.encode(self.encoding, self.encoding_errors)\n return value\n\n def decode(self, value, force=False):\n \"Return a unicode string from the byte representation\"\n if (self.decode_responses or force) and isinstance(value, bytes):\n value = value.decode(self.encoding, self.encoding_errors)\n return value\n\n\nclass BaseParser(object):\n EXCEPTION_CLASSES = {\n 'ERR': {\n 'max number of clients reached': ConnectionError\n },\n 'EXECABORT': ExecAbortError,\n 'LOADING': BusyLoadingError,\n 'NOSCRIPT': NoScriptError,\n 'READONLY': ReadOnlyError,\n }\n\n def parse_error(self, response):\n \"Parse an error response\"\n error_code = response.split(' ')[0]\n if error_code in self.EXCEPTION_CLASSES:\n response = response[len(error_code) + 1:]\n exception_class = self.EXCEPTION_CLASSES[error_code]\n if isinstance(exception_class, dict):\n exception_class = exception_class.get(response, ResponseError)\n return exception_class(response)\n return ResponseError(response)\n\n\nclass SocketBuffer(object):\n def __init__(self, socket, socket_read_size):\n self._sock = socket\n self.socket_read_size = socket_read_size\n self._buffer = io.BytesIO()\n # number of bytes written to the buffer from the socket\n self.bytes_written = 0\n # number of bytes read from the buffer\n self.bytes_read = 0\n\n @property\n def length(self):\n return self.bytes_written - self.bytes_read\n\n def _read_from_socket(self, length=None):\n socket_read_size = self.socket_read_size\n buf = self._buffer\n buf.seek(self.bytes_written)\n marker = 0\n\n try:\n while True:\n data = recv(self._sock, socket_read_size)\n # an empty string indicates the server shutdown the socket\n if isinstance(data, bytes) and len(data) == 0:\n raise socket.error(SERVER_CLOSED_CONNECTION_ERROR)\n buf.write(data)\n data_length = len(data)\n self.bytes_written += data_length\n marker += data_length\n\n if length is not None and length > marker:\n continue\n break\n except socket.timeout:\n raise TimeoutError(\"Timeout reading from socket\")\n except socket.error:\n e = sys.exc_info()[1]\n raise ConnectionError(\"Error while reading from socket: %s\" %\n (e.args,))\n\n def read(self, length):\n length = length + 2 # make sure to read the \\r\\n terminator\n # make sure we've read enough data from the socket\n if length > self.length:\n self._read_from_socket(length - self.length)\n\n self._buffer.seek(self.bytes_read)\n data = self._buffer.read(length)\n self.bytes_read += len(data)\n\n # purge the buffer when we've consumed it all so it doesn't\n # grow forever\n if self.bytes_read == self.bytes_written:\n self.purge()\n\n return data[:-2]\n\n def readline(self):\n buf = self._buffer\n buf.seek(self.bytes_read)\n data = buf.readline()\n while not data.endswith(SYM_CRLF):\n # there's more data in the socket that we need\n self._read_from_socket()\n buf.seek(self.bytes_read)\n data = buf.readline()\n\n self.bytes_read += len(data)\n\n # purge the buffer when we've consumed it all so it doesn't\n # grow forever\n if self.bytes_read == self.bytes_written:\n self.purge()\n\n return data[:-2]\n\n def purge(self):\n self._buffer.seek(0)\n self._buffer.truncate()\n self.bytes_written = 0\n self.bytes_read = 0\n\n def close(self):\n try:\n self.purge()\n self._buffer.close()\n except Exception:\n # issue #633 suggests the purge/close somehow raised a\n # BadFileDescriptor error. Perhaps the client ran out of\n # memory or something else? 
It's probably OK to ignore\n # any error being raised from purge/close since we're\n # removing the reference to the instance below.\n pass\n self._buffer = None\n self._sock = None\n\n\nclass PythonParser(BaseParser):\n \"Plain Python parsing class\"\n def __init__(self, socket_read_size):\n self.socket_read_size = socket_read_size\n self.encoder = None\n self._sock = None\n self._buffer = None\n\n def __del__(self):\n try:\n self.on_disconnect()\n except Exception:\n pass\n\n def on_connect(self, connection):\n \"Called when the socket connects\"\n self._sock = connection._sock\n self._buffer = SocketBuffer(self._sock, self.socket_read_size)\n self.encoder = connection.encoder\n\n def on_disconnect(self):\n \"Called when the socket disconnects\"\n if self._sock is not None:\n self._sock.close()\n self._sock = None\n if self._buffer is not None:\n self._buffer.close()\n self._buffer = None\n self.encoder = None\n\n def can_read(self):\n return self._buffer and bool(self._buffer.length)\n\n def read_response(self):\n response = self._buffer.readline()\n if not response:\n raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)\n\n byte, response = byte_to_chr(response[0]), response[1:]\n\n if byte not in ('-', '+', ':', '$', '*'):\n raise InvalidResponse(\"Protocol Error: %s, %s\" %\n (str(byte), str(response)))\n\n # server returned an error\n if byte == '-':\n response = nativestr(response)\n error = self.parse_error(response)\n # if the error is a ConnectionError, raise immediately so the user\n # is notified\n if isinstance(error, ConnectionError):\n raise error\n # otherwise, we're dealing with a ResponseError that might belong\n # inside a pipeline response. the connection's read_response()\n # and/or the pipeline's execute() will raise this error if\n # necessary, so just return the exception instance here.\n return error\n # single value\n elif byte == '+':\n pass\n # int value\n elif byte == ':':\n response = long(response)\n # bulk response\n elif byte == '$':\n length = int(response)\n if length == -1:\n return None\n response = self._buffer.read(length)\n # multi-bulk response\n elif byte == '*':\n length = int(response)\n if length == -1:\n return None\n response = [self.read_response() for i in xrange(length)]\n if isinstance(response, bytes):\n response = self.encoder.decode(response)\n return response\n\n\nclass HiredisParser(BaseParser):\n \"Parser class for connections using Hiredis\"\n def __init__(self, socket_read_size):\n if not HIREDIS_AVAILABLE:\n raise RedisError(\"Hiredis is not installed\")\n self.socket_read_size = socket_read_size\n\n if HIREDIS_USE_BYTE_BUFFER:\n self._buffer = bytearray(socket_read_size)\n\n def __del__(self):\n try:\n self.on_disconnect()\n except Exception:\n pass\n\n def on_connect(self, connection):\n self._sock = connection._sock\n kwargs = {\n 'protocolError': InvalidResponse,\n 'replyError': self.parse_error,\n }\n\n # hiredis < 0.1.3 doesn't support functions that create exceptions\n if not HIREDIS_SUPPORTS_CALLABLE_ERRORS:\n kwargs['replyError'] = ResponseError\n\n if connection.encoder.decode_responses:\n kwargs['encoding'] = connection.encoder.encoding\n self._reader = hiredis.Reader(**kwargs)\n self._next_response = False\n\n def on_disconnect(self):\n self._sock = None\n self._reader = None\n self._next_response = False\n\n def can_read(self):\n if not self._reader:\n raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)\n\n if self._next_response is False:\n self._next_response = self._reader.gets()\n return self._next_response is 
not False\n\n def read_response(self):\n if not self._reader:\n raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)\n\n # _next_response might be cached from a can_read() call\n if self._next_response is not False:\n response = self._next_response\n self._next_response = False\n return response\n\n response = self._reader.gets()\n socket_read_size = self.socket_read_size\n while response is False:\n try:\n if HIREDIS_USE_BYTE_BUFFER:\n bufflen = recv_into(self._sock, self._buffer)\n if bufflen == 0:\n raise socket.error(SERVER_CLOSED_CONNECTION_ERROR)\n else:\n buffer = recv(self._sock, socket_read_size)\n # an empty string indicates the server shutdown the socket\n if not isinstance(buffer, bytes) or len(buffer) == 0:\n raise socket.error(SERVER_CLOSED_CONNECTION_ERROR)\n except socket.timeout:\n raise TimeoutError(\"Timeout reading from socket\")\n except socket.error:\n e = sys.exc_info()[1]\n raise ConnectionError(\"Error while reading from socket: %s\" %\n (e.args,))\n if HIREDIS_USE_BYTE_BUFFER:\n self._reader.feed(self._buffer, 0, bufflen)\n else:\n self._reader.feed(buffer)\n response = self._reader.gets()\n # if an older version of hiredis is installed, we need to attempt\n # to convert ResponseErrors to their appropriate types.\n if not HIREDIS_SUPPORTS_CALLABLE_ERRORS:\n if isinstance(response, ResponseError):\n response = self.parse_error(response.args[0])\n elif isinstance(response, list) and response and \\\n isinstance(response[0], ResponseError):\n response[0] = self.parse_error(response[0].args[0])\n # if the response is a ConnectionError or the response is a list and\n # the first item is a ConnectionError, raise it as something bad\n # happened\n if isinstance(response, ConnectionError):\n raise response\n elif isinstance(response, list) and response and \\\n isinstance(response[0], ConnectionError):\n raise response[0]\n return response\n\n\nif HIREDIS_AVAILABLE:\n DefaultParser = HiredisParser\nelse:\n DefaultParser = PythonParser\n\n\nclass Connection(object):\n \"Manages TCP communication to and from a Redis server\"\n description_format = \"Connection<host=%(host)s,port=%(port)s,db=%(db)s>\"\n\n def __init__(self, host='localhost', port=6379, db=0, password=None,\n socket_timeout=None, socket_connect_timeout=None,\n socket_keepalive=False, socket_keepalive_options=None,\n socket_type=0, retry_on_timeout=False, encoding='utf-8',\n encoding_errors='strict', decode_responses=False,\n parser_class=DefaultParser, socket_read_size=65536):\n self.pid = os.getpid()\n self.host = host\n self.port = int(port)\n self.db = db\n self.password = password\n self.socket_timeout = socket_timeout\n self.socket_connect_timeout = socket_connect_timeout or socket_timeout\n self.socket_keepalive = socket_keepalive\n self.socket_keepalive_options = socket_keepalive_options or {}\n self.socket_type = socket_type\n self.retry_on_timeout = retry_on_timeout\n self.encoder = Encoder(encoding, encoding_errors, decode_responses)\n self._sock = None\n self._parser = parser_class(socket_read_size=socket_read_size)\n self._description_args = {\n 'host': self.host,\n 'port': self.port,\n 'db': self.db,\n }\n self._connect_callbacks = []\n self._buffer_cutoff = 6000\n\n def __repr__(self):\n return self.description_format % self._description_args\n\n def __del__(self):\n try:\n self.disconnect()\n except Exception:\n pass\n\n def register_connect_callback(self, callback):\n self._connect_callbacks.append(callback)\n\n def clear_connect_callbacks(self):\n self._connect_callbacks = []\n\n def 
connect(self):\n \"Connects to the Redis server if not already connected\"\n if self._sock:\n return\n try:\n sock = self._connect()\n except socket.timeout:\n raise TimeoutError(\"Timeout connecting to server\")\n except socket.error:\n e = sys.exc_info()[1]\n raise ConnectionError(self._error_message(e))\n\n self._sock = sock\n try:\n self.on_connect()\n except RedisError:\n # clean up after any error in on_connect\n self.disconnect()\n raise\n\n # run any user callbacks. right now the only internal callback\n # is for pubsub channel/pattern resubscription\n for callback in self._connect_callbacks:\n callback(self)\n\n def _connect(self):\n \"Create a TCP socket connection\"\n # we want to mimic what socket.create_connection does to support\n # ipv4/ipv6, but we want to set options prior to calling\n # socket.connect()\n err = None\n for res in socket.getaddrinfo(self.host, self.port, self.socket_type,\n socket.SOCK_STREAM):\n family, socktype, proto, canonname, socket_address = res\n sock = None\n try:\n sock = socket.socket(family, socktype, proto)\n # TCP_NODELAY\n sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)\n\n # TCP_KEEPALIVE\n if self.socket_keepalive:\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)\n for k, v in iteritems(self.socket_keepalive_options):\n sock.setsockopt(socket.SOL_TCP, k, v)\n\n # set the socket_connect_timeout before we connect\n sock.settimeout(self.socket_connect_timeout)\n\n # connect\n sock.connect(socket_address)\n\n # set the socket_timeout now that we're connected\n sock.settimeout(self.socket_timeout)\n return sock\n\n except socket.error as _:\n err = _\n if sock is not None:\n sock.close()\n\n if err is not None:\n raise err\n raise socket.error(\"socket.getaddrinfo returned an empty list\")\n\n def _error_message(self, exception):\n # args for socket.error can either be (errno, \"message\")\n # or just \"message\"\n if len(exception.args) == 1:\n return \"Error connecting to %s:%s. %s.\" % \\\n (self.host, self.port, exception.args[0])\n else:\n return \"Error %s connecting to %s:%s. %s.\" % \\\n (exception.args[0], self.host, self.port, exception.args[1])\n\n def on_connect(self):\n \"Initialize the connection, authenticate and select a database\"\n self._parser.on_connect(self)\n\n # if a password is specified, authenticate\n if self.password:\n self.send_command('AUTH', self.password)\n if nativestr(self.read_response()) != 'OK':\n raise AuthenticationError('Invalid Password')\n\n # if a database is specified, switch to it\n if self.db:\n self.send_command('SELECT', self.db)\n if nativestr(self.read_response()) != 'OK':\n raise ConnectionError('Invalid Database')\n\n def disconnect(self):\n \"Disconnects from the Redis server\"\n self._parser.on_disconnect()\n if self._sock is None:\n return\n try:\n self._sock.shutdown(socket.SHUT_RDWR)\n self._sock.close()\n except socket.error:\n pass\n self._sock = None\n\n def send_packed_command(self, command):\n \"Send an already packed command to the Redis server\"\n if not self._sock:\n self.connect()\n try:\n if isinstance(command, str):\n command = [command]\n for item in command:\n self._sock.sendall(item)\n except socket.timeout:\n self.disconnect()\n raise TimeoutError(\"Timeout writing to socket\")\n except socket.error:\n e = sys.exc_info()[1]\n self.disconnect()\n if len(e.args) == 1:\n errno, errmsg = 'UNKNOWN', e.args[0]\n else:\n errno = e.args[0]\n errmsg = e.args[1]\n raise ConnectionError(\"Error %s while writing to socket. 
%s.\" %\n (errno, errmsg))\n except Exception as e:\n self.disconnect()\n raise e\n\n def send_command(self, *args):\n \"Pack and send a command to the Redis server\"\n self.send_packed_command(self.pack_command(*args))\n\n def can_read(self, timeout=0):\n \"Poll the socket to see if there's data that can be read.\"\n sock = self._sock\n if not sock:\n self.connect()\n sock = self._sock\n return self._parser.can_read() or \\\n bool(select([sock], [], [], timeout)[0])\n\n def read_response(self):\n \"Read the response from a previously sent command\"\n try:\n response = self._parser.read_response()\n except Exception as e:\n self.disconnect()\n raise e\n if isinstance(response, ResponseError):\n raise response\n return response\n\n def pack_command(self, *args):\n \"Pack a series of arguments into the Redis protocol\"\n output = []\n # the client might have included 1 or more literal arguments in\n # the command name, e.g., 'CONFIG GET'. The Redis server expects these\n # arguments to be sent separately, so split the first argument\n # manually. All of these arguements get wrapped in the Token class\n # to prevent them from being encoded.\n command = args[0]\n if ' ' in command:\n args = tuple(Token.get_token(s)\n for s in command.split()) + args[1:]\n else:\n args = (Token.get_token(command),) + args[1:]\n\n buff = SYM_EMPTY.join((SYM_STAR, str(len(args)).encode(), SYM_CRLF))\n\n buffer_cutoff = self._buffer_cutoff\n for arg in imap(self.encoder.encode, args):\n # to avoid large string mallocs, chunk the command into the\n # output list if we're sending large values\n if len(buff) > buffer_cutoff or len(arg) > buffer_cutoff:\n buff = SYM_EMPTY.join(\n (buff, SYM_DOLLAR, str(len(arg)).encode(), SYM_CRLF))\n output.append(buff)\n output.append(arg)\n buff = SYM_CRLF\n else:\n buff = SYM_EMPTY.join(\n (buff, SYM_DOLLAR, str(len(arg)).encode(),\n SYM_CRLF, arg, SYM_CRLF))\n output.append(buff)\n return output\n\n def pack_commands(self, commands):\n \"Pack multiple commands into the Redis protocol\"\n output = []\n pieces = []\n buffer_length = 0\n buffer_cutoff = self._buffer_cutoff\n\n for cmd in commands:\n for chunk in self.pack_command(*cmd):\n chunklen = len(chunk)\n if buffer_length > buffer_cutoff or chunklen > buffer_cutoff:\n output.append(SYM_EMPTY.join(pieces))\n buffer_length = 0\n pieces = []\n\n if chunklen > self._buffer_cutoff:\n output.append(chunk)\n else:\n pieces.append(chunk)\n buffer_length += chunklen\n\n if pieces:\n output.append(SYM_EMPTY.join(pieces))\n return output\n\n\nclass SSLConnection(Connection):\n description_format = \"SSLConnection<host=%(host)s,port=%(port)s,db=%(db)s>\"\n\n def __init__(self, ssl_keyfile=None, ssl_certfile=None,\n ssl_cert_reqs='required', ssl_ca_certs=None, **kwargs):\n if not ssl_available:\n raise RedisError(\"Python wasn't built with SSL support\")\n\n super(SSLConnection, self).__init__(**kwargs)\n\n self.keyfile = ssl_keyfile\n self.certfile = ssl_certfile\n if ssl_cert_reqs is None:\n ssl_cert_reqs = ssl.CERT_NONE\n elif isinstance(ssl_cert_reqs, basestring):\n CERT_REQS = {\n 'none': ssl.CERT_NONE,\n 'optional': ssl.CERT_OPTIONAL,\n 'required': ssl.CERT_REQUIRED\n }\n if ssl_cert_reqs not in CERT_REQS:\n raise RedisError(\n \"Invalid SSL Certificate Requirements Flag: %s\" %\n ssl_cert_reqs)\n ssl_cert_reqs = CERT_REQS[ssl_cert_reqs]\n self.cert_reqs = ssl_cert_reqs\n self.ca_certs = ssl_ca_certs\n\n def _connect(self):\n \"Wrap the socket with SSL support\"\n sock = super(SSLConnection, self)._connect()\n sock = 
ssl.wrap_socket(sock,\n cert_reqs=self.cert_reqs,\n keyfile=self.keyfile,\n certfile=self.certfile,\n ca_certs=self.ca_certs)\n return sock\n\n\nclass UnixDomainSocketConnection(Connection):\n description_format = \"UnixDomainSocketConnection<path=%(path)s,db=%(db)s>\"\n\n def __init__(self, path='', db=0, password=None,\n socket_timeout=None, encoding='utf-8',\n encoding_errors='strict', decode_responses=False,\n retry_on_timeout=False,\n parser_class=DefaultParser, socket_read_size=65536):\n self.pid = os.getpid()\n self.path = path\n self.db = db\n self.password = password\n self.socket_timeout = socket_timeout\n self.retry_on_timeout = retry_on_timeout\n self.encoder = Encoder(encoding, encoding_errors, decode_responses)\n self._sock = None\n self._parser = parser_class(socket_read_size=socket_read_size)\n self._description_args = {\n 'path': self.path,\n 'db': self.db,\n }\n self._connect_callbacks = []\n\n def _connect(self):\n \"Create a Unix domain socket connection\"\n sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n sock.settimeout(self.socket_timeout)\n sock.connect(self.path)\n return sock\n\n def _error_message(self, exception):\n # args for socket.error can either be (errno, \"message\")\n # or just \"message\"\n if len(exception.args) == 1:\n return \"Error connecting to unix socket: %s. %s.\" % \\\n (self.path, exception.args[0])\n else:\n return \"Error %s connecting to unix socket: %s. %s.\" % \\\n (exception.args[0], self.path, exception.args[1])\n\n\nFALSE_STRINGS = ('0', 'F', 'FALSE', 'N', 'NO')\n\n\ndef to_bool(value):\n if value is None or value == '':\n return None\n if isinstance(value, basestring) and value.upper() in FALSE_STRINGS:\n return False\n return bool(value)\n\n\nURL_QUERY_ARGUMENT_PARSERS = {\n 'socket_timeout': float,\n 'socket_connect_timeout': float,\n 'socket_keepalive': to_bool,\n 'retry_on_timeout': to_bool,\n 'max_connections': int,\n}\n\n\nclass ConnectionPool(object):\n \"Generic connection pool\"\n @classmethod\n def from_url(cls, url, db=None, decode_components=False, **kwargs):\n \"\"\"\n Return a connection pool configured from the given URL.\n\n For example::\n\n redis://[:password]@localhost:6379/0\n rediss://[:password]@localhost:6379/0\n unix://[:password]@/path/to/socket.sock?db=0\n\n Three URL schemes are supported:\n\n - ```redis://``\n <https://www.iana.org/assignments/uri-schemes/prov/redis>`_ creates a\n normal TCP socket connection\n - ```rediss://``\n <https://www.iana.org/assignments/uri-schemes/prov/rediss>`_ creates\n a SSL wrapped TCP socket connection\n - ``unix://`` creates a Unix Domain Socket connection\n\n There are several ways to specify a database number. The parse function\n will return the first specified option:\n 1. A ``db`` querystring option, e.g. redis://localhost?db=0\n 2. If using the redis:// scheme, the path argument of the url, e.g.\n redis://localhost/0\n 3. The ``db`` argument to this function.\n\n If none of these options are specified, db=0 is used.\n\n The ``decode_components`` argument allows this function to work with\n percent-encoded URLs. If this argument is set to ``True`` all ``%xx``\n escapes will be replaced by their single-character equivalents after\n the URL has been parsed. This only applies to the ``hostname``,\n ``path``, and ``password`` components.\n\n Any additional querystring arguments and keyword arguments will be\n passed along to the ConnectionPool class's initializer. 
The querystring\n arguments ``socket_connect_timeout`` and ``socket_timeout`` if supplied\n are parsed as float values. The arguments ``socket_keepalive`` and\n ``retry_on_timeout`` are parsed to boolean values that accept\n True/False, Yes/No values to indicate state. Invalid types cause a\n ``UserWarning`` to be raised. In the case of conflicting arguments,\n querystring arguments always win.\n\n \"\"\"\n url = urlparse(url)\n url_options = {}\n\n for name, value in iteritems(parse_qs(url.query)):\n if value and len(value) > 0:\n parser = URL_QUERY_ARGUMENT_PARSERS.get(name)\n if parser:\n try:\n url_options[name] = parser(value[0])\n except (TypeError, ValueError):\n warnings.warn(UserWarning(\n \"Invalid value for `%s` in connection URL.\" % name\n ))\n else:\n url_options[name] = value[0]\n\n if decode_components:\n password = unquote(url.password) if url.password else None\n path = unquote(url.path) if url.path else None\n hostname = unquote(url.hostname) if url.hostname else None\n else:\n password = url.password\n path = url.path\n hostname = url.hostname\n\n # We only support redis:// and unix:// schemes.\n if url.scheme == 'unix':\n url_options.update({\n 'password': password,\n 'path': path,\n 'connection_class': UnixDomainSocketConnection,\n })\n\n else:\n url_options.update({\n 'host': hostname,\n 'port': int(url.port or 6379),\n 'password': password,\n })\n\n # If there's a path argument, use it as the db argument if a\n # querystring value wasn't specified\n if 'db' not in url_options and path:\n try:\n url_options['db'] = int(path.replace('/', ''))\n except (AttributeError, ValueError):\n pass\n\n if url.scheme == 'rediss':\n url_options['connection_class'] = SSLConnection\n\n # last shot at the db value\n url_options['db'] = int(url_options.get('db', db or 0))\n\n # update the arguments from the URL values\n kwargs.update(url_options)\n\n # backwards compatability\n if 'charset' in kwargs:\n warnings.warn(DeprecationWarning(\n '\"charset\" is deprecated. Use \"encoding\" instead'))\n kwargs['encoding'] = kwargs.pop('charset')\n if 'errors' in kwargs:\n warnings.warn(DeprecationWarning(\n '\"errors\" is deprecated. Use \"encoding_errors\" instead'))\n kwargs['encoding_errors'] = kwargs.pop('errors')\n\n return cls(**kwargs)\n\n def __init__(self, connection_class=Connection, max_connections=None,\n **connection_kwargs):\n \"\"\"\n Create a connection pool. If max_connections is set, then this\n object raises redis.ConnectionError when the pool's limit is reached.\n\n By default, TCP connections are created unless connection_class is\n specified. 
Use redis.UnixDomainSocketConnection for unix sockets.\n\n Any additional keyword arguments are passed to the constructor of\n connection_class.\n \"\"\"\n max_connections = max_connections or 2 ** 31\n if not isinstance(max_connections, (int, long)) or max_connections < 0:\n raise ValueError('\"max_connections\" must be a positive integer')\n\n self.connection_class = connection_class\n self.connection_kwargs = connection_kwargs\n self.max_connections = max_connections\n\n self.reset()\n\n def __repr__(self):\n return \"%s<%s>\" % (\n type(self).__name__,\n self.connection_class.description_format % self.connection_kwargs,\n )\n\n def reset(self):\n self.pid = os.getpid()\n self._created_connections = 0\n self._available_connections = []\n self._in_use_connections = set()\n self._check_lock = threading.Lock()\n\n def _checkpid(self):\n if self.pid != os.getpid():\n with self._check_lock:\n if self.pid == os.getpid():\n # another thread already did the work while we waited\n # on the lock.\n return\n self.disconnect()\n self.reset()\n\n def get_connection(self, command_name, *keys, **options):\n \"Get a connection from the pool\"\n self._checkpid()\n try:\n connection = self._available_connections.pop()\n except IndexError:\n connection = self.make_connection()\n self._in_use_connections.add(connection)\n return connection\n\n def get_encoder(self):\n \"Return an encoder based on encoding settings\"\n kwargs = self.connection_kwargs\n return Encoder(\n encoding=kwargs.get('encoding', 'utf-8'),\n encoding_errors=kwargs.get('encoding_errors', 'strict'),\n decode_responses=kwargs.get('decode_responses', False)\n )\n\n def make_connection(self):\n \"Create a new connection\"\n if self._created_connections >= self.max_connections:\n raise ConnectionError(\"Too many connections\")\n self._created_connections += 1\n return self.connection_class(**self.connection_kwargs)\n\n def release(self, connection):\n \"Releases the connection back to the pool\"\n self._checkpid()\n if connection.pid != self.pid:\n return\n self._in_use_connections.remove(connection)\n self._available_connections.append(connection)\n\n def disconnect(self):\n \"Disconnects all connections in the pool\"\n all_conns = chain(self._available_connections,\n self._in_use_connections)\n for connection in all_conns:\n connection.disconnect()\n\n\nclass BlockingConnectionPool(ConnectionPool):\n \"\"\"\n Thread-safe blocking connection pool::\n\n >>> from redis.client import Redis\n >>> client = Redis(connection_pool=BlockingConnectionPool())\n\n It performs the same function as the default\n ``:py:class: ~redis.connection.ConnectionPool`` implementation, in that,\n it maintains a pool of reusable connections that can be shared by\n multiple redis clients (safely across threads if required).\n\n The difference is that, in the event that a client tries to get a\n connection from the pool when all of connections are in use, rather than\n raising a ``:py:class: ~redis.exceptions.ConnectionError`` (as the default\n ``:py:class: ~redis.connection.ConnectionPool`` implementation does), it\n makes the client wait (\"blocks\") for a specified number of seconds until\n a connection becomes available.\n\n Use ``max_connections`` to increase / decrease the pool size::\n\n >>> pool = BlockingConnectionPool(max_connections=10)\n\n Use ``timeout`` to tell it either how many seconds to wait for a connection\n to become available, or to block forever:\n\n # Block forever.\n >>> pool = BlockingConnectionPool(timeout=None)\n\n # Raise a 
``ConnectionError`` after five seconds if a connection is\n # not available.\n >>> pool = BlockingConnectionPool(timeout=5)\n \"\"\"\n def __init__(self, max_connections=50, timeout=20,\n connection_class=Connection, queue_class=LifoQueue,\n **connection_kwargs):\n\n self.queue_class = queue_class\n self.timeout = timeout\n super(BlockingConnectionPool, self).__init__(\n connection_class=connection_class,\n max_connections=max_connections,\n **connection_kwargs)\n\n def reset(self):\n self.pid = os.getpid()\n self._check_lock = threading.Lock()\n\n # Create and fill up a thread safe queue with ``None`` values.\n self.pool = self.queue_class(self.max_connections)\n while True:\n try:\n self.pool.put_nowait(None)\n except Full:\n break\n\n # Keep a list of actual connection instances so that we can\n # disconnect them later.\n self._connections = []\n\n def make_connection(self):\n \"Make a fresh connection.\"\n connection = self.connection_class(**self.connection_kwargs)\n self._connections.append(connection)\n return connection\n\n def get_connection(self, command_name, *keys, **options):\n \"\"\"\n Get a connection, blocking for ``self.timeout`` until a connection\n is available from the pool.\n\n If the connection returned is ``None`` then creates a new connection.\n Because we use a last-in first-out queue, the existing connections\n (having been returned to the pool after the initial ``None`` values\n were added) will be returned before ``None`` values. This means we only\n create new connections when we need to, i.e.: the actual number of\n connections will only increase in response to demand.\n \"\"\"\n # Make sure we haven't changed process.\n self._checkpid()\n\n # Try and get a connection from the pool. If one isn't available within\n # self.timeout then raise a ``ConnectionError``.\n connection = None\n try:\n connection = self.pool.get(block=True, timeout=self.timeout)\n except Empty:\n # Note that this is not caught by the redis client and will be\n # raised unless handled by application code. If you want never to\n raise ConnectionError(\"No connection available.\")\n\n # If the ``connection`` is actually ``None`` then that's a cue to make\n # a new connection to add to the pool.\n if connection is None:\n connection = self.make_connection()\n\n return connection\n\n def release(self, connection):\n \"Releases the connection back to the pool.\"\n # Make sure we haven't changed process.\n self._checkpid()\n if connection.pid != self.pid:\n return\n\n # Put the connection back into the pool.\n try:\n self.pool.put_nowait(connection)\n except Full:\n # perhaps the pool has been reset() after a fork? regardless,\n # we don't want this connection\n pass\n\n def disconnect(self):\n \"Disconnects all connections in the pool.\"\n for connection in self._connections:\n connection.disconnect()\n",
"path": "redis/connection.py"
}
] | [
{
"content": "from __future__ import unicode_literals\nfrom distutils.version import StrictVersion\nfrom itertools import chain\nimport io\nimport os\nimport socket\nimport sys\nimport threading\nimport warnings\n\ntry:\n import ssl\n ssl_available = True\nexcept ImportError:\n ssl_available = False\n\nfrom redis._compat import (xrange, imap, byte_to_chr, unicode, bytes, long,\n nativestr, basestring, iteritems,\n LifoQueue, Empty, Full, urlparse, parse_qs,\n recv, recv_into, select, unquote)\nfrom redis.exceptions import (\n DataError,\n RedisError,\n ConnectionError,\n TimeoutError,\n BusyLoadingError,\n ResponseError,\n InvalidResponse,\n AuthenticationError,\n NoScriptError,\n ExecAbortError,\n ReadOnlyError\n)\nfrom redis.utils import HIREDIS_AVAILABLE\nif HIREDIS_AVAILABLE:\n import hiredis\n\n hiredis_version = StrictVersion(hiredis.__version__)\n HIREDIS_SUPPORTS_CALLABLE_ERRORS = \\\n hiredis_version >= StrictVersion('0.1.3')\n HIREDIS_SUPPORTS_BYTE_BUFFER = \\\n hiredis_version >= StrictVersion('0.1.4')\n\n if not HIREDIS_SUPPORTS_BYTE_BUFFER:\n msg = (\"redis-py works best with hiredis >= 0.1.4. You're running \"\n \"hiredis %s. Please consider upgrading.\" % hiredis.__version__)\n warnings.warn(msg)\n\n HIREDIS_USE_BYTE_BUFFER = True\n # only use byte buffer if hiredis supports it\n if not HIREDIS_SUPPORTS_BYTE_BUFFER:\n HIREDIS_USE_BYTE_BUFFER = False\n\nSYM_STAR = b'*'\nSYM_DOLLAR = b'$'\nSYM_CRLF = b'\\r\\n'\nSYM_EMPTY = b''\n\nSERVER_CLOSED_CONNECTION_ERROR = \"Connection closed by server.\"\n\n\nclass Token(object):\n \"\"\"\n Literal strings in Redis commands, such as the command names and any\n hard-coded arguments are wrapped in this class so we know not to apply\n and encoding rules on them.\n \"\"\"\n\n _cache = {}\n\n @classmethod\n def get_token(cls, value):\n \"Gets a cached token object or creates a new one if not already cached\"\n\n # Use try/except because after running for a short time most tokens\n # should already be cached\n try:\n return cls._cache[value]\n except KeyError:\n token = Token(value)\n cls._cache[value] = token\n return token\n\n def __init__(self, value):\n if isinstance(value, Token):\n value = value.value\n self.value = value\n self.encoded_value = value.encode()\n\n def __repr__(self):\n return self.value\n\n def __str__(self):\n return self.value\n\n\nclass Encoder(object):\n \"Encode strings to bytes and decode bytes to strings\"\n\n def __init__(self, encoding, encoding_errors, decode_responses):\n self.encoding = encoding\n self.encoding_errors = encoding_errors\n self.decode_responses = decode_responses\n\n def encode(self, value):\n \"Return a bytestring representation of the value\"\n if isinstance(value, Token):\n return value.encoded_value\n elif isinstance(value, bytes):\n return value\n elif isinstance(value, bool):\n # special case bool since it is a subclass of int\n raise DataError(\"Invalid input of type: 'bool'. Convert to a \"\n \"byte, string or number first.\")\n elif isinstance(value, float):\n value = repr(value).encode()\n elif isinstance(value, (int, long)):\n # python 2 repr() on longs is '123L', so use str() instead\n value = str(value).encode()\n elif not isinstance(value, basestring):\n # a value we don't know how to deal with. throw an error\n typename = type(value).__name__\n raise DataError(\"Invalid input of type: '%s'. 
Convert to a \"\n \"byte, string or number first.\" % typename)\n if isinstance(value, unicode):\n value = value.encode(self.encoding, self.encoding_errors)\n return value\n\n def decode(self, value, force=False):\n \"Return a unicode string from the byte representation\"\n if (self.decode_responses or force) and isinstance(value, bytes):\n value = value.decode(self.encoding, self.encoding_errors)\n return value\n\n\nclass BaseParser(object):\n EXCEPTION_CLASSES = {\n 'ERR': {\n 'max number of clients reached': ConnectionError\n },\n 'EXECABORT': ExecAbortError,\n 'LOADING': BusyLoadingError,\n 'NOSCRIPT': NoScriptError,\n 'READONLY': ReadOnlyError,\n }\n\n def parse_error(self, response):\n \"Parse an error response\"\n error_code = response.split(' ')[0]\n if error_code in self.EXCEPTION_CLASSES:\n response = response[len(error_code) + 1:]\n exception_class = self.EXCEPTION_CLASSES[error_code]\n if isinstance(exception_class, dict):\n exception_class = exception_class.get(response, ResponseError)\n return exception_class(response)\n return ResponseError(response)\n\n\nclass SocketBuffer(object):\n def __init__(self, socket, socket_read_size):\n self._sock = socket\n self.socket_read_size = socket_read_size\n self._buffer = io.BytesIO()\n # number of bytes written to the buffer from the socket\n self.bytes_written = 0\n # number of bytes read from the buffer\n self.bytes_read = 0\n\n @property\n def length(self):\n return self.bytes_written - self.bytes_read\n\n def _read_from_socket(self, length=None):\n socket_read_size = self.socket_read_size\n buf = self._buffer\n buf.seek(self.bytes_written)\n marker = 0\n\n try:\n while True:\n data = recv(self._sock, socket_read_size)\n # an empty string indicates the server shutdown the socket\n if isinstance(data, bytes) and len(data) == 0:\n raise socket.error(SERVER_CLOSED_CONNECTION_ERROR)\n buf.write(data)\n data_length = len(data)\n self.bytes_written += data_length\n marker += data_length\n\n if length is not None and length > marker:\n continue\n break\n except socket.timeout:\n raise TimeoutError(\"Timeout reading from socket\")\n except socket.error:\n e = sys.exc_info()[1]\n raise ConnectionError(\"Error while reading from socket: %s\" %\n (e.args,))\n\n def read(self, length):\n length = length + 2 # make sure to read the \\r\\n terminator\n # make sure we've read enough data from the socket\n if length > self.length:\n self._read_from_socket(length - self.length)\n\n self._buffer.seek(self.bytes_read)\n data = self._buffer.read(length)\n self.bytes_read += len(data)\n\n # purge the buffer when we've consumed it all so it doesn't\n # grow forever\n if self.bytes_read == self.bytes_written:\n self.purge()\n\n return data[:-2]\n\n def readline(self):\n buf = self._buffer\n buf.seek(self.bytes_read)\n data = buf.readline()\n while not data.endswith(SYM_CRLF):\n # there's more data in the socket that we need\n self._read_from_socket()\n buf.seek(self.bytes_read)\n data = buf.readline()\n\n self.bytes_read += len(data)\n\n # purge the buffer when we've consumed it all so it doesn't\n # grow forever\n if self.bytes_read == self.bytes_written:\n self.purge()\n\n return data[:-2]\n\n def purge(self):\n self._buffer.seek(0)\n self._buffer.truncate()\n self.bytes_written = 0\n self.bytes_read = 0\n\n def close(self):\n try:\n self.purge()\n self._buffer.close()\n except Exception:\n # issue #633 suggests the purge/close somehow raised a\n # BadFileDescriptor error. Perhaps the client ran out of\n # memory or something else? 
It's probably OK to ignore\n # any error being raised from purge/close since we're\n # removing the reference to the instance below.\n pass\n self._buffer = None\n self._sock = None\n\n\nclass PythonParser(BaseParser):\n \"Plain Python parsing class\"\n def __init__(self, socket_read_size):\n self.socket_read_size = socket_read_size\n self.encoder = None\n self._sock = None\n self._buffer = None\n\n def __del__(self):\n try:\n self.on_disconnect()\n except Exception:\n pass\n\n def on_connect(self, connection):\n \"Called when the socket connects\"\n self._sock = connection._sock\n self._buffer = SocketBuffer(self._sock, self.socket_read_size)\n self.encoder = connection.encoder\n\n def on_disconnect(self):\n \"Called when the socket disconnects\"\n if self._sock is not None:\n self._sock.close()\n self._sock = None\n if self._buffer is not None:\n self._buffer.close()\n self._buffer = None\n self.encoder = None\n\n def can_read(self):\n return self._buffer and bool(self._buffer.length)\n\n def read_response(self):\n response = self._buffer.readline()\n if not response:\n raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)\n\n byte, response = byte_to_chr(response[0]), response[1:]\n\n if byte not in ('-', '+', ':', '$', '*'):\n raise InvalidResponse(\"Protocol Error: %s, %s\" %\n (str(byte), str(response)))\n\n # server returned an error\n if byte == '-':\n response = nativestr(response)\n error = self.parse_error(response)\n # if the error is a ConnectionError, raise immediately so the user\n # is notified\n if isinstance(error, ConnectionError):\n raise error\n # otherwise, we're dealing with a ResponseError that might belong\n # inside a pipeline response. the connection's read_response()\n # and/or the pipeline's execute() will raise this error if\n # necessary, so just return the exception instance here.\n return error\n # single value\n elif byte == '+':\n pass\n # int value\n elif byte == ':':\n response = long(response)\n # bulk response\n elif byte == '$':\n length = int(response)\n if length == -1:\n return None\n response = self._buffer.read(length)\n # multi-bulk response\n elif byte == '*':\n length = int(response)\n if length == -1:\n return None\n response = [self.read_response() for i in xrange(length)]\n if isinstance(response, bytes):\n response = self.encoder.decode(response)\n return response\n\n\nclass HiredisParser(BaseParser):\n \"Parser class for connections using Hiredis\"\n def __init__(self, socket_read_size):\n if not HIREDIS_AVAILABLE:\n raise RedisError(\"Hiredis is not installed\")\n self.socket_read_size = socket_read_size\n\n if HIREDIS_USE_BYTE_BUFFER:\n self._buffer = bytearray(socket_read_size)\n\n def __del__(self):\n try:\n self.on_disconnect()\n except Exception:\n pass\n\n def on_connect(self, connection):\n self._sock = connection._sock\n kwargs = {\n 'protocolError': InvalidResponse,\n 'replyError': self.parse_error,\n }\n\n # hiredis < 0.1.3 doesn't support functions that create exceptions\n if not HIREDIS_SUPPORTS_CALLABLE_ERRORS:\n kwargs['replyError'] = ResponseError\n\n if connection.encoder.decode_responses:\n kwargs['encoding'] = connection.encoder.encoding\n self._reader = hiredis.Reader(**kwargs)\n self._next_response = False\n\n def on_disconnect(self):\n self._sock = None\n self._reader = None\n self._next_response = False\n\n def can_read(self):\n if not self._reader:\n raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)\n\n if self._next_response is False:\n self._next_response = self._reader.gets()\n return self._next_response is 
not False\n\n def read_response(self):\n if not self._reader:\n raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)\n\n # _next_response might be cached from a can_read() call\n if self._next_response is not False:\n response = self._next_response\n self._next_response = False\n return response\n\n response = self._reader.gets()\n socket_read_size = self.socket_read_size\n while response is False:\n try:\n if HIREDIS_USE_BYTE_BUFFER:\n bufflen = recv_into(self._sock, self._buffer)\n if bufflen == 0:\n raise socket.error(SERVER_CLOSED_CONNECTION_ERROR)\n else:\n buffer = recv(self._sock, socket_read_size)\n # an empty string indicates the server shutdown the socket\n if not isinstance(buffer, bytes) or len(buffer) == 0:\n raise socket.error(SERVER_CLOSED_CONNECTION_ERROR)\n except socket.timeout:\n raise TimeoutError(\"Timeout reading from socket\")\n except socket.error:\n e = sys.exc_info()[1]\n raise ConnectionError(\"Error while reading from socket: %s\" %\n (e.args,))\n if HIREDIS_USE_BYTE_BUFFER:\n self._reader.feed(self._buffer, 0, bufflen)\n else:\n self._reader.feed(buffer)\n response = self._reader.gets()\n # if an older version of hiredis is installed, we need to attempt\n # to convert ResponseErrors to their appropriate types.\n if not HIREDIS_SUPPORTS_CALLABLE_ERRORS:\n if isinstance(response, ResponseError):\n response = self.parse_error(response.args[0])\n elif isinstance(response, list) and response and \\\n isinstance(response[0], ResponseError):\n response[0] = self.parse_error(response[0].args[0])\n # if the response is a ConnectionError or the response is a list and\n # the first item is a ConnectionError, raise it as something bad\n # happened\n if isinstance(response, ConnectionError):\n raise response\n elif isinstance(response, list) and response and \\\n isinstance(response[0], ConnectionError):\n raise response[0]\n return response\n\n\nif HIREDIS_AVAILABLE:\n DefaultParser = HiredisParser\nelse:\n DefaultParser = PythonParser\n\n\nclass Connection(object):\n \"Manages TCP communication to and from a Redis server\"\n description_format = \"Connection<host=%(host)s,port=%(port)s,db=%(db)s>\"\n\n def __init__(self, host='localhost', port=6379, db=0, password=None,\n socket_timeout=None, socket_connect_timeout=None,\n socket_keepalive=False, socket_keepalive_options=None,\n socket_type=0, retry_on_timeout=False, encoding='utf-8',\n encoding_errors='strict', decode_responses=False,\n parser_class=DefaultParser, socket_read_size=65536):\n self.pid = os.getpid()\n self.host = host\n self.port = int(port)\n self.db = db\n self.password = password\n self.socket_timeout = socket_timeout\n self.socket_connect_timeout = socket_connect_timeout or socket_timeout\n self.socket_keepalive = socket_keepalive\n self.socket_keepalive_options = socket_keepalive_options or {}\n self.socket_type = socket_type\n self.retry_on_timeout = retry_on_timeout\n self.encoder = Encoder(encoding, encoding_errors, decode_responses)\n self._sock = None\n self._parser = parser_class(socket_read_size=socket_read_size)\n self._description_args = {\n 'host': self.host,\n 'port': self.port,\n 'db': self.db,\n }\n self._connect_callbacks = []\n self._buffer_cutoff = 6000\n\n def __repr__(self):\n return self.description_format % self._description_args\n\n def __del__(self):\n try:\n self.disconnect()\n except Exception:\n pass\n\n def register_connect_callback(self, callback):\n self._connect_callbacks.append(callback)\n\n def clear_connect_callbacks(self):\n self._connect_callbacks = []\n\n def 
connect(self):\n \"Connects to the Redis server if not already connected\"\n if self._sock:\n return\n try:\n sock = self._connect()\n except socket.timeout:\n raise TimeoutError(\"Timeout connecting to server\")\n except socket.error:\n e = sys.exc_info()[1]\n raise ConnectionError(self._error_message(e))\n\n self._sock = sock\n try:\n self.on_connect()\n except RedisError:\n # clean up after any error in on_connect\n self.disconnect()\n raise\n\n # run any user callbacks. right now the only internal callback\n # is for pubsub channel/pattern resubscription\n for callback in self._connect_callbacks:\n callback(self)\n\n def _connect(self):\n \"Create a TCP socket connection\"\n # we want to mimic what socket.create_connection does to support\n # ipv4/ipv6, but we want to set options prior to calling\n # socket.connect()\n err = None\n for res in socket.getaddrinfo(self.host, self.port, self.socket_type,\n socket.SOCK_STREAM):\n family, socktype, proto, canonname, socket_address = res\n sock = None\n try:\n sock = socket.socket(family, socktype, proto)\n # TCP_NODELAY\n sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)\n\n # TCP_KEEPALIVE\n if self.socket_keepalive:\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)\n for k, v in iteritems(self.socket_keepalive_options):\n sock.setsockopt(socket.SOL_TCP, k, v)\n\n # set the socket_connect_timeout before we connect\n sock.settimeout(self.socket_connect_timeout)\n\n # connect\n sock.connect(socket_address)\n\n # set the socket_timeout now that we're connected\n sock.settimeout(self.socket_timeout)\n return sock\n\n except socket.error as _:\n err = _\n if sock is not None:\n sock.close()\n\n if err is not None:\n raise err\n raise socket.error(\"socket.getaddrinfo returned an empty list\")\n\n def _error_message(self, exception):\n # args for socket.error can either be (errno, \"message\")\n # or just \"message\"\n if len(exception.args) == 1:\n return \"Error connecting to %s:%s. %s.\" % \\\n (self.host, self.port, exception.args[0])\n else:\n return \"Error %s connecting to %s:%s. %s.\" % \\\n (exception.args[0], self.host, self.port, exception.args[1])\n\n def on_connect(self):\n \"Initialize the connection, authenticate and select a database\"\n self._parser.on_connect(self)\n\n # if a password is specified, authenticate\n if self.password:\n self.send_command('AUTH', self.password)\n if nativestr(self.read_response()) != 'OK':\n raise AuthenticationError('Invalid Password')\n\n # if a database is specified, switch to it\n if self.db:\n self.send_command('SELECT', self.db)\n if nativestr(self.read_response()) != 'OK':\n raise ConnectionError('Invalid Database')\n\n def disconnect(self):\n \"Disconnects from the Redis server\"\n self._parser.on_disconnect()\n if self._sock is None:\n return\n try:\n self._sock.shutdown(socket.SHUT_RDWR)\n self._sock.close()\n except socket.error:\n pass\n self._sock = None\n\n def send_packed_command(self, command):\n \"Send an already packed command to the Redis server\"\n if not self._sock:\n self.connect()\n try:\n if isinstance(command, str):\n command = [command]\n for item in command:\n self._sock.sendall(item)\n except socket.timeout:\n self.disconnect()\n raise TimeoutError(\"Timeout writing to socket\")\n except socket.error:\n e = sys.exc_info()[1]\n self.disconnect()\n if len(e.args) == 1:\n errno, errmsg = 'UNKNOWN', e.args[0]\n else:\n errno = e.args[0]\n errmsg = e.args[1]\n raise ConnectionError(\"Error %s while writing to socket. 
%s.\" %\n (errno, errmsg))\n except Exception as e:\n self.disconnect()\n raise e\n\n def send_command(self, *args):\n \"Pack and send a command to the Redis server\"\n self.send_packed_command(self.pack_command(*args))\n\n def can_read(self, timeout=0):\n \"Poll the socket to see if there's data that can be read.\"\n sock = self._sock\n if not sock:\n self.connect()\n sock = self._sock\n return self._parser.can_read() or \\\n bool(select([sock], [], [], timeout)[0])\n\n def read_response(self):\n \"Read the response from a previously sent command\"\n try:\n response = self._parser.read_response()\n except Exception as e:\n self.disconnect()\n raise e\n if isinstance(response, ResponseError):\n raise response\n return response\n\n def pack_command(self, *args):\n \"Pack a series of arguments into the Redis protocol\"\n output = []\n # the client might have included 1 or more literal arguments in\n # the command name, e.g., 'CONFIG GET'. The Redis server expects these\n # arguments to be sent separately, so split the first argument\n # manually. All of these arguements get wrapped in the Token class\n # to prevent them from being encoded.\n command = args[0]\n if ' ' in command:\n args = tuple(Token.get_token(s)\n for s in command.split()) + args[1:]\n else:\n args = (Token.get_token(command),) + args[1:]\n\n buff = SYM_EMPTY.join((SYM_STAR, str(len(args)).encode(), SYM_CRLF))\n\n buffer_cutoff = self._buffer_cutoff\n for arg in imap(self.encoder.encode, args):\n # to avoid large string mallocs, chunk the command into the\n # output list if we're sending large values\n if len(buff) > buffer_cutoff or len(arg) > buffer_cutoff:\n buff = SYM_EMPTY.join(\n (buff, SYM_DOLLAR, str(len(arg)).encode(), SYM_CRLF))\n output.append(buff)\n output.append(arg)\n buff = SYM_CRLF\n else:\n buff = SYM_EMPTY.join(\n (buff, SYM_DOLLAR, str(len(arg)).encode(),\n SYM_CRLF, arg, SYM_CRLF))\n output.append(buff)\n return output\n\n def pack_commands(self, commands):\n \"Pack multiple commands into the Redis protocol\"\n output = []\n pieces = []\n buffer_length = 0\n buffer_cutoff = self._buffer_cutoff\n\n for cmd in commands:\n for chunk in self.pack_command(*cmd):\n chunklen = len(chunk)\n if buffer_length > buffer_cutoff or chunklen > buffer_cutoff:\n output.append(SYM_EMPTY.join(pieces))\n buffer_length = 0\n pieces = []\n\n if chunklen > self._buffer_cutoff:\n output.append(chunk)\n else:\n pieces.append(chunk)\n buffer_length += chunklen\n\n if pieces:\n output.append(SYM_EMPTY.join(pieces))\n return output\n\n\nclass SSLConnection(Connection):\n description_format = \"SSLConnection<host=%(host)s,port=%(port)s,db=%(db)s>\"\n\n def __init__(self, ssl_keyfile=None, ssl_certfile=None,\n ssl_cert_reqs='required', ssl_ca_certs=None, **kwargs):\n if not ssl_available:\n raise RedisError(\"Python wasn't built with SSL support\")\n\n super(SSLConnection, self).__init__(**kwargs)\n\n self.keyfile = ssl_keyfile\n self.certfile = ssl_certfile\n if ssl_cert_reqs is None:\n ssl_cert_reqs = ssl.CERT_NONE\n elif isinstance(ssl_cert_reqs, basestring):\n CERT_REQS = {\n 'none': ssl.CERT_NONE,\n 'optional': ssl.CERT_OPTIONAL,\n 'required': ssl.CERT_REQUIRED\n }\n if ssl_cert_reqs not in CERT_REQS:\n raise RedisError(\n \"Invalid SSL Certificate Requirements Flag: %s\" %\n ssl_cert_reqs)\n ssl_cert_reqs = CERT_REQS[ssl_cert_reqs]\n self.cert_reqs = ssl_cert_reqs\n self.ca_certs = ssl_ca_certs\n\n def _connect(self):\n \"Wrap the socket with SSL support\"\n sock = super(SSLConnection, self)._connect()\n sock = 
ssl.wrap_socket(sock,\n cert_reqs=self.cert_reqs,\n keyfile=self.keyfile,\n certfile=self.certfile,\n ca_certs=self.ca_certs)\n return sock\n\n\nclass UnixDomainSocketConnection(Connection):\n description_format = \"UnixDomainSocketConnection<path=%(path)s,db=%(db)s>\"\n\n def __init__(self, path='', db=0, password=None,\n socket_timeout=None, encoding='utf-8',\n encoding_errors='strict', decode_responses=False,\n retry_on_timeout=False,\n parser_class=DefaultParser, socket_read_size=65536):\n self.pid = os.getpid()\n self.path = path\n self.db = db\n self.password = password\n self.socket_timeout = socket_timeout\n self.retry_on_timeout = retry_on_timeout\n self.encoder = Encoder(encoding, encoding_errors, decode_responses)\n self._sock = None\n self._parser = parser_class(socket_read_size=socket_read_size)\n self._description_args = {\n 'path': self.path,\n 'db': self.db,\n }\n self._connect_callbacks = []\n self._buffer_cutoff = 6000\n\n def _connect(self):\n \"Create a Unix domain socket connection\"\n sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n sock.settimeout(self.socket_timeout)\n sock.connect(self.path)\n return sock\n\n def _error_message(self, exception):\n # args for socket.error can either be (errno, \"message\")\n # or just \"message\"\n if len(exception.args) == 1:\n return \"Error connecting to unix socket: %s. %s.\" % \\\n (self.path, exception.args[0])\n else:\n return \"Error %s connecting to unix socket: %s. %s.\" % \\\n (exception.args[0], self.path, exception.args[1])\n\n\nFALSE_STRINGS = ('0', 'F', 'FALSE', 'N', 'NO')\n\n\ndef to_bool(value):\n if value is None or value == '':\n return None\n if isinstance(value, basestring) and value.upper() in FALSE_STRINGS:\n return False\n return bool(value)\n\n\nURL_QUERY_ARGUMENT_PARSERS = {\n 'socket_timeout': float,\n 'socket_connect_timeout': float,\n 'socket_keepalive': to_bool,\n 'retry_on_timeout': to_bool,\n 'max_connections': int,\n}\n\n\nclass ConnectionPool(object):\n \"Generic connection pool\"\n @classmethod\n def from_url(cls, url, db=None, decode_components=False, **kwargs):\n \"\"\"\n Return a connection pool configured from the given URL.\n\n For example::\n\n redis://[:password]@localhost:6379/0\n rediss://[:password]@localhost:6379/0\n unix://[:password]@/path/to/socket.sock?db=0\n\n Three URL schemes are supported:\n\n - ```redis://``\n <https://www.iana.org/assignments/uri-schemes/prov/redis>`_ creates a\n normal TCP socket connection\n - ```rediss://``\n <https://www.iana.org/assignments/uri-schemes/prov/rediss>`_ creates\n a SSL wrapped TCP socket connection\n - ``unix://`` creates a Unix Domain Socket connection\n\n There are several ways to specify a database number. The parse function\n will return the first specified option:\n 1. A ``db`` querystring option, e.g. redis://localhost?db=0\n 2. If using the redis:// scheme, the path argument of the url, e.g.\n redis://localhost/0\n 3. The ``db`` argument to this function.\n\n If none of these options are specified, db=0 is used.\n\n The ``decode_components`` argument allows this function to work with\n percent-encoded URLs. If this argument is set to ``True`` all ``%xx``\n escapes will be replaced by their single-character equivalents after\n the URL has been parsed. This only applies to the ``hostname``,\n ``path``, and ``password`` components.\n\n Any additional querystring arguments and keyword arguments will be\n passed along to the ConnectionPool class's initializer. 
The querystring\n arguments ``socket_connect_timeout`` and ``socket_timeout`` if supplied\n are parsed as float values. The arguments ``socket_keepalive`` and\n ``retry_on_timeout`` are parsed to boolean values that accept\n True/False, Yes/No values to indicate state. Invalid types cause a\n ``UserWarning`` to be raised. In the case of conflicting arguments,\n querystring arguments always win.\n\n \"\"\"\n url = urlparse(url)\n url_options = {}\n\n for name, value in iteritems(parse_qs(url.query)):\n if value and len(value) > 0:\n parser = URL_QUERY_ARGUMENT_PARSERS.get(name)\n if parser:\n try:\n url_options[name] = parser(value[0])\n except (TypeError, ValueError):\n warnings.warn(UserWarning(\n \"Invalid value for `%s` in connection URL.\" % name\n ))\n else:\n url_options[name] = value[0]\n\n if decode_components:\n password = unquote(url.password) if url.password else None\n path = unquote(url.path) if url.path else None\n hostname = unquote(url.hostname) if url.hostname else None\n else:\n password = url.password\n path = url.path\n hostname = url.hostname\n\n # We only support redis:// and unix:// schemes.\n if url.scheme == 'unix':\n url_options.update({\n 'password': password,\n 'path': path,\n 'connection_class': UnixDomainSocketConnection,\n })\n\n else:\n url_options.update({\n 'host': hostname,\n 'port': int(url.port or 6379),\n 'password': password,\n })\n\n # If there's a path argument, use it as the db argument if a\n # querystring value wasn't specified\n if 'db' not in url_options and path:\n try:\n url_options['db'] = int(path.replace('/', ''))\n except (AttributeError, ValueError):\n pass\n\n if url.scheme == 'rediss':\n url_options['connection_class'] = SSLConnection\n\n # last shot at the db value\n url_options['db'] = int(url_options.get('db', db or 0))\n\n # update the arguments from the URL values\n kwargs.update(url_options)\n\n # backwards compatability\n if 'charset' in kwargs:\n warnings.warn(DeprecationWarning(\n '\"charset\" is deprecated. Use \"encoding\" instead'))\n kwargs['encoding'] = kwargs.pop('charset')\n if 'errors' in kwargs:\n warnings.warn(DeprecationWarning(\n '\"errors\" is deprecated. Use \"encoding_errors\" instead'))\n kwargs['encoding_errors'] = kwargs.pop('errors')\n\n return cls(**kwargs)\n\n def __init__(self, connection_class=Connection, max_connections=None,\n **connection_kwargs):\n \"\"\"\n Create a connection pool. If max_connections is set, then this\n object raises redis.ConnectionError when the pool's limit is reached.\n\n By default, TCP connections are created unless connection_class is\n specified. 
Use redis.UnixDomainSocketConnection for unix sockets.\n\n Any additional keyword arguments are passed to the constructor of\n connection_class.\n \"\"\"\n max_connections = max_connections or 2 ** 31\n if not isinstance(max_connections, (int, long)) or max_connections < 0:\n raise ValueError('\"max_connections\" must be a positive integer')\n\n self.connection_class = connection_class\n self.connection_kwargs = connection_kwargs\n self.max_connections = max_connections\n\n self.reset()\n\n def __repr__(self):\n return \"%s<%s>\" % (\n type(self).__name__,\n self.connection_class.description_format % self.connection_kwargs,\n )\n\n def reset(self):\n self.pid = os.getpid()\n self._created_connections = 0\n self._available_connections = []\n self._in_use_connections = set()\n self._check_lock = threading.Lock()\n\n def _checkpid(self):\n if self.pid != os.getpid():\n with self._check_lock:\n if self.pid == os.getpid():\n # another thread already did the work while we waited\n # on the lock.\n return\n self.disconnect()\n self.reset()\n\n def get_connection(self, command_name, *keys, **options):\n \"Get a connection from the pool\"\n self._checkpid()\n try:\n connection = self._available_connections.pop()\n except IndexError:\n connection = self.make_connection()\n self._in_use_connections.add(connection)\n return connection\n\n def get_encoder(self):\n \"Return an encoder based on encoding settings\"\n kwargs = self.connection_kwargs\n return Encoder(\n encoding=kwargs.get('encoding', 'utf-8'),\n encoding_errors=kwargs.get('encoding_errors', 'strict'),\n decode_responses=kwargs.get('decode_responses', False)\n )\n\n def make_connection(self):\n \"Create a new connection\"\n if self._created_connections >= self.max_connections:\n raise ConnectionError(\"Too many connections\")\n self._created_connections += 1\n return self.connection_class(**self.connection_kwargs)\n\n def release(self, connection):\n \"Releases the connection back to the pool\"\n self._checkpid()\n if connection.pid != self.pid:\n return\n self._in_use_connections.remove(connection)\n self._available_connections.append(connection)\n\n def disconnect(self):\n \"Disconnects all connections in the pool\"\n all_conns = chain(self._available_connections,\n self._in_use_connections)\n for connection in all_conns:\n connection.disconnect()\n\n\nclass BlockingConnectionPool(ConnectionPool):\n \"\"\"\n Thread-safe blocking connection pool::\n\n >>> from redis.client import Redis\n >>> client = Redis(connection_pool=BlockingConnectionPool())\n\n It performs the same function as the default\n ``:py:class: ~redis.connection.ConnectionPool`` implementation, in that,\n it maintains a pool of reusable connections that can be shared by\n multiple redis clients (safely across threads if required).\n\n The difference is that, in the event that a client tries to get a\n connection from the pool when all of connections are in use, rather than\n raising a ``:py:class: ~redis.exceptions.ConnectionError`` (as the default\n ``:py:class: ~redis.connection.ConnectionPool`` implementation does), it\n makes the client wait (\"blocks\") for a specified number of seconds until\n a connection becomes available.\n\n Use ``max_connections`` to increase / decrease the pool size::\n\n >>> pool = BlockingConnectionPool(max_connections=10)\n\n Use ``timeout`` to tell it either how many seconds to wait for a connection\n to become available, or to block forever:\n\n # Block forever.\n >>> pool = BlockingConnectionPool(timeout=None)\n\n # Raise a 
``ConnectionError`` after five seconds if a connection is\n # not available.\n >>> pool = BlockingConnectionPool(timeout=5)\n \"\"\"\n def __init__(self, max_connections=50, timeout=20,\n connection_class=Connection, queue_class=LifoQueue,\n **connection_kwargs):\n\n self.queue_class = queue_class\n self.timeout = timeout\n super(BlockingConnectionPool, self).__init__(\n connection_class=connection_class,\n max_connections=max_connections,\n **connection_kwargs)\n\n def reset(self):\n self.pid = os.getpid()\n self._check_lock = threading.Lock()\n\n # Create and fill up a thread safe queue with ``None`` values.\n self.pool = self.queue_class(self.max_connections)\n while True:\n try:\n self.pool.put_nowait(None)\n except Full:\n break\n\n # Keep a list of actual connection instances so that we can\n # disconnect them later.\n self._connections = []\n\n def make_connection(self):\n \"Make a fresh connection.\"\n connection = self.connection_class(**self.connection_kwargs)\n self._connections.append(connection)\n return connection\n\n def get_connection(self, command_name, *keys, **options):\n \"\"\"\n Get a connection, blocking for ``self.timeout`` until a connection\n is available from the pool.\n\n If the connection returned is ``None`` then creates a new connection.\n Because we use a last-in first-out queue, the existing connections\n (having been returned to the pool after the initial ``None`` values\n were added) will be returned before ``None`` values. This means we only\n create new connections when we need to, i.e.: the actual number of\n connections will only increase in response to demand.\n \"\"\"\n # Make sure we haven't changed process.\n self._checkpid()\n\n # Try and get a connection from the pool. If one isn't available within\n # self.timeout then raise a ``ConnectionError``.\n connection = None\n try:\n connection = self.pool.get(block=True, timeout=self.timeout)\n except Empty:\n # Note that this is not caught by the redis client and will be\n # raised unless handled by application code. If you want never to\n raise ConnectionError(\"No connection available.\")\n\n # If the ``connection`` is actually ``None`` then that's a cue to make\n # a new connection to add to the pool.\n if connection is None:\n connection = self.make_connection()\n\n return connection\n\n def release(self, connection):\n \"Releases the connection back to the pool.\"\n # Make sure we haven't changed process.\n self._checkpid()\n if connection.pid != self.pid:\n return\n\n # Put the connection back into the pool.\n try:\n self.pool.put_nowait(connection)\n except Full:\n # perhaps the pool has been reset() after a fork? regardless,\n # we don't want this connection\n pass\n\n def disconnect(self):\n \"Disconnects all connections in the pool.\"\n for connection in self._connections:\n connection.disconnect()\n",
"path": "redis/connection.py"
}
] | diff --git a/redis/connection.py b/redis/connection.py
index b38f24c42d..9b949c5c08 100755
--- a/redis/connection.py
+++ b/redis/connection.py
@@ -759,6 +759,7 @@ def __init__(self, path='', db=0, password=None,
'db': self.db,
}
self._connect_callbacks = []
+ self._buffer_cutoff = 6000
def _connect(self):
"Create a Unix domain socket connection"
|
hylang__hy-1201 | fix setup.py
At least `hy.extra` is missing from the `package_data` declaration in setup.py
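For illustration, here is a minimal sketch of the kind of `setup()` change being asked for, assuming each pure-Hy package just needs its `*.hy` sources listed (names outside this record are hypothetical):

```python
# Minimal illustrative setup.py sketch: add hy.extra to package_data so
# its *.hy files are actually shipped in the sdist/wheel.
from setuptools import find_packages, setup

setup(
    name="hy",
    packages=find_packages(exclude=["tests*"]),
    package_data={
        "hy.contrib": ["*.hy"],
        "hy.core": ["*.hy"],
        "hy.extra": ["*.hy"],  # previously missing, so hy/extra/*.hy was left out
    },
)
```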
| [
{
"content": "#!/usr/bin/env python\n# Copyright (c) 2012, 2013 Paul Tagliamonte <[email protected]>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n\nimport os\nimport re\nimport sys\nimport subprocess\n\nfrom setuptools import find_packages, setup\n\nos.chdir(os.path.split(os.path.abspath(__file__))[0])\n\nPKG = \"hy\"\nVERSIONFILE = os.path.join(PKG, \"version.py\")\ntry:\n __version__ = (subprocess.check_output\n ([\"git\", \"describe\", \"--tags\", \"--dirty\"])\n .decode('ASCII').strip()\n .replace('-', '+', 1).replace('-', '.'))\n with open(VERSIONFILE, \"wt\") as o:\n o.write(\"__version__ = {!r}\\n\".format(__version__))\nexcept subprocess.CalledProcessError:\n __version__ = \"unknown\"\n\nlong_description = \"\"\"Hy is a Python <--> Lisp layer. It helps\nmake things work nicer, and lets Python and the Hy lisp variant play\nnice together. \"\"\"\n\ninstall_requires = ['rply>=0.7.0', 'astor>=0.5', 'clint>=0.4']\nif sys.version_info[:2] < (2, 7):\n install_requires.append('argparse>=1.2.1')\n install_requires.append('importlib>=1.0.2')\nif os.name == 'nt':\n install_requires.append('pyreadline>=2.1')\n\nver = sys.version_info[0]\n\nsetup(\n name=PKG,\n version=__version__,\n install_requires=install_requires,\n entry_points={\n 'console_scripts': [\n 'hy = hy.cmdline:hy_main',\n 'hy%d = hy.cmdline:hy_main' % ver,\n 'hyc = hy.cmdline:hyc_main',\n 'hyc%d = hy.cmdline:hyc_main' % ver,\n 'hy2py = hy.cmdline:hy2py_main',\n 'hy2py%d = hy.cmdline:hy2py_main' % ver,\n ]\n },\n packages=find_packages(exclude=['tests*']),\n package_data={\n 'hy.contrib': ['*.hy'],\n 'hy.core': ['*.hy'],\n },\n author=\"Paul Tagliamonte\",\n author_email=\"[email protected]\",\n long_description=long_description,\n description='Lisp and Python love each other.',\n license=\"Expat\",\n url=\"http://hylang.org/\",\n platforms=['any'],\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"License :: DFSG approved\",\n \"License :: OSI Approved :: MIT License\", # Really \"Expat\". 
Ugh.\n \"Operating System :: OS Independent\",\n \"Programming Language :: Lisp\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.6\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Topic :: Software Development :: Code Generators\",\n \"Topic :: Software Development :: Compilers\",\n \"Topic :: Software Development :: Libraries\",\n ]\n)\n",
"path": "setup.py"
}
] | [
{
"content": "#!/usr/bin/env python\n# Copyright (c) 2012, 2013 Paul Tagliamonte <[email protected]>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n\nimport os\nimport re\nimport sys\nimport subprocess\n\nfrom setuptools import find_packages, setup\n\nos.chdir(os.path.split(os.path.abspath(__file__))[0])\n\nPKG = \"hy\"\nVERSIONFILE = os.path.join(PKG, \"version.py\")\ntry:\n __version__ = (subprocess.check_output\n ([\"git\", \"describe\", \"--tags\", \"--dirty\"])\n .decode('ASCII').strip()\n .replace('-', '+', 1).replace('-', '.'))\n with open(VERSIONFILE, \"wt\") as o:\n o.write(\"__version__ = {!r}\\n\".format(__version__))\nexcept subprocess.CalledProcessError:\n __version__ = \"unknown\"\n\nlong_description = \"\"\"Hy is a Python <--> Lisp layer. It helps\nmake things work nicer, and lets Python and the Hy lisp variant play\nnice together. \"\"\"\n\ninstall_requires = ['rply>=0.7.0', 'astor>=0.5', 'clint>=0.4']\nif sys.version_info[:2] < (2, 7):\n install_requires.append('argparse>=1.2.1')\n install_requires.append('importlib>=1.0.2')\nif os.name == 'nt':\n install_requires.append('pyreadline>=2.1')\n\nver = sys.version_info[0]\n\nsetup(\n name=PKG,\n version=__version__,\n install_requires=install_requires,\n entry_points={\n 'console_scripts': [\n 'hy = hy.cmdline:hy_main',\n 'hy%d = hy.cmdline:hy_main' % ver,\n 'hyc = hy.cmdline:hyc_main',\n 'hyc%d = hy.cmdline:hyc_main' % ver,\n 'hy2py = hy.cmdline:hy2py_main',\n 'hy2py%d = hy.cmdline:hy2py_main' % ver,\n ]\n },\n packages=find_packages(exclude=['tests*']),\n package_data={\n 'hy.contrib': ['*.hy'],\n 'hy.core': ['*.hy'],\n 'hy.extra': ['*.hy'],\n },\n author=\"Paul Tagliamonte\",\n author_email=\"[email protected]\",\n long_description=long_description,\n description='Lisp and Python love each other.',\n license=\"Expat\",\n url=\"http://hylang.org/\",\n platforms=['any'],\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"License :: DFSG approved\",\n \"License :: OSI Approved :: MIT License\", # Really \"Expat\". 
Ugh.\n \"Operating System :: OS Independent\",\n \"Programming Language :: Lisp\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.6\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Topic :: Software Development :: Code Generators\",\n \"Topic :: Software Development :: Compilers\",\n \"Topic :: Software Development :: Libraries\",\n ]\n)\n",
"path": "setup.py"
}
] | diff --git a/setup.py b/setup.py
index 3fc2be325..a64cb7b80 100755
--- a/setup.py
+++ b/setup.py
@@ -71,6 +71,7 @@
package_data={
'hy.contrib': ['*.hy'],
'hy.core': ['*.hy'],
+ 'hy.extra': ['*.hy'],
},
author="Paul Tagliamonte",
author_email="[email protected]",
|
DataBiosphere__toil-4011 | We're getting the new Werkzeug without asking for it
CI for the merge https://github.com/DataBiosphere/toil/commit/0e256d63cb974a87b8f6b807bf7d23bc9a12fb76 failed at the lint stage, because the merge commit ends up installing a different Werkzeug than the PR's test run did, and the new one has type hints, which upsets MyPy because we now have an unused `type: ignore` comment.
This is because the `connexion` devs finally got access to the `connexion` PyPI package again, and published the current release there. So we started picking up connexion 2.10 instead of 2.5, which is now compatible with Flask 2. So we started installing Flask 2 and Werkzeug 2.
If we're going to import from Werkzeug, we need to depend on a particular major version of it, so it can't be changed out from under us by pip.
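For illustration, the kind of version pin that addresses this (it mirrors what the accompanying diff adds to `requirements-server.txt`) looks roughly like:

```
# requirements-server.txt (excerpt): pin Flask and Werkzeug to a major
# version so a connexion upgrade cannot silently swap them out.
connexion[swagger-ui]>=2.10.0, <3
flask>=2.0,<3
werkzeug>=2.0,<3
```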
┆Issue is synchronized with this [Jira Task](https://ucsc-cgl.atlassian.net/browse/TOIL-1130)
┆friendlyId: TOIL-1130
| [
{
"content": "# Modified from: https://github.com/common-workflow-language/workflow-service\nimport functools\nimport json\nimport os\nimport logging\nimport tempfile\nfrom abc import abstractmethod\nfrom typing import Optional, List, Dict, Any, Tuple, Callable\nfrom urllib.parse import urldefrag\n\nimport connexion # type: ignore\nfrom werkzeug.utils import secure_filename # type: ignore\n\nlogger = logging.getLogger(__name__)\n\n\nclass VersionNotImplementedException(Exception):\n \"\"\"\n Raised when the requested workflow version is not implemented.\n \"\"\"\n def __init__(self,\n wf_type: str, version: Optional[str] = None, supported_versions: Optional[List[str]] = None) -> None:\n if version:\n message = (\"workflow_type '{}' requires 'workflow_type_version' to be one of '{}'. \"\n \"Got '{}' instead.\".format(wf_type, str(supported_versions), version))\n else:\n message = f\"workflow_type '{wf_type}' is not supported.\"\n\n super(VersionNotImplementedException, self).__init__(message)\n\n\nclass MalformedRequestException(Exception):\n \"\"\"\n Raised when the request is malformed.\n \"\"\"\n def __init__(self, message: str) -> None:\n super(MalformedRequestException, self).__init__(message)\n\n\nclass WorkflowNotFoundException(Exception):\n \"\"\"\n Raised when the requested run ID is not found.\n \"\"\"\n def __init__(self) -> None:\n super(WorkflowNotFoundException, self).__init__(\"The requested workflow run wasn't found.\")\n\n\nclass WorkflowConflictException(Exception):\n \"\"\"\n Raised when the requested workflow is not in the expected state.\n \"\"\"\n def __init__(self, run_id: str):\n super(WorkflowConflictException, self).__init__(f\"Workflow {run_id} exists when it shouldn't.\")\n\n\nclass OperationForbidden(Exception):\n \"\"\"\n Raised when the request is forbidden.\n \"\"\"\n def __init__(self, message: str) -> None:\n super(OperationForbidden, self).__init__(message)\n\n\nclass WorkflowExecutionException(Exception):\n \"\"\"\n Raised when an internal error occurred during the execution of the workflow.\n \"\"\"\n def __init__(self, message: str) -> None:\n super(WorkflowExecutionException, self).__init__(message)\n\n\ndef handle_errors(func: Callable[..., Any]) -> Callable[..., Any]:\n \"\"\"\n This decorator catches errors from the wrapped function and returns a JSON\n formatted error message with the appropriate status code defined by the\n GA4GH WES spec.\n \"\"\"\n\n def error(msg: Any, code: int = 500) -> Tuple[Dict[str, Any], int]:\n logger.warning(f\"Exception raised when calling '{func.__name__}()':\", exc_info=True)\n return {\"msg\": str(msg), \"status_code\": code}, code\n\n @functools.wraps(func)\n def wrapper(*args: Any, **kwargs: Any) -> Any:\n try:\n return func(*args, **kwargs)\n except MalformedRequestException as e:\n return error(e, code=400)\n except VersionNotImplementedException as e:\n return error(e, code=400)\n except OperationForbidden as e:\n return error(e, code=403)\n except (FileNotFoundError, WorkflowNotFoundException) as e:\n return error(e, code=404)\n except WorkflowConflictException as e:\n return error(e, code=400)\n except WorkflowExecutionException as e:\n return error(e, code=500)\n except Exception as e:\n return error(e, code=500)\n\n return wrapper\n\n\nclass WESBackend:\n \"\"\"\n A class to represent a GA4GH Workflow Execution Service (WES) API backend.\n Intended to be inherited. 
Subclasses should implement all abstract methods\n to handle user requests when they hit different endpoints.\n \"\"\"\n\n def __init__(self, options: List[str]):\n \"\"\"\n :param options: A list of default engine options to use when executing\n a workflow. Example options:\n [\"--logLevel=CRITICAL\",\"--workDir=/path/to/dir\",\n \"--tag=Name=default\", \"--tag=Owner=shared\", ...]\n \"\"\"\n self.options = options or []\n\n def resolve_operation_id(self, operation_id: str) -> Any:\n \"\"\"\n Map an operationId defined in the OpenAPI or swagger yaml file to a\n function.\n\n :param operation_id: The operation ID defined in the specification.\n :returns: A function that should be called when the given endpoint is\n reached.\n \"\"\"\n return getattr(self, operation_id.split(\".\")[-1])\n\n @abstractmethod\n def get_service_info(self) -> Dict[str, Any]:\n \"\"\"\n Get information about the Workflow Execution Service.\n\n GET /service-info\n \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def list_runs(self, page_size: Optional[int] = None, page_token: Optional[str] = None) -> Dict[str, Any]:\n \"\"\"\n List the workflow runs.\n\n GET /runs\n \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def run_workflow(self) -> Dict[str, str]:\n \"\"\"\n Run a workflow. This endpoint creates a new workflow run and returns\n a `RunId` to monitor its progress.\n\n POST /runs\n \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def get_run_log(self, run_id: str) -> Dict[str, Any]:\n \"\"\"\n Get detailed info about a workflow run.\n\n GET /runs/{run_id}\n \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def cancel_run(self, run_id: str) -> Dict[str, str]:\n \"\"\"\n Cancel a running workflow.\n\n POST /runs/{run_id}/cancel\n \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def get_run_status(self, run_id: str) -> Dict[str, str]:\n \"\"\"\n Get quick status info about a workflow run, returning a simple result\n with the overall state of the workflow run.\n\n GET /runs/{run_id}/status\n \"\"\"\n raise NotImplementedError\n\n @staticmethod\n def log_for_run(run_id: Optional[str], message: str) -> None:\n if run_id:\n logging.info(\"Workflow %s: %s\", run_id, message)\n else:\n logging.info(message)\n\n @staticmethod\n def secure_path(path: str) -> str:\n return os.path.join(*[str(secure_filename(p)) for p in path.split(\"/\") if p not in (\"\", \".\", \"..\")])\n\n def collect_attachments(self, run_id: Optional[str], temp_dir: Optional[str]) -> Tuple[str, Dict[str, Any]]:\n \"\"\"\n Collect attachments from the current request by staging uploaded files\n to temp_dir, and return the temp_dir and parsed body of the request.\n\n :param run_id: The run ID for logging.\n :param temp_dir: The directory where uploaded files should be staged.\n If None, a temporary directory is created.\n \"\"\"\n if not temp_dir:\n temp_dir = tempfile.mkdtemp()\n body = {}\n has_attachments = False\n for key, ls in connexion.request.files.lists():\n try:\n for value in ls:\n # uploaded files that are required to execute the workflow\n if key == \"workflow_attachment\":\n # guard against maliciously constructed filenames\n dest = os.path.join(temp_dir, self.secure_path(value.filename))\n if not os.path.isdir(os.path.dirname(dest)):\n os.makedirs(os.path.dirname(dest))\n self.log_for_run(run_id, f\"Staging attachment '{value.filename}' to '{dest}'\")\n value.save(dest)\n has_attachments = True\n body[key] = f\"file://{temp_dir}\" # Reference to temp working dir.\n\n elif key in 
(\"workflow_params\", \"tags\", \"workflow_engine_parameters\"):\n content = value.read()\n body[key] = json.loads(content.decode(\"utf-8\"))\n else:\n body[key] = value.read().decode()\n except Exception as e:\n raise MalformedRequestException(f\"Error reading parameter '{key}': {e}\")\n\n for key, ls in connexion.request.form.lists():\n try:\n for value in ls:\n if not value:\n continue\n if key in (\"workflow_params\", \"tags\", \"workflow_engine_parameters\"):\n body[key] = json.loads(value)\n else:\n body[key] = value\n except Exception as e:\n raise MalformedRequestException(f\"Error reading parameter '{key}': {e}\")\n\n if \"workflow_url\" in body:\n url, ref = urldefrag(body[\"workflow_url\"])\n if \":\" not in url:\n if not has_attachments:\n raise MalformedRequestException(\"Relative 'workflow_url' but missing 'workflow_attachment'\")\n body[\"workflow_url\"] = self.secure_path(url) # keep this relative\n if ref:\n # append \"#ref\" after the url\n body[\"workflow_url\"] += \"#\" + self.secure_path(ref)\n self.log_for_run(run_id, \"Using workflow_url '%s'\" % body.get(\"workflow_url\"))\n else:\n raise MalformedRequestException(\"Missing 'workflow_url' in submission\")\n if \"workflow_params\" not in body:\n raise MalformedRequestException(\"Missing 'workflow_params' in submission\")\n\n return temp_dir, body\n",
"path": "src/toil/server/wes/abstract_backend.py"
}
] | [
{
"content": "# Modified from: https://github.com/common-workflow-language/workflow-service\nimport functools\nimport json\nimport os\nimport logging\nimport tempfile\nfrom abc import abstractmethod\nfrom typing import Optional, List, Dict, Any, Tuple, Callable\nfrom urllib.parse import urldefrag\n\nimport connexion # type: ignore\nfrom werkzeug.utils import secure_filename\n\nlogger = logging.getLogger(__name__)\n\n\nclass VersionNotImplementedException(Exception):\n \"\"\"\n Raised when the requested workflow version is not implemented.\n \"\"\"\n def __init__(self,\n wf_type: str, version: Optional[str] = None, supported_versions: Optional[List[str]] = None) -> None:\n if version:\n message = (\"workflow_type '{}' requires 'workflow_type_version' to be one of '{}'. \"\n \"Got '{}' instead.\".format(wf_type, str(supported_versions), version))\n else:\n message = f\"workflow_type '{wf_type}' is not supported.\"\n\n super(VersionNotImplementedException, self).__init__(message)\n\n\nclass MalformedRequestException(Exception):\n \"\"\"\n Raised when the request is malformed.\n \"\"\"\n def __init__(self, message: str) -> None:\n super(MalformedRequestException, self).__init__(message)\n\n\nclass WorkflowNotFoundException(Exception):\n \"\"\"\n Raised when the requested run ID is not found.\n \"\"\"\n def __init__(self) -> None:\n super(WorkflowNotFoundException, self).__init__(\"The requested workflow run wasn't found.\")\n\n\nclass WorkflowConflictException(Exception):\n \"\"\"\n Raised when the requested workflow is not in the expected state.\n \"\"\"\n def __init__(self, run_id: str):\n super(WorkflowConflictException, self).__init__(f\"Workflow {run_id} exists when it shouldn't.\")\n\n\nclass OperationForbidden(Exception):\n \"\"\"\n Raised when the request is forbidden.\n \"\"\"\n def __init__(self, message: str) -> None:\n super(OperationForbidden, self).__init__(message)\n\n\nclass WorkflowExecutionException(Exception):\n \"\"\"\n Raised when an internal error occurred during the execution of the workflow.\n \"\"\"\n def __init__(self, message: str) -> None:\n super(WorkflowExecutionException, self).__init__(message)\n\n\ndef handle_errors(func: Callable[..., Any]) -> Callable[..., Any]:\n \"\"\"\n This decorator catches errors from the wrapped function and returns a JSON\n formatted error message with the appropriate status code defined by the\n GA4GH WES spec.\n \"\"\"\n\n def error(msg: Any, code: int = 500) -> Tuple[Dict[str, Any], int]:\n logger.warning(f\"Exception raised when calling '{func.__name__}()':\", exc_info=True)\n return {\"msg\": str(msg), \"status_code\": code}, code\n\n @functools.wraps(func)\n def wrapper(*args: Any, **kwargs: Any) -> Any:\n try:\n return func(*args, **kwargs)\n except MalformedRequestException as e:\n return error(e, code=400)\n except VersionNotImplementedException as e:\n return error(e, code=400)\n except OperationForbidden as e:\n return error(e, code=403)\n except (FileNotFoundError, WorkflowNotFoundException) as e:\n return error(e, code=404)\n except WorkflowConflictException as e:\n return error(e, code=400)\n except WorkflowExecutionException as e:\n return error(e, code=500)\n except Exception as e:\n return error(e, code=500)\n\n return wrapper\n\n\nclass WESBackend:\n \"\"\"\n A class to represent a GA4GH Workflow Execution Service (WES) API backend.\n Intended to be inherited. 
Subclasses should implement all abstract methods\n to handle user requests when they hit different endpoints.\n \"\"\"\n\n def __init__(self, options: List[str]):\n \"\"\"\n :param options: A list of default engine options to use when executing\n a workflow. Example options:\n [\"--logLevel=CRITICAL\",\"--workDir=/path/to/dir\",\n \"--tag=Name=default\", \"--tag=Owner=shared\", ...]\n \"\"\"\n self.options = options or []\n\n def resolve_operation_id(self, operation_id: str) -> Any:\n \"\"\"\n Map an operationId defined in the OpenAPI or swagger yaml file to a\n function.\n\n :param operation_id: The operation ID defined in the specification.\n :returns: A function that should be called when the given endpoint is\n reached.\n \"\"\"\n return getattr(self, operation_id.split(\".\")[-1])\n\n @abstractmethod\n def get_service_info(self) -> Dict[str, Any]:\n \"\"\"\n Get information about the Workflow Execution Service.\n\n GET /service-info\n \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def list_runs(self, page_size: Optional[int] = None, page_token: Optional[str] = None) -> Dict[str, Any]:\n \"\"\"\n List the workflow runs.\n\n GET /runs\n \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def run_workflow(self) -> Dict[str, str]:\n \"\"\"\n Run a workflow. This endpoint creates a new workflow run and returns\n a `RunId` to monitor its progress.\n\n POST /runs\n \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def get_run_log(self, run_id: str) -> Dict[str, Any]:\n \"\"\"\n Get detailed info about a workflow run.\n\n GET /runs/{run_id}\n \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def cancel_run(self, run_id: str) -> Dict[str, str]:\n \"\"\"\n Cancel a running workflow.\n\n POST /runs/{run_id}/cancel\n \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def get_run_status(self, run_id: str) -> Dict[str, str]:\n \"\"\"\n Get quick status info about a workflow run, returning a simple result\n with the overall state of the workflow run.\n\n GET /runs/{run_id}/status\n \"\"\"\n raise NotImplementedError\n\n @staticmethod\n def log_for_run(run_id: Optional[str], message: str) -> None:\n if run_id:\n logging.info(\"Workflow %s: %s\", run_id, message)\n else:\n logging.info(message)\n\n @staticmethod\n def secure_path(path: str) -> str:\n return os.path.join(*[str(secure_filename(p)) for p in path.split(\"/\") if p not in (\"\", \".\", \"..\")])\n\n def collect_attachments(self, run_id: Optional[str], temp_dir: Optional[str]) -> Tuple[str, Dict[str, Any]]:\n \"\"\"\n Collect attachments from the current request by staging uploaded files\n to temp_dir, and return the temp_dir and parsed body of the request.\n\n :param run_id: The run ID for logging.\n :param temp_dir: The directory where uploaded files should be staged.\n If None, a temporary directory is created.\n \"\"\"\n if not temp_dir:\n temp_dir = tempfile.mkdtemp()\n body = {}\n has_attachments = False\n for key, ls in connexion.request.files.lists():\n try:\n for value in ls:\n # uploaded files that are required to execute the workflow\n if key == \"workflow_attachment\":\n # guard against maliciously constructed filenames\n dest = os.path.join(temp_dir, self.secure_path(value.filename))\n if not os.path.isdir(os.path.dirname(dest)):\n os.makedirs(os.path.dirname(dest))\n self.log_for_run(run_id, f\"Staging attachment '{value.filename}' to '{dest}'\")\n value.save(dest)\n has_attachments = True\n body[key] = f\"file://{temp_dir}\" # Reference to temp working dir.\n\n elif key in 
(\"workflow_params\", \"tags\", \"workflow_engine_parameters\"):\n content = value.read()\n body[key] = json.loads(content.decode(\"utf-8\"))\n else:\n body[key] = value.read().decode()\n except Exception as e:\n raise MalformedRequestException(f\"Error reading parameter '{key}': {e}\")\n\n for key, ls in connexion.request.form.lists():\n try:\n for value in ls:\n if not value:\n continue\n if key in (\"workflow_params\", \"tags\", \"workflow_engine_parameters\"):\n body[key] = json.loads(value)\n else:\n body[key] = value\n except Exception as e:\n raise MalformedRequestException(f\"Error reading parameter '{key}': {e}\")\n\n if \"workflow_url\" in body:\n url, ref = urldefrag(body[\"workflow_url\"])\n if \":\" not in url:\n if not has_attachments:\n raise MalformedRequestException(\"Relative 'workflow_url' but missing 'workflow_attachment'\")\n body[\"workflow_url\"] = self.secure_path(url) # keep this relative\n if ref:\n # append \"#ref\" after the url\n body[\"workflow_url\"] += \"#\" + self.secure_path(ref)\n self.log_for_run(run_id, \"Using workflow_url '%s'\" % body.get(\"workflow_url\"))\n else:\n raise MalformedRequestException(\"Missing 'workflow_url' in submission\")\n if \"workflow_params\" not in body:\n raise MalformedRequestException(\"Missing 'workflow_params' in submission\")\n\n return temp_dir, body\n",
"path": "src/toil/server/wes/abstract_backend.py"
}
] | diff --git a/requirements-server.txt b/requirements-server.txt
index 5657b16453..8d0d9a790b 100644
--- a/requirements-server.txt
+++ b/requirements-server.txt
@@ -1,4 +1,6 @@
-connexion[swagger-ui]>=2.5.1, <3
+connexion[swagger-ui]>=2.10.0, <3
+flask>=2.0,<3
+werkzeug>=2.0,<3
flask-cors==3.0.10
gunicorn==20.1.0
celery>=5.1.0, <6
diff --git a/src/toil/server/wes/abstract_backend.py b/src/toil/server/wes/abstract_backend.py
index 95fb5427b2..c340c5969c 100644
--- a/src/toil/server/wes/abstract_backend.py
+++ b/src/toil/server/wes/abstract_backend.py
@@ -9,7 +9,7 @@
from urllib.parse import urldefrag
import connexion # type: ignore
-from werkzeug.utils import secure_filename # type: ignore
+from werkzeug.utils import secure_filename
logger = logging.getLogger(__name__)
|
mitmproxy__mitmproxy-1150 | ServerException instead of ProxyServerError
##### Steps to reproduce the problem:
```
>>> from libmproxy.proxy.server import ProxyServer
>>> from libmproxy.proxy.config import ProxyConfig
>>> ProxyServer(ProxyConfig(port=80))
(...)
ServerException: Error starting proxy server: error(13, 'Permission denied')
```
##### What is the expected behavior?
According to the documentation:
```
>>> ProxyServer?
Type: type
String form: <class 'libmproxy.proxy.server.ProxyServer'>
File: /usr/lib/python2.7/dist-packages/libmproxy/proxy/server.py
Init definition: ProxyServer(self, config)
Docstring: <no docstring>
Init docstring: Raises ProxyServerError if there's a startup problem.
```
the expected behavior is
```
>>> ProxyServer(ProxyConfig(port=80))
(...)
ProxyServerError: Error starting proxy server: error(13, 'Permission denied')
```
##### What went wrong?
Maybe the documentation is wrong?
##### Any other comments?
Nope.
---
Mitmproxy Version: 0.15-2
Operating System: Debian Sid.
| [
{
"content": "from __future__ import (absolute_import, print_function, division)\n\nimport traceback\nimport sys\nimport socket\nimport six\n\nfrom netlib import tcp\nfrom netlib.exceptions import TcpException\nfrom netlib.http.http1 import assemble_response\nfrom ..exceptions import ProtocolException, ServerException, ClientHandshakeException, Kill\nfrom ..models import ClientConnection, make_error_response\nfrom .modes import HttpUpstreamProxy, HttpProxy, ReverseProxy, TransparentProxy, Socks5Proxy\nfrom .root_context import RootContext, Log\n\n\nclass DummyServer:\n bound = False\n\n def __init__(self, config):\n self.config = config\n\n def set_channel(self, channel):\n pass\n\n def serve_forever(self):\n pass\n\n def shutdown(self):\n pass\n\n\nclass ProxyServer(tcp.TCPServer):\n allow_reuse_address = True\n bound = True\n\n def __init__(self, config):\n \"\"\"\n Raises ProxyServerError if there's a startup problem.\n \"\"\"\n self.config = config\n try:\n super(ProxyServer, self).__init__((config.host, config.port))\n except socket.error as e:\n six.reraise(\n ServerException,\n ServerException('Error starting proxy server: ' + repr(e)),\n sys.exc_info()[2]\n )\n self.channel = None\n\n def set_channel(self, channel):\n self.channel = channel\n\n def handle_client_connection(self, conn, client_address):\n h = ConnectionHandler(\n conn,\n client_address,\n self.config,\n self.channel\n )\n h.handle()\n\n\nclass ConnectionHandler(object):\n\n def __init__(self, client_conn, client_address, config, channel):\n self.config = config\n \"\"\"@type: mitmproxy.proxy.config.ProxyConfig\"\"\"\n self.client_conn = ClientConnection(\n client_conn,\n client_address,\n None)\n \"\"\"@type: mitmproxy.proxy.connection.ClientConnection\"\"\"\n self.channel = channel\n \"\"\"@type: mitmproxy.controller.Channel\"\"\"\n\n def _create_root_layer(self):\n root_context = RootContext(\n self.client_conn,\n self.config,\n self.channel\n )\n\n mode = self.config.mode\n if mode == \"upstream\":\n return HttpUpstreamProxy(\n root_context,\n self.config.upstream_server.address\n )\n elif mode == \"transparent\":\n return TransparentProxy(root_context)\n elif mode == \"reverse\":\n server_tls = self.config.upstream_server.scheme == \"https\"\n return ReverseProxy(\n root_context,\n self.config.upstream_server.address,\n server_tls\n )\n elif mode == \"socks5\":\n return Socks5Proxy(root_context)\n elif mode == \"regular\":\n return HttpProxy(root_context)\n elif callable(mode): # pragma: no cover\n return mode(root_context)\n else: # pragma: no cover\n raise ValueError(\"Unknown proxy mode: %s\" % mode)\n\n def handle(self):\n self.log(\"clientconnect\", \"info\")\n\n root_layer = self._create_root_layer()\n\n try:\n root_layer = self.channel.ask(\"clientconnect\", root_layer)\n root_layer()\n except Kill:\n self.log(\"Connection killed\", \"info\")\n except ProtocolException as e:\n\n if isinstance(e, ClientHandshakeException):\n self.log(\n \"Client Handshake failed. 
\"\n \"The client may not trust the proxy's certificate for {}.\".format(e.server),\n \"error\"\n )\n self.log(repr(e), \"debug\")\n else:\n self.log(repr(e), \"info\")\n\n self.log(traceback.format_exc(), \"debug\")\n # If an error propagates to the topmost level,\n # we send an HTTP error response, which is both\n # understandable by HTTP clients and humans.\n try:\n error_response = make_error_response(502, repr(e))\n self.client_conn.send(assemble_response(error_response))\n except TcpException:\n pass\n except Exception:\n self.log(traceback.format_exc(), \"error\")\n print(traceback.format_exc(), file=sys.stderr)\n print(\"mitmproxy has crashed!\", file=sys.stderr)\n print(\"Please lodge a bug report at: https://github.com/mitmproxy/mitmproxy\", file=sys.stderr)\n\n self.log(\"clientdisconnect\", \"info\")\n self.channel.tell(\"clientdisconnect\", root_layer)\n self.client_conn.finish()\n\n def log(self, msg, level):\n msg = \"{}: {}\".format(repr(self.client_conn.address), msg)\n self.channel.tell(\"log\", Log(msg, level))\n",
"path": "mitmproxy/proxy/server.py"
}
] | [
{
"content": "from __future__ import (absolute_import, print_function, division)\n\nimport traceback\nimport sys\nimport socket\nimport six\n\nfrom netlib import tcp\nfrom netlib.exceptions import TcpException\nfrom netlib.http.http1 import assemble_response\nfrom ..exceptions import ProtocolException, ServerException, ClientHandshakeException, Kill\nfrom ..models import ClientConnection, make_error_response\nfrom .modes import HttpUpstreamProxy, HttpProxy, ReverseProxy, TransparentProxy, Socks5Proxy\nfrom .root_context import RootContext, Log\n\n\nclass DummyServer:\n bound = False\n\n def __init__(self, config):\n self.config = config\n\n def set_channel(self, channel):\n pass\n\n def serve_forever(self):\n pass\n\n def shutdown(self):\n pass\n\n\nclass ProxyServer(tcp.TCPServer):\n allow_reuse_address = True\n bound = True\n\n def __init__(self, config):\n \"\"\"\n Raises ServerException if there's a startup problem.\n \"\"\"\n self.config = config\n try:\n super(ProxyServer, self).__init__((config.host, config.port))\n except socket.error as e:\n six.reraise(\n ServerException,\n ServerException('Error starting proxy server: ' + repr(e)),\n sys.exc_info()[2]\n )\n self.channel = None\n\n def set_channel(self, channel):\n self.channel = channel\n\n def handle_client_connection(self, conn, client_address):\n h = ConnectionHandler(\n conn,\n client_address,\n self.config,\n self.channel\n )\n h.handle()\n\n\nclass ConnectionHandler(object):\n\n def __init__(self, client_conn, client_address, config, channel):\n self.config = config\n \"\"\"@type: mitmproxy.proxy.config.ProxyConfig\"\"\"\n self.client_conn = ClientConnection(\n client_conn,\n client_address,\n None)\n \"\"\"@type: mitmproxy.proxy.connection.ClientConnection\"\"\"\n self.channel = channel\n \"\"\"@type: mitmproxy.controller.Channel\"\"\"\n\n def _create_root_layer(self):\n root_context = RootContext(\n self.client_conn,\n self.config,\n self.channel\n )\n\n mode = self.config.mode\n if mode == \"upstream\":\n return HttpUpstreamProxy(\n root_context,\n self.config.upstream_server.address\n )\n elif mode == \"transparent\":\n return TransparentProxy(root_context)\n elif mode == \"reverse\":\n server_tls = self.config.upstream_server.scheme == \"https\"\n return ReverseProxy(\n root_context,\n self.config.upstream_server.address,\n server_tls\n )\n elif mode == \"socks5\":\n return Socks5Proxy(root_context)\n elif mode == \"regular\":\n return HttpProxy(root_context)\n elif callable(mode): # pragma: no cover\n return mode(root_context)\n else: # pragma: no cover\n raise ValueError(\"Unknown proxy mode: %s\" % mode)\n\n def handle(self):\n self.log(\"clientconnect\", \"info\")\n\n root_layer = self._create_root_layer()\n\n try:\n root_layer = self.channel.ask(\"clientconnect\", root_layer)\n root_layer()\n except Kill:\n self.log(\"Connection killed\", \"info\")\n except ProtocolException as e:\n\n if isinstance(e, ClientHandshakeException):\n self.log(\n \"Client Handshake failed. 
\"\n \"The client may not trust the proxy's certificate for {}.\".format(e.server),\n \"error\"\n )\n self.log(repr(e), \"debug\")\n else:\n self.log(repr(e), \"info\")\n\n self.log(traceback.format_exc(), \"debug\")\n # If an error propagates to the topmost level,\n # we send an HTTP error response, which is both\n # understandable by HTTP clients and humans.\n try:\n error_response = make_error_response(502, repr(e))\n self.client_conn.send(assemble_response(error_response))\n except TcpException:\n pass\n except Exception:\n self.log(traceback.format_exc(), \"error\")\n print(traceback.format_exc(), file=sys.stderr)\n print(\"mitmproxy has crashed!\", file=sys.stderr)\n print(\"Please lodge a bug report at: https://github.com/mitmproxy/mitmproxy\", file=sys.stderr)\n\n self.log(\"clientdisconnect\", \"info\")\n self.channel.tell(\"clientdisconnect\", root_layer)\n self.client_conn.finish()\n\n def log(self, msg, level):\n msg = \"{}: {}\".format(repr(self.client_conn.address), msg)\n self.channel.tell(\"log\", Log(msg, level))\n",
"path": "mitmproxy/proxy/server.py"
}
] | diff --git a/mitmproxy/proxy/server.py b/mitmproxy/proxy/server.py
index 4304bd0be3..8483d3df6c 100644
--- a/mitmproxy/proxy/server.py
+++ b/mitmproxy/proxy/server.py
@@ -36,7 +36,7 @@ class ProxyServer(tcp.TCPServer):
def __init__(self, config):
"""
- Raises ProxyServerError if there's a startup problem.
+ Raises ServerException if there's a startup problem.
"""
self.config = config
try:
|
huggingface__transformers-9379 | Improve coverage of the documentation
Currently, some public classes are not documented anywhere because we didn't create the corresponding doc pages. Those missing pages are:
- Benchmark classes
- Bert Japanese
- Data collators
If someone feels like working on one of those, please tag yourself with a comment on this issue. Once the objects are properly documented, they can be removed from the `SHOULD_BE_DOCUMENTED` constant in [this file](https://github.com/huggingface/transformers/blob/1310e1a758edc8e89ec363db76863c771fbeb1de/utils/check_repo.py#L374).
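
To make the workflow concrete: `find_all_documented_objects` in the `check_repo.py` reproduced below discovers documented objects by scanning the `.rst` sources for `autoclass`/`autofunction` directives, so once a doc page exists (this record's PR adds `docs/source/model_doc/bertweet.rst`), the corresponding name can be dropped from the constant. A minimal, self-contained sketch of that discovery step, with the regex copied from the file and sample content mirroring the directive added in the PR:

```python
import re

# Sketch of the discovery step used by utils/check_repo.py (regex taken from
# find_all_documented_objects in the file below).
rst_content = """
BertweetTokenizer
~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.BertweetTokenizer
    :members:
"""

documented = [
    obj.split(".")[-1]
    for obj in re.findall(r"(?:autoclass|autofunction):: transformers.(\S+)\s+", rst_content)
]
print(documented)  # ['BertweetTokenizer'] -> the name can now be removed from the constant
```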
| [
{
"content": "# coding=utf-8\n# Copyright 2020 The HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport importlib\nimport inspect\nimport os\nimport re\nfrom pathlib import Path\n\n\n# All paths are set with the intent you should run this script from the root of the repo with the command\n# python utils/check_repo.py\nPATH_TO_TRANSFORMERS = \"src/transformers\"\nPATH_TO_TESTS = \"tests\"\nPATH_TO_DOC = \"docs/source\"\n\n# Update this list for models that are not tested with a comment explaining the reason it should not be.\n# Being in this list is an exception and should **not** be the rule.\nIGNORE_NON_TESTED = [\n # models to ignore for not tested\n \"BartDecoder\", # Building part of bigger (tested) model.\n \"BartEncoder\", # Building part of bigger (tested) model.\n \"BertLMHeadModel\", # Needs to be setup as decoder.\n \"DPREncoder\", # Building part of bigger (tested) model.\n \"DPRSpanPredictor\", # Building part of bigger (tested) model.\n \"ProphetNetDecoderWrapper\", # Building part of bigger (tested) model.\n \"ReformerForMaskedLM\", # Needs to be setup as decoder.\n \"T5Stack\", # Building part of bigger (tested) model.\n \"TFDPREncoder\", # Building part of bigger (tested) model.\n \"TFDPRSpanPredictor\", # Building part of bigger (tested) model.\n \"TFElectraMainLayer\", # Building part of bigger (tested) model (should it be a TFPreTrainedModel ?)\n \"TFRobertaForMultipleChoice\", # TODO: fix\n]\n\n# Update this list with test files that don't have a tester with a `all_model_classes` variable and which don't\n# trigger the common tests.\nTEST_FILES_WITH_NO_COMMON_TESTS = [\n \"test_modeling_camembert.py\",\n \"test_modeling_flax_bert.py\",\n \"test_modeling_flax_roberta.py\",\n \"test_modeling_mbart.py\",\n \"test_modeling_mt5.py\",\n \"test_modeling_pegasus.py\",\n \"test_modeling_tf_camembert.py\",\n \"test_modeling_tf_mt5.py\",\n \"test_modeling_tf_xlm_roberta.py\",\n \"test_modeling_xlm_prophetnet.py\",\n \"test_modeling_xlm_roberta.py\",\n]\n\n# Update this list for models that are not in any of the auto MODEL_XXX_MAPPING. 
Being in this list is an exception and\n# should **not** be the rule.\nIGNORE_NON_AUTO_CONFIGURED = [\n # models to ignore for model xxx mapping\n \"BartDecoder\",\n \"BartEncoder\",\n \"DPRContextEncoder\",\n \"DPREncoder\",\n \"DPRReader\",\n \"DPRSpanPredictor\",\n \"FlaubertForQuestionAnswering\",\n \"FunnelBaseModel\",\n \"GPT2DoubleHeadsModel\",\n \"MT5EncoderModel\",\n \"OpenAIGPTDoubleHeadsModel\",\n \"ProphetNetDecoder\",\n \"ProphetNetEncoder\",\n \"ProphetNetDecoderWrapper\",\n \"RagModel\",\n \"RagSequenceForGeneration\",\n \"RagTokenForGeneration\",\n \"T5Stack\",\n \"T5EncoderModel\",\n \"TFDPRContextEncoder\",\n \"TFDPREncoder\",\n \"TFDPRReader\",\n \"TFDPRSpanPredictor\",\n \"TFFunnelBaseModel\",\n \"TFGPT2DoubleHeadsModel\",\n \"TFMT5EncoderModel\",\n \"TFOpenAIGPTDoubleHeadsModel\",\n \"TFT5EncoderModel\",\n \"XLMForQuestionAnswering\",\n \"XLMProphetNetDecoder\",\n \"XLMProphetNetEncoder\",\n \"XLNetForQuestionAnswering\",\n]\n\n# This is to make sure the transformers module imported is the one in the repo.\nspec = importlib.util.spec_from_file_location(\n \"transformers\",\n os.path.join(PATH_TO_TRANSFORMERS, \"__init__.py\"),\n submodule_search_locations=[PATH_TO_TRANSFORMERS],\n)\ntransformers = spec.loader.load_module()\n\n\n# If some modeling modules should be ignored for all checks, they should be added in the nested list\n# _ignore_modules of this function.\ndef get_model_modules():\n \"\"\" Get the model modules inside the transformers library. \"\"\"\n _ignore_modules = [\n \"modeling_auto\",\n \"modeling_encoder_decoder\",\n \"modeling_marian\",\n \"modeling_mmbt\",\n \"modeling_outputs\",\n \"modeling_retribert\",\n \"modeling_utils\",\n \"modeling_flax_auto\",\n \"modeling_flax_utils\",\n \"modeling_transfo_xl_utilities\",\n \"modeling_tf_auto\",\n \"modeling_tf_outputs\",\n \"modeling_tf_pytorch_utils\",\n \"modeling_tf_utils\",\n \"modeling_tf_transfo_xl_utilities\",\n ]\n modules = []\n for model in dir(transformers.models):\n # There are some magic dunder attributes in the dir, we ignore them\n if not model.startswith(\"__\"):\n model_module = getattr(transformers.models, model)\n for submodule in dir(model_module):\n if submodule.startswith(\"modeling\") and submodule not in _ignore_modules:\n modeling_module = getattr(model_module, submodule)\n if inspect.ismodule(modeling_module):\n modules.append(modeling_module)\n return modules\n\n\ndef get_models(module):\n \"\"\" Get the objects in module that are models.\"\"\"\n models = []\n model_classes = (transformers.PreTrainedModel, transformers.TFPreTrainedModel)\n for attr_name in dir(module):\n if \"Pretrained\" in attr_name or \"PreTrained\" in attr_name:\n continue\n attr = getattr(module, attr_name)\n if isinstance(attr, type) and issubclass(attr, model_classes) and attr.__module__ == module.__name__:\n models.append((attr_name, attr))\n return models\n\n\n# If some test_modeling files should be ignored when checking models are all tested, they should be added in the\n# nested list _ignore_files of this function.\ndef get_model_test_files():\n \"\"\" Get the model test files.\"\"\"\n _ignore_files = [\n \"test_modeling_common\",\n \"test_modeling_encoder_decoder\",\n \"test_modeling_marian\",\n \"test_modeling_tf_common\",\n ]\n test_files = []\n for filename in os.listdir(PATH_TO_TESTS):\n if (\n os.path.isfile(f\"{PATH_TO_TESTS}/{filename}\")\n and filename.startswith(\"test_modeling\")\n and not os.path.splitext(filename)[0] in _ignore_files\n ):\n test_files.append(filename)\n return 
test_files\n\n\n# This is a bit hacky but I didn't find a way to import the test_file as a module and read inside the tester class\n# for the all_model_classes variable.\ndef find_tested_models(test_file):\n \"\"\" Parse the content of test_file to detect what's in all_model_classes\"\"\"\n # This is a bit hacky but I didn't find a way to import the test_file as a module and read inside the class\n with open(os.path.join(PATH_TO_TESTS, test_file), \"r\", encoding=\"utf-8\", newline=\"\\n\") as f:\n content = f.read()\n all_models = re.findall(r\"all_model_classes\\s+=\\s+\\(\\s*\\(([^\\)]*)\\)\", content)\n # Check with one less parenthesis\n if len(all_models) == 0:\n all_models = re.findall(r\"all_model_classes\\s+=\\s+\\(([^\\)]*)\\)\", content)\n if len(all_models) > 0:\n model_tested = []\n for entry in all_models:\n for line in entry.split(\",\"):\n name = line.strip()\n if len(name) > 0:\n model_tested.append(name)\n return model_tested\n\n\ndef check_models_are_tested(module, test_file):\n \"\"\" Check models defined in module are tested in test_file.\"\"\"\n defined_models = get_models(module)\n tested_models = find_tested_models(test_file)\n if tested_models is None:\n if test_file in TEST_FILES_WITH_NO_COMMON_TESTS:\n return\n return [\n f\"{test_file} should define `all_model_classes` to apply common tests to the models it tests. \"\n + \"If this intentional, add the test filename to `TEST_FILES_WITH_NO_COMMON_TESTS` in the file \"\n + \"`utils/check_repo.py`.\"\n ]\n failures = []\n for model_name, _ in defined_models:\n if model_name not in tested_models and model_name not in IGNORE_NON_TESTED:\n failures.append(\n f\"{model_name} is defined in {module.__name__} but is not tested in \"\n + f\"{os.path.join(PATH_TO_TESTS, test_file)}. Add it to the all_model_classes in that file.\"\n + \"If common tests should not applied to that model, add its name to `IGNORE_NON_TESTED`\"\n + \"in the file `utils/check_repo.py`.\"\n )\n return failures\n\n\ndef check_all_models_are_tested():\n \"\"\" Check all models are properly tested.\"\"\"\n modules = get_model_modules()\n test_files = get_model_test_files()\n failures = []\n for module in modules:\n test_file = f\"test_{module.__name__.split('.')[-1]}.py\"\n if test_file not in test_files:\n failures.append(f\"{module.__name__} does not have its corresponding test file {test_file}.\")\n new_failures = check_models_are_tested(module, test_file)\n if new_failures is not None:\n failures += new_failures\n if len(failures) > 0:\n raise Exception(f\"There were {len(failures)} failures:\\n\" + \"\\n\".join(failures))\n\n\ndef get_all_auto_configured_models():\n \"\"\" Return the list of all models in at least one auto class.\"\"\"\n result = set() # To avoid duplicates we concatenate all model classes in a set.\n for attr_name in dir(transformers.models.auto.modeling_auto):\n if attr_name.startswith(\"MODEL_\") and attr_name.endswith(\"MAPPING\"):\n result = result | set(getattr(transformers.models.auto.modeling_auto, attr_name).values())\n for attr_name in dir(transformers.models.auto.modeling_tf_auto):\n if attr_name.startswith(\"TF_MODEL_\") and attr_name.endswith(\"MAPPING\"):\n result = result | set(getattr(transformers.models.auto.modeling_tf_auto, attr_name).values())\n return [cls.__name__ for cls in result]\n\n\ndef check_models_are_auto_configured(module, all_auto_models):\n \"\"\" Check models defined in module are each in an auto class.\"\"\"\n defined_models = get_models(module)\n failures = []\n for model_name, _ in 
defined_models:\n if model_name not in all_auto_models and model_name not in IGNORE_NON_AUTO_CONFIGURED:\n failures.append(\n f\"{model_name} is defined in {module.__name__} but is not present in any of the auto mapping. \"\n \"If that is intended behavior, add its name to `IGNORE_NON_AUTO_CONFIGURED` in the file \"\n \"`utils/check_repo.py`.\"\n )\n return failures\n\n\ndef check_all_models_are_auto_configured():\n \"\"\" Check all models are each in an auto class.\"\"\"\n modules = get_model_modules()\n all_auto_models = get_all_auto_configured_models()\n failures = []\n for module in modules:\n new_failures = check_models_are_auto_configured(module, all_auto_models)\n if new_failures is not None:\n failures += new_failures\n if len(failures) > 0:\n raise Exception(f\"There were {len(failures)} failures:\\n\" + \"\\n\".join(failures))\n\n\n_re_decorator = re.compile(r\"^\\s*@(\\S+)\\s+$\")\n\n\ndef check_decorator_order(filename):\n \"\"\" Check that in the test file `filename` the slow decorator is always last.\"\"\"\n with open(filename, \"r\", encoding=\"utf-8\", newline=\"\\n\") as f:\n lines = f.readlines()\n decorator_before = None\n errors = []\n for i, line in enumerate(lines):\n search = _re_decorator.search(line)\n if search is not None:\n decorator_name = search.groups()[0]\n if decorator_before is not None and decorator_name.startswith(\"parameterized\"):\n errors.append(i)\n decorator_before = decorator_name\n elif decorator_before is not None:\n decorator_before = None\n return errors\n\n\ndef check_all_decorator_order():\n \"\"\" Check that in all test files, the slow decorator is always last.\"\"\"\n errors = []\n for fname in os.listdir(PATH_TO_TESTS):\n if fname.endswith(\".py\"):\n filename = os.path.join(PATH_TO_TESTS, fname)\n new_errors = check_decorator_order(filename)\n errors += [f\"- {filename}, line {i}\" for i in new_errors]\n if len(errors) > 0:\n msg = \"\\n\".join(errors)\n raise ValueError(\n f\"The parameterized decorator (and its variants) should always be first, but this is not the case in the following files:\\n{msg}\"\n )\n\n\ndef find_all_documented_objects():\n \"\"\" Parse the content of all doc files to detect which classes and functions it documents\"\"\"\n documented_obj = []\n for doc_file in Path(PATH_TO_DOC).glob(\"**/*.rst\"):\n with open(doc_file) as f:\n content = f.read()\n raw_doc_objs = re.findall(r\"(?:autoclass|autofunction):: transformers.(\\S+)\\s+\", content)\n documented_obj += [obj.split(\".\")[-1] for obj in raw_doc_objs]\n return documented_obj\n\n\n# One good reason for not being documented is to be deprecated. 
Put in this list deprecated objects.\nDEPRECATED_OBJECTS = [\n \"AutoModelWithLMHead\",\n \"BartPretrainedModel\",\n \"GlueDataset\",\n \"GlueDataTrainingArguments\",\n \"LineByLineTextDataset\",\n \"LineByLineWithRefDataset\",\n \"LineByLineWithSOPTextDataset\",\n \"PretrainedBartModel\",\n \"PretrainedFSMTModel\",\n \"SingleSentenceClassificationProcessor\",\n \"SquadDataTrainingArguments\",\n \"SquadDataset\",\n \"SquadExample\",\n \"SquadFeatures\",\n \"SquadV1Processor\",\n \"SquadV2Processor\",\n \"TFAutoModelWithLMHead\",\n \"TFBartPretrainedModel\",\n \"TextDataset\",\n \"TextDatasetForNextSentencePrediction\",\n \"glue_compute_metrics\",\n \"glue_convert_examples_to_features\",\n \"glue_output_modes\",\n \"glue_processors\",\n \"glue_tasks_num_labels\",\n \"squad_convert_examples_to_features\",\n \"xnli_compute_metrics\",\n \"xnli_output_modes\",\n \"xnli_processors\",\n \"xnli_tasks_num_labels\",\n]\n\n# Exceptionally, some objects should not be documented after all rules passed.\n# ONLY PUT SOMETHING IN THIS LIST AS A LAST RESORT!\nUNDOCUMENTED_OBJECTS = [\n \"AddedToken\", # This is a tokenizers class.\n \"BasicTokenizer\", # Internal, should never have been in the main init.\n \"DPRPretrainedReader\", # Like an Encoder.\n \"ModelCard\", # Internal type.\n \"SqueezeBertModule\", # Internal building block (should have been called SqueezeBertLayer)\n \"TFDPRPretrainedReader\", # Like an Encoder.\n \"TransfoXLCorpus\", # Internal type.\n \"WordpieceTokenizer\", # Internal, should never have been in the main init.\n \"absl\", # External module\n \"add_end_docstrings\", # Internal, should never have been in the main init.\n \"add_start_docstrings\", # Internal, should never have been in the main init.\n \"cached_path\", # Internal used for downloading models.\n \"convert_tf_weight_name_to_pt_weight_name\", # Internal used to convert model weights\n \"logger\", # Internal logger\n \"logging\", # External module\n]\n\n# This list should be empty. 
Objects in it should get their own doc page.\nSHOULD_HAVE_THEIR_OWN_PAGE = [\n # bert-japanese\n \"BertJapaneseTokenizer\",\n \"CharacterTokenizer\",\n \"MecabTokenizer\",\n # Bertweet\n \"BertweetTokenizer\",\n # Herbert\n \"HerbertTokenizer\",\n \"HerbertTokenizerFast\",\n # Phoebus\n \"PhobertTokenizer\",\n # Benchmarks\n \"PyTorchBenchmark\",\n \"PyTorchBenchmarkArguments\",\n \"TensorFlowBenchmark\",\n \"TensorFlowBenchmarkArguments\",\n]\n\n\ndef ignore_undocumented(name):\n \"\"\"Rules to determine if `name` should be undocumented.\"\"\"\n # NOT DOCUMENTED ON PURPOSE.\n # Magic attributes are not documented.\n if name.startswith(\"__\"):\n return True\n # Constants uppercase are not documented.\n if name.isupper():\n return True\n # PreTrainedModels / Encoders / Decoders / Layers / Embeddings / Attention are not documented.\n if (\n name.endswith(\"PreTrainedModel\")\n or name.endswith(\"Decoder\")\n or name.endswith(\"Encoder\")\n or name.endswith(\"Layer\")\n or name.endswith(\"Embeddings\")\n or name.endswith(\"Attention\")\n ):\n return True\n # Submodules are not documented.\n if os.path.isdir(os.path.join(PATH_TO_TRANSFORMERS, name)) or os.path.isfile(\n os.path.join(PATH_TO_TRANSFORMERS, f\"{name}.py\")\n ):\n return True\n # All load functions are not documented.\n if name.startswith(\"load_tf\") or name.startswith(\"load_pytorch\"):\n return True\n # is_xxx_available functions are not documented.\n if name.startswith(\"is_\") and name.endswith(\"_available\"):\n return True\n # Deprecated objects are not documented.\n if name in DEPRECATED_OBJECTS or name in UNDOCUMENTED_OBJECTS:\n return True\n # MMBT model does not really work.\n if name.startswith(\"MMBT\"):\n return True\n\n # NOT DOCUMENTED BUT NOT ON PURPOSE, SHOULD BE FIXED!\n # All data collators should be documented\n if name.startswith(\"DataCollator\") or name.endswith(\"data_collator\"):\n return True\n if name in SHOULD_HAVE_THEIR_OWN_PAGE:\n return True\n return False\n\n\ndef check_all_objects_are_documented():\n \"\"\" Check all models are properly documented.\"\"\"\n documented_objs = find_all_documented_objects()\n undocumented_objs = [c for c in dir(transformers) if c not in documented_objs and not ignore_undocumented(c)]\n if len(undocumented_objs) > 0:\n raise Exception(\n \"The following objects are in the public init so should be documented:\\n - \"\n + \"\\n - \".join(undocumented_objs)\n )\n\n\ndef check_repo_quality():\n \"\"\" Check all models are properly tested and documented.\"\"\"\n print(\"Checking all models are properly tested.\")\n check_all_decorator_order()\n check_all_models_are_tested()\n print(\"Checking all objects are properly documented.\")\n check_all_objects_are_documented()\n print(\"Checking all models are in at least one auto class.\")\n check_all_models_are_auto_configured()\n\n\nif __name__ == \"__main__\":\n check_repo_quality()\n",
"path": "utils/check_repo.py"
}
] | [
{
"content": "# coding=utf-8\n# Copyright 2020 The HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport importlib\nimport inspect\nimport os\nimport re\nfrom pathlib import Path\n\n\n# All paths are set with the intent you should run this script from the root of the repo with the command\n# python utils/check_repo.py\nPATH_TO_TRANSFORMERS = \"src/transformers\"\nPATH_TO_TESTS = \"tests\"\nPATH_TO_DOC = \"docs/source\"\n\n# Update this list for models that are not tested with a comment explaining the reason it should not be.\n# Being in this list is an exception and should **not** be the rule.\nIGNORE_NON_TESTED = [\n # models to ignore for not tested\n \"BartDecoder\", # Building part of bigger (tested) model.\n \"BartEncoder\", # Building part of bigger (tested) model.\n \"BertLMHeadModel\", # Needs to be setup as decoder.\n \"DPREncoder\", # Building part of bigger (tested) model.\n \"DPRSpanPredictor\", # Building part of bigger (tested) model.\n \"ProphetNetDecoderWrapper\", # Building part of bigger (tested) model.\n \"ReformerForMaskedLM\", # Needs to be setup as decoder.\n \"T5Stack\", # Building part of bigger (tested) model.\n \"TFDPREncoder\", # Building part of bigger (tested) model.\n \"TFDPRSpanPredictor\", # Building part of bigger (tested) model.\n \"TFElectraMainLayer\", # Building part of bigger (tested) model (should it be a TFPreTrainedModel ?)\n \"TFRobertaForMultipleChoice\", # TODO: fix\n]\n\n# Update this list with test files that don't have a tester with a `all_model_classes` variable and which don't\n# trigger the common tests.\nTEST_FILES_WITH_NO_COMMON_TESTS = [\n \"test_modeling_camembert.py\",\n \"test_modeling_flax_bert.py\",\n \"test_modeling_flax_roberta.py\",\n \"test_modeling_mbart.py\",\n \"test_modeling_mt5.py\",\n \"test_modeling_pegasus.py\",\n \"test_modeling_tf_camembert.py\",\n \"test_modeling_tf_mt5.py\",\n \"test_modeling_tf_xlm_roberta.py\",\n \"test_modeling_xlm_prophetnet.py\",\n \"test_modeling_xlm_roberta.py\",\n]\n\n# Update this list for models that are not in any of the auto MODEL_XXX_MAPPING. 
Being in this list is an exception and\n# should **not** be the rule.\nIGNORE_NON_AUTO_CONFIGURED = [\n # models to ignore for model xxx mapping\n \"BartDecoder\",\n \"BartEncoder\",\n \"DPRContextEncoder\",\n \"DPREncoder\",\n \"DPRReader\",\n \"DPRSpanPredictor\",\n \"FlaubertForQuestionAnswering\",\n \"FunnelBaseModel\",\n \"GPT2DoubleHeadsModel\",\n \"MT5EncoderModel\",\n \"OpenAIGPTDoubleHeadsModel\",\n \"ProphetNetDecoder\",\n \"ProphetNetEncoder\",\n \"ProphetNetDecoderWrapper\",\n \"RagModel\",\n \"RagSequenceForGeneration\",\n \"RagTokenForGeneration\",\n \"T5Stack\",\n \"T5EncoderModel\",\n \"TFDPRContextEncoder\",\n \"TFDPREncoder\",\n \"TFDPRReader\",\n \"TFDPRSpanPredictor\",\n \"TFFunnelBaseModel\",\n \"TFGPT2DoubleHeadsModel\",\n \"TFMT5EncoderModel\",\n \"TFOpenAIGPTDoubleHeadsModel\",\n \"TFT5EncoderModel\",\n \"XLMForQuestionAnswering\",\n \"XLMProphetNetDecoder\",\n \"XLMProphetNetEncoder\",\n \"XLNetForQuestionAnswering\",\n]\n\n# This is to make sure the transformers module imported is the one in the repo.\nspec = importlib.util.spec_from_file_location(\n \"transformers\",\n os.path.join(PATH_TO_TRANSFORMERS, \"__init__.py\"),\n submodule_search_locations=[PATH_TO_TRANSFORMERS],\n)\ntransformers = spec.loader.load_module()\n\n\n# If some modeling modules should be ignored for all checks, they should be added in the nested list\n# _ignore_modules of this function.\ndef get_model_modules():\n \"\"\" Get the model modules inside the transformers library. \"\"\"\n _ignore_modules = [\n \"modeling_auto\",\n \"modeling_encoder_decoder\",\n \"modeling_marian\",\n \"modeling_mmbt\",\n \"modeling_outputs\",\n \"modeling_retribert\",\n \"modeling_utils\",\n \"modeling_flax_auto\",\n \"modeling_flax_utils\",\n \"modeling_transfo_xl_utilities\",\n \"modeling_tf_auto\",\n \"modeling_tf_outputs\",\n \"modeling_tf_pytorch_utils\",\n \"modeling_tf_utils\",\n \"modeling_tf_transfo_xl_utilities\",\n ]\n modules = []\n for model in dir(transformers.models):\n # There are some magic dunder attributes in the dir, we ignore them\n if not model.startswith(\"__\"):\n model_module = getattr(transformers.models, model)\n for submodule in dir(model_module):\n if submodule.startswith(\"modeling\") and submodule not in _ignore_modules:\n modeling_module = getattr(model_module, submodule)\n if inspect.ismodule(modeling_module):\n modules.append(modeling_module)\n return modules\n\n\ndef get_models(module):\n \"\"\" Get the objects in module that are models.\"\"\"\n models = []\n model_classes = (transformers.PreTrainedModel, transformers.TFPreTrainedModel)\n for attr_name in dir(module):\n if \"Pretrained\" in attr_name or \"PreTrained\" in attr_name:\n continue\n attr = getattr(module, attr_name)\n if isinstance(attr, type) and issubclass(attr, model_classes) and attr.__module__ == module.__name__:\n models.append((attr_name, attr))\n return models\n\n\n# If some test_modeling files should be ignored when checking models are all tested, they should be added in the\n# nested list _ignore_files of this function.\ndef get_model_test_files():\n \"\"\" Get the model test files.\"\"\"\n _ignore_files = [\n \"test_modeling_common\",\n \"test_modeling_encoder_decoder\",\n \"test_modeling_marian\",\n \"test_modeling_tf_common\",\n ]\n test_files = []\n for filename in os.listdir(PATH_TO_TESTS):\n if (\n os.path.isfile(f\"{PATH_TO_TESTS}/{filename}\")\n and filename.startswith(\"test_modeling\")\n and not os.path.splitext(filename)[0] in _ignore_files\n ):\n test_files.append(filename)\n return 
test_files\n\n\n# This is a bit hacky but I didn't find a way to import the test_file as a module and read inside the tester class\n# for the all_model_classes variable.\ndef find_tested_models(test_file):\n \"\"\" Parse the content of test_file to detect what's in all_model_classes\"\"\"\n # This is a bit hacky but I didn't find a way to import the test_file as a module and read inside the class\n with open(os.path.join(PATH_TO_TESTS, test_file), \"r\", encoding=\"utf-8\", newline=\"\\n\") as f:\n content = f.read()\n all_models = re.findall(r\"all_model_classes\\s+=\\s+\\(\\s*\\(([^\\)]*)\\)\", content)\n # Check with one less parenthesis\n if len(all_models) == 0:\n all_models = re.findall(r\"all_model_classes\\s+=\\s+\\(([^\\)]*)\\)\", content)\n if len(all_models) > 0:\n model_tested = []\n for entry in all_models:\n for line in entry.split(\",\"):\n name = line.strip()\n if len(name) > 0:\n model_tested.append(name)\n return model_tested\n\n\ndef check_models_are_tested(module, test_file):\n \"\"\" Check models defined in module are tested in test_file.\"\"\"\n defined_models = get_models(module)\n tested_models = find_tested_models(test_file)\n if tested_models is None:\n if test_file in TEST_FILES_WITH_NO_COMMON_TESTS:\n return\n return [\n f\"{test_file} should define `all_model_classes` to apply common tests to the models it tests. \"\n + \"If this intentional, add the test filename to `TEST_FILES_WITH_NO_COMMON_TESTS` in the file \"\n + \"`utils/check_repo.py`.\"\n ]\n failures = []\n for model_name, _ in defined_models:\n if model_name not in tested_models and model_name not in IGNORE_NON_TESTED:\n failures.append(\n f\"{model_name} is defined in {module.__name__} but is not tested in \"\n + f\"{os.path.join(PATH_TO_TESTS, test_file)}. Add it to the all_model_classes in that file.\"\n + \"If common tests should not applied to that model, add its name to `IGNORE_NON_TESTED`\"\n + \"in the file `utils/check_repo.py`.\"\n )\n return failures\n\n\ndef check_all_models_are_tested():\n \"\"\" Check all models are properly tested.\"\"\"\n modules = get_model_modules()\n test_files = get_model_test_files()\n failures = []\n for module in modules:\n test_file = f\"test_{module.__name__.split('.')[-1]}.py\"\n if test_file not in test_files:\n failures.append(f\"{module.__name__} does not have its corresponding test file {test_file}.\")\n new_failures = check_models_are_tested(module, test_file)\n if new_failures is not None:\n failures += new_failures\n if len(failures) > 0:\n raise Exception(f\"There were {len(failures)} failures:\\n\" + \"\\n\".join(failures))\n\n\ndef get_all_auto_configured_models():\n \"\"\" Return the list of all models in at least one auto class.\"\"\"\n result = set() # To avoid duplicates we concatenate all model classes in a set.\n for attr_name in dir(transformers.models.auto.modeling_auto):\n if attr_name.startswith(\"MODEL_\") and attr_name.endswith(\"MAPPING\"):\n result = result | set(getattr(transformers.models.auto.modeling_auto, attr_name).values())\n for attr_name in dir(transformers.models.auto.modeling_tf_auto):\n if attr_name.startswith(\"TF_MODEL_\") and attr_name.endswith(\"MAPPING\"):\n result = result | set(getattr(transformers.models.auto.modeling_tf_auto, attr_name).values())\n return [cls.__name__ for cls in result]\n\n\ndef check_models_are_auto_configured(module, all_auto_models):\n \"\"\" Check models defined in module are each in an auto class.\"\"\"\n defined_models = get_models(module)\n failures = []\n for model_name, _ in 
defined_models:\n if model_name not in all_auto_models and model_name not in IGNORE_NON_AUTO_CONFIGURED:\n failures.append(\n f\"{model_name} is defined in {module.__name__} but is not present in any of the auto mapping. \"\n \"If that is intended behavior, add its name to `IGNORE_NON_AUTO_CONFIGURED` in the file \"\n \"`utils/check_repo.py`.\"\n )\n return failures\n\n\ndef check_all_models_are_auto_configured():\n \"\"\" Check all models are each in an auto class.\"\"\"\n modules = get_model_modules()\n all_auto_models = get_all_auto_configured_models()\n failures = []\n for module in modules:\n new_failures = check_models_are_auto_configured(module, all_auto_models)\n if new_failures is not None:\n failures += new_failures\n if len(failures) > 0:\n raise Exception(f\"There were {len(failures)} failures:\\n\" + \"\\n\".join(failures))\n\n\n_re_decorator = re.compile(r\"^\\s*@(\\S+)\\s+$\")\n\n\ndef check_decorator_order(filename):\n \"\"\" Check that in the test file `filename` the slow decorator is always last.\"\"\"\n with open(filename, \"r\", encoding=\"utf-8\", newline=\"\\n\") as f:\n lines = f.readlines()\n decorator_before = None\n errors = []\n for i, line in enumerate(lines):\n search = _re_decorator.search(line)\n if search is not None:\n decorator_name = search.groups()[0]\n if decorator_before is not None and decorator_name.startswith(\"parameterized\"):\n errors.append(i)\n decorator_before = decorator_name\n elif decorator_before is not None:\n decorator_before = None\n return errors\n\n\ndef check_all_decorator_order():\n \"\"\" Check that in all test files, the slow decorator is always last.\"\"\"\n errors = []\n for fname in os.listdir(PATH_TO_TESTS):\n if fname.endswith(\".py\"):\n filename = os.path.join(PATH_TO_TESTS, fname)\n new_errors = check_decorator_order(filename)\n errors += [f\"- {filename}, line {i}\" for i in new_errors]\n if len(errors) > 0:\n msg = \"\\n\".join(errors)\n raise ValueError(\n f\"The parameterized decorator (and its variants) should always be first, but this is not the case in the following files:\\n{msg}\"\n )\n\n\ndef find_all_documented_objects():\n \"\"\" Parse the content of all doc files to detect which classes and functions it documents\"\"\"\n documented_obj = []\n for doc_file in Path(PATH_TO_DOC).glob(\"**/*.rst\"):\n with open(doc_file) as f:\n content = f.read()\n raw_doc_objs = re.findall(r\"(?:autoclass|autofunction):: transformers.(\\S+)\\s+\", content)\n documented_obj += [obj.split(\".\")[-1] for obj in raw_doc_objs]\n return documented_obj\n\n\n# One good reason for not being documented is to be deprecated. 
Put in this list deprecated objects.\nDEPRECATED_OBJECTS = [\n \"AutoModelWithLMHead\",\n \"BartPretrainedModel\",\n \"GlueDataset\",\n \"GlueDataTrainingArguments\",\n \"LineByLineTextDataset\",\n \"LineByLineWithRefDataset\",\n \"LineByLineWithSOPTextDataset\",\n \"PretrainedBartModel\",\n \"PretrainedFSMTModel\",\n \"SingleSentenceClassificationProcessor\",\n \"SquadDataTrainingArguments\",\n \"SquadDataset\",\n \"SquadExample\",\n \"SquadFeatures\",\n \"SquadV1Processor\",\n \"SquadV2Processor\",\n \"TFAutoModelWithLMHead\",\n \"TFBartPretrainedModel\",\n \"TextDataset\",\n \"TextDatasetForNextSentencePrediction\",\n \"glue_compute_metrics\",\n \"glue_convert_examples_to_features\",\n \"glue_output_modes\",\n \"glue_processors\",\n \"glue_tasks_num_labels\",\n \"squad_convert_examples_to_features\",\n \"xnli_compute_metrics\",\n \"xnli_output_modes\",\n \"xnli_processors\",\n \"xnli_tasks_num_labels\",\n]\n\n# Exceptionally, some objects should not be documented after all rules passed.\n# ONLY PUT SOMETHING IN THIS LIST AS A LAST RESORT!\nUNDOCUMENTED_OBJECTS = [\n \"AddedToken\", # This is a tokenizers class.\n \"BasicTokenizer\", # Internal, should never have been in the main init.\n \"DPRPretrainedReader\", # Like an Encoder.\n \"ModelCard\", # Internal type.\n \"SqueezeBertModule\", # Internal building block (should have been called SqueezeBertLayer)\n \"TFDPRPretrainedReader\", # Like an Encoder.\n \"TransfoXLCorpus\", # Internal type.\n \"WordpieceTokenizer\", # Internal, should never have been in the main init.\n \"absl\", # External module\n \"add_end_docstrings\", # Internal, should never have been in the main init.\n \"add_start_docstrings\", # Internal, should never have been in the main init.\n \"cached_path\", # Internal used for downloading models.\n \"convert_tf_weight_name_to_pt_weight_name\", # Internal used to convert model weights\n \"logger\", # Internal logger\n \"logging\", # External module\n]\n\n# This list should be empty. 
Objects in it should get their own doc page.\nSHOULD_HAVE_THEIR_OWN_PAGE = [\n # bert-japanese\n \"BertJapaneseTokenizer\",\n \"CharacterTokenizer\",\n \"MecabTokenizer\",\n # Herbert\n \"HerbertTokenizer\",\n \"HerbertTokenizerFast\",\n # Phoebus\n \"PhobertTokenizer\",\n # Benchmarks\n \"PyTorchBenchmark\",\n \"PyTorchBenchmarkArguments\",\n \"TensorFlowBenchmark\",\n \"TensorFlowBenchmarkArguments\",\n]\n\n\ndef ignore_undocumented(name):\n \"\"\"Rules to determine if `name` should be undocumented.\"\"\"\n # NOT DOCUMENTED ON PURPOSE.\n # Magic attributes are not documented.\n if name.startswith(\"__\"):\n return True\n # Constants uppercase are not documented.\n if name.isupper():\n return True\n # PreTrainedModels / Encoders / Decoders / Layers / Embeddings / Attention are not documented.\n if (\n name.endswith(\"PreTrainedModel\")\n or name.endswith(\"Decoder\")\n or name.endswith(\"Encoder\")\n or name.endswith(\"Layer\")\n or name.endswith(\"Embeddings\")\n or name.endswith(\"Attention\")\n ):\n return True\n # Submodules are not documented.\n if os.path.isdir(os.path.join(PATH_TO_TRANSFORMERS, name)) or os.path.isfile(\n os.path.join(PATH_TO_TRANSFORMERS, f\"{name}.py\")\n ):\n return True\n # All load functions are not documented.\n if name.startswith(\"load_tf\") or name.startswith(\"load_pytorch\"):\n return True\n # is_xxx_available functions are not documented.\n if name.startswith(\"is_\") and name.endswith(\"_available\"):\n return True\n # Deprecated objects are not documented.\n if name in DEPRECATED_OBJECTS or name in UNDOCUMENTED_OBJECTS:\n return True\n # MMBT model does not really work.\n if name.startswith(\"MMBT\"):\n return True\n\n # NOT DOCUMENTED BUT NOT ON PURPOSE, SHOULD BE FIXED!\n # All data collators should be documented\n if name.startswith(\"DataCollator\") or name.endswith(\"data_collator\"):\n return True\n if name in SHOULD_HAVE_THEIR_OWN_PAGE:\n return True\n return False\n\n\ndef check_all_objects_are_documented():\n \"\"\" Check all models are properly documented.\"\"\"\n documented_objs = find_all_documented_objects()\n undocumented_objs = [c for c in dir(transformers) if c not in documented_objs and not ignore_undocumented(c)]\n if len(undocumented_objs) > 0:\n raise Exception(\n \"The following objects are in the public init so should be documented:\\n - \"\n + \"\\n - \".join(undocumented_objs)\n )\n\n\ndef check_repo_quality():\n \"\"\" Check all models are properly tested and documented.\"\"\"\n print(\"Checking all models are properly tested.\")\n check_all_decorator_order()\n check_all_models_are_tested()\n print(\"Checking all objects are properly documented.\")\n check_all_objects_are_documented()\n print(\"Checking all models are in at least one auto class.\")\n check_all_models_are_auto_configured()\n\n\nif __name__ == \"__main__\":\n check_repo_quality()\n",
"path": "utils/check_repo.py"
}
] | diff --git a/docs/source/index.rst b/docs/source/index.rst
index 1ada9c18d71c..f8b9c43670b6 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -356,6 +356,7 @@ TensorFlow and/or Flax.
model_doc/bart
model_doc/barthez
model_doc/bert
+ model_doc/bertweet
model_doc/bertgeneration
model_doc/blenderbot
model_doc/camembert
diff --git a/docs/source/model_doc/bertweet.rst b/docs/source/model_doc/bertweet.rst
new file mode 100644
index 000000000000..4fe1470def83
--- /dev/null
+++ b/docs/source/model_doc/bertweet.rst
@@ -0,0 +1,64 @@
+..
+ Copyright 2020 The HuggingFace Team. All rights reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+ an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+ specific language governing permissions and limitations under the License.
+
+Bertweet
+-----------------------------------------------------------------------------------------------------------------------
+
+Overview
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The BERTweet model was proposed in `BERTweet: A pre-trained language model for English Tweets
+<https://www.aclweb.org/anthology/2020.emnlp-demos.2.pdf>`__ by Dat Quoc Nguyen, Thanh Vu, Anh Tuan Nguyen.
+
+The abstract from the paper is the following:
+
+*We present BERTweet, the first public large-scale pre-trained language model for English Tweets. Our BERTweet, having
+the same architecture as BERT-base (Devlin et al., 2019), is trained using the RoBERTa pre-training procedure (Liu et
+al., 2019). Experiments show that BERTweet outperforms strong baselines RoBERTa-base and XLM-R-base (Conneau et al.,
+2020), producing better performance results than the previous state-of-the-art models on three Tweet NLP tasks:
+Part-of-speech tagging, Named-entity recognition and text classification.*
+
+Example of use:
+
+.. code-block::
+
+ import torch
+ from transformers import AutoModel, AutoTokenizer
+
+ bertweet = AutoModel.from_pretrained("vinai/bertweet-base")
+
+ # For transformers v4.x+:
+ tokenizer = AutoTokenizer.from_pretrained("vinai/bertweet-base", use_fast=False)
+
+ # For transformers v3.x:
+ # tokenizer = AutoTokenizer.from_pretrained("vinai/bertweet-base")
+
+ # INPUT TWEET IS ALREADY NORMALIZED!
+ line = "SC has first two presumptive cases of coronavirus , DHEC confirms HTTPURL via @USER :cry:"
+
+ input_ids = torch.tensor([tokenizer.encode(line)])
+
+ with torch.no_grad():
+ features = bertweet(input_ids) # Models outputs are now tuples
+
+ ## With TensorFlow 2.0+:
+ # from transformers import TFAutoModel
+ # bertweet = TFAutoModel.from_pretrained("vinai/bertweet-base")
+
+
+The original code can be found `here <https://github.com/VinAIResearch/BERTweet>`__.
+
+BertweetTokenizer
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: transformers.BertweetTokenizer
+ :members:
diff --git a/utils/check_repo.py b/utils/check_repo.py
index 5fd890a34770..7897f1ca6212 100644
--- a/utils/check_repo.py
+++ b/utils/check_repo.py
@@ -382,8 +382,6 @@ def find_all_documented_objects():
"BertJapaneseTokenizer",
"CharacterTokenizer",
"MecabTokenizer",
- # Bertweet
- "BertweetTokenizer",
# Herbert
"HerbertTokenizer",
"HerbertTokenizerFast",
|
paperless-ngx__paperless-ngx-2280 | [Bug] cannot save Mail Rule with "mail and attachment as seperate documents" in 1.11.1
Maybe it's just me, but I cannot save Mail Rule with "mail and attachment as seperate documents".
_Originally posted by @Limerick-gh in https://github.com/paperless-ngx/paperless-ngx/discussions/2265#discussioncomment-4557234_
[Bug] Missing consumption scope options in frontend
### Discussed in https://github.com/paperless-ngx/paperless-ngx/discussions/2265
Originally posted by **morremeyer**, December 30, 2022:
With #2000, frontend configuration for mail consumption was added.
With #848, at about the same time, email body & .eml file consumption was added.
#848 added the **consumption scope** for email consumption (see https://github.com/p-h-a-i-l/paperless-ngx/blob/0fda35723d62275a5beb783cbf9061d4d4a15703/src/paperless_mail/models.py#L59-L65) to decide between consuming:
* only the attachments
* the full email as .eml
* the full email as .eml **and** the attachments
The **consumption scope** is not yet configurable on the frontend. I'd be really happy if it became configurable there in a future version.
I'm pretty sure someone already has that planned, but I couldn't find an issue or discussion for it, so I'm opening this one to track the request.
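
For reference, a rough sketch of the three scopes above as a plain enum, just to make the decision logic concrete. The names and integer values here are illustrative assumptions; the real definition is the consumption-scope choices in `src/paperless_mail/models.py` linked above:

```python
from enum import IntEnum

# Illustrative only: names/values are assumptions, not the actual model choices.
class ConsumptionScope(IntEnum):
    ATTACHMENTS_ONLY = 1   # only the attachments
    EML_ONLY = 2           # the full email as .eml
    EVERYTHING = 3         # the full email as .eml and the attachments

def consumed_parts(scope: ConsumptionScope) -> tuple:
    """Return (consume_attachments, consume_eml) for a given scope."""
    consume_attachments = scope in (ConsumptionScope.ATTACHMENTS_ONLY, ConsumptionScope.EVERYTHING)
    consume_eml = scope in (ConsumptionScope.EML_ONLY, ConsumptionScope.EVERYTHING)
    return consume_attachments, consume_eml

print(consumed_parts(ConsumptionScope.EVERYTHING))  # (True, True)
```

On the backend side, the change captured in this record is small: `"consumption_scope"` is added to the `fields` list of `MailRuleSerializer` (see the after-state below), so the API — and therefore the frontend form — can read and write the value.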
| [
{
"content": "from documents.serialisers import CorrespondentField\nfrom documents.serialisers import DocumentTypeField\nfrom documents.serialisers import TagsField\nfrom paperless_mail.models import MailAccount\nfrom paperless_mail.models import MailRule\nfrom rest_framework import serializers\n\n\nclass ObfuscatedPasswordField(serializers.Field):\n \"\"\"\n Sends *** string instead of password in the clear\n \"\"\"\n\n def to_representation(self, value):\n return \"*\" * len(value)\n\n def to_internal_value(self, data):\n return data\n\n\nclass MailAccountSerializer(serializers.ModelSerializer):\n password = ObfuscatedPasswordField()\n\n class Meta:\n model = MailAccount\n depth = 1\n fields = [\n \"id\",\n \"name\",\n \"imap_server\",\n \"imap_port\",\n \"imap_security\",\n \"username\",\n \"password\",\n \"character_set\",\n ]\n\n def update(self, instance, validated_data):\n if \"password\" in validated_data:\n if len(validated_data.get(\"password\").replace(\"*\", \"\")) == 0:\n validated_data.pop(\"password\")\n super().update(instance, validated_data)\n return instance\n\n def create(self, validated_data):\n mail_account = MailAccount.objects.create(**validated_data)\n return mail_account\n\n\nclass AccountField(serializers.PrimaryKeyRelatedField):\n def get_queryset(self):\n return MailAccount.objects.all().order_by(\"-id\")\n\n\nclass MailRuleSerializer(serializers.ModelSerializer):\n account = AccountField(required=True)\n action_parameter = serializers.CharField(\n allow_null=True,\n required=False,\n default=\"\",\n )\n assign_correspondent = CorrespondentField(allow_null=True, required=False)\n assign_tags = TagsField(many=True, allow_null=True, required=False)\n assign_document_type = DocumentTypeField(allow_null=True, required=False)\n order = serializers.IntegerField(required=False)\n\n class Meta:\n model = MailRule\n depth = 1\n fields = [\n \"id\",\n \"name\",\n \"account\",\n \"folder\",\n \"filter_from\",\n \"filter_subject\",\n \"filter_body\",\n \"filter_attachment_filename\",\n \"maximum_age\",\n \"action\",\n \"action_parameter\",\n \"assign_title_from\",\n \"assign_tags\",\n \"assign_correspondent_from\",\n \"assign_correspondent\",\n \"assign_document_type\",\n \"order\",\n \"attachment_type\",\n ]\n\n def update(self, instance, validated_data):\n super().update(instance, validated_data)\n return instance\n\n def create(self, validated_data):\n if \"assign_tags\" in validated_data:\n assign_tags = validated_data.pop(\"assign_tags\")\n mail_rule = MailRule.objects.create(**validated_data)\n if assign_tags:\n mail_rule.assign_tags.set(assign_tags)\n return mail_rule\n\n def validate(self, attrs):\n if (\n attrs[\"action\"] == MailRule.MailAction.TAG\n or attrs[\"action\"] == MailRule.MailAction.MOVE\n ) and attrs[\"action_parameter\"] is None:\n raise serializers.ValidationError(\"An action parameter is required.\")\n\n return attrs\n",
"path": "src/paperless_mail/serialisers.py"
}
] | [
{
"content": "from documents.serialisers import CorrespondentField\nfrom documents.serialisers import DocumentTypeField\nfrom documents.serialisers import TagsField\nfrom paperless_mail.models import MailAccount\nfrom paperless_mail.models import MailRule\nfrom rest_framework import serializers\n\n\nclass ObfuscatedPasswordField(serializers.Field):\n \"\"\"\n Sends *** string instead of password in the clear\n \"\"\"\n\n def to_representation(self, value):\n return \"*\" * len(value)\n\n def to_internal_value(self, data):\n return data\n\n\nclass MailAccountSerializer(serializers.ModelSerializer):\n password = ObfuscatedPasswordField()\n\n class Meta:\n model = MailAccount\n depth = 1\n fields = [\n \"id\",\n \"name\",\n \"imap_server\",\n \"imap_port\",\n \"imap_security\",\n \"username\",\n \"password\",\n \"character_set\",\n ]\n\n def update(self, instance, validated_data):\n if \"password\" in validated_data:\n if len(validated_data.get(\"password\").replace(\"*\", \"\")) == 0:\n validated_data.pop(\"password\")\n super().update(instance, validated_data)\n return instance\n\n def create(self, validated_data):\n mail_account = MailAccount.objects.create(**validated_data)\n return mail_account\n\n\nclass AccountField(serializers.PrimaryKeyRelatedField):\n def get_queryset(self):\n return MailAccount.objects.all().order_by(\"-id\")\n\n\nclass MailRuleSerializer(serializers.ModelSerializer):\n account = AccountField(required=True)\n action_parameter = serializers.CharField(\n allow_null=True,\n required=False,\n default=\"\",\n )\n assign_correspondent = CorrespondentField(allow_null=True, required=False)\n assign_tags = TagsField(many=True, allow_null=True, required=False)\n assign_document_type = DocumentTypeField(allow_null=True, required=False)\n order = serializers.IntegerField(required=False)\n\n class Meta:\n model = MailRule\n depth = 1\n fields = [\n \"id\",\n \"name\",\n \"account\",\n \"folder\",\n \"filter_from\",\n \"filter_subject\",\n \"filter_body\",\n \"filter_attachment_filename\",\n \"maximum_age\",\n \"action\",\n \"action_parameter\",\n \"assign_title_from\",\n \"assign_tags\",\n \"assign_correspondent_from\",\n \"assign_correspondent\",\n \"assign_document_type\",\n \"order\",\n \"attachment_type\",\n \"consumption_scope\",\n ]\n\n def update(self, instance, validated_data):\n super().update(instance, validated_data)\n return instance\n\n def create(self, validated_data):\n if \"assign_tags\" in validated_data:\n assign_tags = validated_data.pop(\"assign_tags\")\n mail_rule = MailRule.objects.create(**validated_data)\n if assign_tags:\n mail_rule.assign_tags.set(assign_tags)\n return mail_rule\n\n def validate(self, attrs):\n if (\n attrs[\"action\"] == MailRule.MailAction.TAG\n or attrs[\"action\"] == MailRule.MailAction.MOVE\n ) and attrs[\"action_parameter\"] is None:\n raise serializers.ValidationError(\"An action parameter is required.\")\n\n return attrs\n",
"path": "src/paperless_mail/serialisers.py"
}
] | diff --git a/src-ui/messages.xlf b/src-ui/messages.xlf
index d833ae3eead..21ac728b301 100644
--- a/src-ui/messages.xlf
+++ b/src-ui/messages.xlf
@@ -967,7 +967,7 @@
</context-group>
<context-group purpose="location">
<context context-type="sourcefile">src/app/components/common/edit-dialog/mail-rule-edit-dialog/mail-rule-edit-dialog.component.html</context>
- <context context-type="linenumber">36</context>
+ <context context-type="linenumber">37</context>
</context-group>
<context-group purpose="location">
<context context-type="sourcefile">src/app/components/common/edit-dialog/storage-path-edit-dialog/storage-path-edit-dialog.component.html</context>
@@ -1006,7 +1006,7 @@
</context-group>
<context-group purpose="location">
<context context-type="sourcefile">src/app/components/common/edit-dialog/mail-rule-edit-dialog/mail-rule-edit-dialog.component.html</context>
- <context context-type="linenumber">37</context>
+ <context context-type="linenumber">38</context>
</context-group>
<context-group purpose="location">
<context context-type="sourcefile">src/app/components/common/edit-dialog/storage-path-edit-dialog/storage-path-edit-dialog.component.html</context>
@@ -1194,141 +1194,159 @@
<context context-type="linenumber">14</context>
</context-group>
</trans-unit>
+ <trans-unit id="559099472394646919" datatype="html">
+ <source>Consumption scope</source>
+ <context-group purpose="location">
+ <context context-type="sourcefile">src/app/components/common/edit-dialog/mail-rule-edit-dialog/mail-rule-edit-dialog.component.html</context>
+ <context context-type="linenumber">15</context>
+ </context-group>
+ </trans-unit>
<trans-unit id="56643687972548912" datatype="html">
<source>See docs for .eml processing requirements</source>
<context-group purpose="location">
<context context-type="sourcefile">src/app/components/common/edit-dialog/mail-rule-edit-dialog/mail-rule-edit-dialog.component.html</context>
- <context context-type="linenumber">14</context>
+ <context context-type="linenumber">15</context>
</context-group>
</trans-unit>
<trans-unit id="5488632521862493221" datatype="html">
<source>Paperless will only process mails that match <x id="START_EMPHASISED_TEXT" ctype="x-em" equiv-text="<em>"/>all<x id="CLOSE_EMPHASISED_TEXT" ctype="x-em" equiv-text="</em>"/> of the filters specified below.</source>
<context-group purpose="location">
<context context-type="sourcefile">src/app/components/common/edit-dialog/mail-rule-edit-dialog/mail-rule-edit-dialog.component.html</context>
- <context context-type="linenumber">17</context>
+ <context context-type="linenumber">18</context>
</context-group>
</trans-unit>
<trans-unit id="6925928412364847639" datatype="html">
<source>Filter from</source>
<context-group purpose="location">
<context context-type="sourcefile">src/app/components/common/edit-dialog/mail-rule-edit-dialog/mail-rule-edit-dialog.component.html</context>
- <context context-type="linenumber">18</context>
+ <context context-type="linenumber">19</context>
</context-group>
</trans-unit>
<trans-unit id="8497813481090627874" datatype="html">
<source>Filter subject</source>
<context-group purpose="location">
<context context-type="sourcefile">src/app/components/common/edit-dialog/mail-rule-edit-dialog/mail-rule-edit-dialog.component.html</context>
- <context context-type="linenumber">19</context>
+ <context context-type="linenumber">20</context>
</context-group>
</trans-unit>
<trans-unit id="7314357616097563149" datatype="html">
<source>Filter body</source>
<context-group purpose="location">
<context context-type="sourcefile">src/app/components/common/edit-dialog/mail-rule-edit-dialog/mail-rule-edit-dialog.component.html</context>
- <context context-type="linenumber">20</context>
+ <context context-type="linenumber">21</context>
</context-group>
</trans-unit>
<trans-unit id="5031687746498952417" datatype="html">
<source>Filter attachment filename</source>
<context-group purpose="location">
<context context-type="sourcefile">src/app/components/common/edit-dialog/mail-rule-edit-dialog/mail-rule-edit-dialog.component.html</context>
- <context context-type="linenumber">21</context>
+ <context context-type="linenumber">22</context>
</context-group>
</trans-unit>
<trans-unit id="4245210767172267486" datatype="html">
<source>Only consume documents which entirely match this filename if specified. Wildcards such as *.pdf or *invoice* are allowed. Case insensitive.</source>
<context-group purpose="location">
<context context-type="sourcefile">src/app/components/common/edit-dialog/mail-rule-edit-dialog/mail-rule-edit-dialog.component.html</context>
- <context context-type="linenumber">21</context>
+ <context context-type="linenumber">22</context>
</context-group>
</trans-unit>
<trans-unit id="9216117865911519658" datatype="html">
<source>Action</source>
<context-group purpose="location">
<context context-type="sourcefile">src/app/components/common/edit-dialog/mail-rule-edit-dialog/mail-rule-edit-dialog.component.html</context>
- <context context-type="linenumber">24</context>
+ <context context-type="linenumber">25</context>
</context-group>
</trans-unit>
<trans-unit id="4274038999388817994" datatype="html">
<source>Action is only performed when documents are consumed from the mail. Mails without attachments remain entirely untouched.</source>
<context-group purpose="location">
<context context-type="sourcefile">src/app/components/common/edit-dialog/mail-rule-edit-dialog/mail-rule-edit-dialog.component.html</context>
- <context context-type="linenumber">24</context>
+ <context context-type="linenumber">25</context>
</context-group>
</trans-unit>
<trans-unit id="1261794314435932203" datatype="html">
<source>Action parameter</source>
<context-group purpose="location">
<context context-type="sourcefile">src/app/components/common/edit-dialog/mail-rule-edit-dialog/mail-rule-edit-dialog.component.html</context>
- <context context-type="linenumber">25</context>
+ <context context-type="linenumber">26</context>
</context-group>
</trans-unit>
<trans-unit id="6093797930511670257" datatype="html">
<source>Assign title from</source>
<context-group purpose="location">
<context context-type="sourcefile">src/app/components/common/edit-dialog/mail-rule-edit-dialog/mail-rule-edit-dialog.component.html</context>
- <context context-type="linenumber">26</context>
+ <context context-type="linenumber">27</context>
</context-group>
</trans-unit>
<trans-unit id="6695990587380209737" datatype="html">
<source>Assign document type</source>
<context-group purpose="location">
<context context-type="sourcefile">src/app/components/common/edit-dialog/mail-rule-edit-dialog/mail-rule-edit-dialog.component.html</context>
- <context context-type="linenumber">28</context>
+ <context context-type="linenumber">29</context>
</context-group>
</trans-unit>
<trans-unit id="4754802869258527587" datatype="html">
<source>Assign correspondent from</source>
<context-group purpose="location">
<context context-type="sourcefile">src/app/components/common/edit-dialog/mail-rule-edit-dialog/mail-rule-edit-dialog.component.html</context>
- <context context-type="linenumber">29</context>
+ <context context-type="linenumber">30</context>
</context-group>
</trans-unit>
<trans-unit id="4875491778188965469" datatype="html">
<source>Assign correspondent</source>
<context-group purpose="location">
<context context-type="sourcefile">src/app/components/common/edit-dialog/mail-rule-edit-dialog/mail-rule-edit-dialog.component.html</context>
- <context context-type="linenumber">30</context>
+ <context context-type="linenumber">31</context>
</context-group>
</trans-unit>
<trans-unit id="1519954996184640001" datatype="html">
<source>Error</source>
<context-group purpose="location">
<context context-type="sourcefile">src/app/components/common/edit-dialog/mail-rule-edit-dialog/mail-rule-edit-dialog.component.html</context>
- <context context-type="linenumber">35</context>
+ <context context-type="linenumber">36</context>
</context-group>
<context-group purpose="location">
<context context-type="sourcefile">src/app/services/toast.service.ts</context>
<context context-type="linenumber">32</context>
</context-group>
</trans-unit>
- <trans-unit id="6233529027580744166" datatype="html">
- <source>Only process attachments.</source>
+ <trans-unit id="6886003843406464884" datatype="html">
+ <source>Only process attachments</source>
<context-group purpose="location">
<context context-type="sourcefile">src/app/components/common/edit-dialog/mail-rule-edit-dialog/mail-rule-edit-dialog.component.ts</context>
- <context context-type="linenumber">24</context>
+ <context context-type="linenumber">25</context>
+ </context-group>
+ <context-group purpose="location">
+ <context context-type="sourcefile">src/app/components/common/edit-dialog/mail-rule-edit-dialog/mail-rule-edit-dialog.component.ts</context>
+ <context context-type="linenumber">36</context>
</context-group>
</trans-unit>
- <trans-unit id="3622418743488695840" datatype="html">
- <source>Process with embedded attachments as .eml</source>
+ <trans-unit id="936923743212522897" datatype="html">
+ <source>Process all files, including 'inline' attachments</source>
<context-group purpose="location">
<context context-type="sourcefile">src/app/components/common/edit-dialog/mail-rule-edit-dialog/mail-rule-edit-dialog.component.ts</context>
- <context context-type="linenumber">28</context>
+ <context context-type="linenumber">29</context>
</context-group>
</trans-unit>
- <trans-unit id="7205371824972320534" datatype="html">
- <source>Process as .eml and attachments as separate documents</source>
+ <trans-unit id="9025522236384167767" datatype="html">
+ <source>Process message as .eml</source>
<context-group purpose="location">
<context context-type="sourcefile">src/app/components/common/edit-dialog/mail-rule-edit-dialog/mail-rule-edit-dialog.component.ts</context>
- <context context-type="linenumber">32</context>
+ <context context-type="linenumber">40</context>
+ </context-group>
+ </trans-unit>
+ <trans-unit id="7411485377918318115" datatype="html">
+ <source>Process message as .eml and attachments separately</source>
+ <context-group purpose="location">
+ <context context-type="sourcefile">src/app/components/common/edit-dialog/mail-rule-edit-dialog/mail-rule-edit-dialog.component.ts</context>
+ <context context-type="linenumber">44</context>
</context-group>
</trans-unit>
<trans-unit id="7022070615528435141" datatype="html">
<source>Delete</source>
<context-group purpose="location">
<context context-type="sourcefile">src/app/components/common/edit-dialog/mail-rule-edit-dialog/mail-rule-edit-dialog.component.ts</context>
- <context context-type="linenumber">39</context>
+ <context context-type="linenumber">51</context>
</context-group>
<context-group purpose="location">
<context context-type="sourcefile">src/app/components/document-detail/document-detail.component.html</context>
@@ -1391,84 +1409,84 @@
<source>Move to specified folder</source>
<context-group purpose="location">
<context context-type="sourcefile">src/app/components/common/edit-dialog/mail-rule-edit-dialog/mail-rule-edit-dialog.component.ts</context>
- <context context-type="linenumber">43</context>
+ <context context-type="linenumber">55</context>
</context-group>
</trans-unit>
<trans-unit id="4593278936733161020" datatype="html">
<source>Mark as read, don't process read mails</source>
<context-group purpose="location">
<context context-type="sourcefile">src/app/components/common/edit-dialog/mail-rule-edit-dialog/mail-rule-edit-dialog.component.ts</context>
- <context context-type="linenumber">47</context>
+ <context context-type="linenumber">59</context>
</context-group>
</trans-unit>
<trans-unit id="2378921144019636516" datatype="html">
<source>Flag the mail, don't process flagged mails</source>
<context-group purpose="location">
<context context-type="sourcefile">src/app/components/common/edit-dialog/mail-rule-edit-dialog/mail-rule-edit-dialog.component.ts</context>
- <context context-type="linenumber">51</context>
+ <context context-type="linenumber">63</context>
</context-group>
</trans-unit>
<trans-unit id="6457024618858980302" datatype="html">
<source>Tag the mail with specified tag, don't process tagged mails</source>
<context-group purpose="location">
<context context-type="sourcefile">src/app/components/common/edit-dialog/mail-rule-edit-dialog/mail-rule-edit-dialog.component.ts</context>
- <context context-type="linenumber">55</context>
+ <context context-type="linenumber">67</context>
</context-group>
</trans-unit>
<trans-unit id="4673329664686432878" datatype="html">
<source>Use subject as title</source>
<context-group purpose="location">
<context context-type="sourcefile">src/app/components/common/edit-dialog/mail-rule-edit-dialog/mail-rule-edit-dialog.component.ts</context>
- <context context-type="linenumber">62</context>
+ <context context-type="linenumber">74</context>
</context-group>
</trans-unit>
<trans-unit id="8645471396972938185" datatype="html">
<source>Use attachment filename as title</source>
<context-group purpose="location">
<context context-type="sourcefile">src/app/components/common/edit-dialog/mail-rule-edit-dialog/mail-rule-edit-dialog.component.ts</context>
- <context context-type="linenumber">66</context>
+ <context context-type="linenumber">78</context>
</context-group>
</trans-unit>
<trans-unit id="1568902914205618549" datatype="html">
<source>Do not assign a correspondent</source>
<context-group purpose="location">
<context context-type="sourcefile">src/app/components/common/edit-dialog/mail-rule-edit-dialog/mail-rule-edit-dialog.component.ts</context>
- <context context-type="linenumber">73</context>
+ <context context-type="linenumber">85</context>
</context-group>
</trans-unit>
<trans-unit id="3567746385454588269" datatype="html">
<source>Use mail address</source>
<context-group purpose="location">
<context context-type="sourcefile">src/app/components/common/edit-dialog/mail-rule-edit-dialog/mail-rule-edit-dialog.component.ts</context>
- <context context-type="linenumber">77</context>
+ <context context-type="linenumber">89</context>
</context-group>
</trans-unit>
<trans-unit id="445154175758965852" datatype="html">
<source>Use name (or mail address if not available)</source>
<context-group purpose="location">
<context context-type="sourcefile">src/app/components/common/edit-dialog/mail-rule-edit-dialog/mail-rule-edit-dialog.component.ts</context>
- <context context-type="linenumber">81</context>
+ <context context-type="linenumber">93</context>
</context-group>
</trans-unit>
<trans-unit id="1258862217749148424" datatype="html">
<source>Use correspondent selected below</source>
<context-group purpose="location">
<context context-type="sourcefile">src/app/components/common/edit-dialog/mail-rule-edit-dialog/mail-rule-edit-dialog.component.ts</context>
- <context context-type="linenumber">85</context>
+ <context context-type="linenumber">97</context>
</context-group>
</trans-unit>
<trans-unit id="3147349817770432927" datatype="html">
<source>Create new mail rule</source>
<context-group purpose="location">
<context context-type="sourcefile">src/app/components/common/edit-dialog/mail-rule-edit-dialog/mail-rule-edit-dialog.component.ts</context>
- <context context-type="linenumber">125</context>
+ <context context-type="linenumber">137</context>
</context-group>
</trans-unit>
<trans-unit id="3374331029704382439" datatype="html">
<source>Edit mail rule</source>
<context-group purpose="location">
<context context-type="sourcefile">src/app/components/common/edit-dialog/mail-rule-edit-dialog/mail-rule-edit-dialog.component.ts</context>
- <context context-type="linenumber">129</context>
+ <context context-type="linenumber">141</context>
</context-group>
</trans-unit>
<trans-unit id="6036319582202941456" datatype="html">
diff --git a/src-ui/src/app/components/common/edit-dialog/mail-rule-edit-dialog/mail-rule-edit-dialog.component.html b/src-ui/src/app/components/common/edit-dialog/mail-rule-edit-dialog/mail-rule-edit-dialog.component.html
index 4af044407c2..64d54a72cdc 100644
--- a/src-ui/src/app/components/common/edit-dialog/mail-rule-edit-dialog/mail-rule-edit-dialog.component.html
+++ b/src-ui/src/app/components/common/edit-dialog/mail-rule-edit-dialog/mail-rule-edit-dialog.component.html
@@ -11,7 +11,8 @@ <h4 class="modal-title" id="modal-basic-title">{{getTitle()}}</h4>
<app-input-select i18n-title title="Account" [items]="accounts" formControlName="account"></app-input-select>
<app-input-text i18n-title title="Folder" formControlName="folder" i18n-hint hint="Subfolders must be separated by a delimiter, often a dot ('.') or slash ('/'), but it varies by mail server." [error]="error?.folder"></app-input-text>
<app-input-number i18n-title title="Maximum age (days)" formControlName="maximum_age" [showAdd]="false" [error]="error?.maximum_age"></app-input-number>
- <app-input-select i18n-title title="Attachment type" [items]="attachmentTypeOptions" formControlName="attachment_type" i18n-hint hint="See docs for .eml processing requirements"></app-input-select>
+ <app-input-select i18n-title title="Attachment type" [items]="attachmentTypeOptions" formControlName="attachment_type"></app-input-select>
+ <app-input-select i18n-title title="Consumption scope" [items]="consumptionScopeOptions" formControlName="consumption_scope" i18n-hint hint="See docs for .eml processing requirements"></app-input-select>
</div>
<div class="col">
<p class="small" i18n>Paperless will only process mails that match <em>all</em> of the filters specified below.</p>
diff --git a/src-ui/src/app/components/common/edit-dialog/mail-rule-edit-dialog/mail-rule-edit-dialog.component.ts b/src-ui/src/app/components/common/edit-dialog/mail-rule-edit-dialog/mail-rule-edit-dialog.component.ts
index a2486e141d1..63699fd66a3 100644
--- a/src-ui/src/app/components/common/edit-dialog/mail-rule-edit-dialog/mail-rule-edit-dialog.component.ts
+++ b/src-ui/src/app/components/common/edit-dialog/mail-rule-edit-dialog/mail-rule-edit-dialog.component.ts
@@ -12,6 +12,7 @@ import {
MailMetadataCorrespondentOption,
MailMetadataTitleOption,
PaperlessMailRule,
+ MailRuleConsumptionScope,
} from 'src/app/data/paperless-mail-rule'
import { CorrespondentService } from 'src/app/services/rest/correspondent.service'
import { DocumentTypeService } from 'src/app/services/rest/document-type.service'
@@ -21,15 +22,26 @@ import { MailRuleService } from 'src/app/services/rest/mail-rule.service'
const ATTACHMENT_TYPE_OPTIONS = [
{
id: MailFilterAttachmentType.Attachments,
- name: $localize`Only process attachments.`,
+ name: $localize`Only process attachments`,
},
{
- id: MailFilterAttachmentType.Email_Only,
- name: $localize`Process with embedded attachments as .eml`,
+ id: MailFilterAttachmentType.Everything,
+ name: $localize`Process all files, including 'inline' attachments`,
},
+]
+
+const CONSUMPTION_SCOPE_OPTIONS = [
{
- id: MailFilterAttachmentType.Everything,
- name: $localize`Process as .eml and attachments as separate documents`,
+ id: MailRuleConsumptionScope.Attachments,
+ name: $localize`Only process attachments`,
+ },
+ {
+ id: MailRuleConsumptionScope.Email_Only,
+ name: $localize`Process message as .eml`,
+ },
+ {
+ id: MailRuleConsumptionScope.Everything,
+ name: $localize`Process message as .eml and attachments separately`,
},
]
@@ -140,6 +152,7 @@ export class MailRuleEditDialogComponent extends EditDialogComponent<PaperlessMa
filter_attachment_filename: new FormControl(null),
maximum_age: new FormControl(null),
attachment_type: new FormControl(MailFilterAttachmentType.Attachments),
+ consumption_scope: new FormControl(MailRuleConsumptionScope.Attachments),
action: new FormControl(MailAction.MarkRead),
action_parameter: new FormControl(null),
assign_title_from: new FormControl(MailMetadataTitleOption.FromSubject),
@@ -181,4 +194,8 @@ export class MailRuleEditDialogComponent extends EditDialogComponent<PaperlessMa
get metadataCorrespondentOptions() {
return METADATA_CORRESPONDENT_OPTIONS
}
+
+ get consumptionScopeOptions() {
+ return CONSUMPTION_SCOPE_OPTIONS
+ }
}
diff --git a/src-ui/src/app/data/paperless-mail-rule.ts b/src-ui/src/app/data/paperless-mail-rule.ts
index 1c9f1be7b18..9f526d4042d 100644
--- a/src-ui/src/app/data/paperless-mail-rule.ts
+++ b/src-ui/src/app/data/paperless-mail-rule.ts
@@ -1,6 +1,11 @@
import { ObjectWithId } from './object-with-id'
export enum MailFilterAttachmentType {
+ Attachments = 1,
+ Everything = 2,
+}
+
+export enum MailRuleConsumptionScope {
Attachments = 1,
Email_Only = 2,
Everything = 3,
diff --git a/src/paperless_mail/serialisers.py b/src/paperless_mail/serialisers.py
index 5944656a7ef..0d15f617cd6 100644
--- a/src/paperless_mail/serialisers.py
+++ b/src/paperless_mail/serialisers.py
@@ -86,6 +86,7 @@ class Meta:
"assign_document_type",
"order",
"attachment_type",
+ "consumption_scope",
]
def update(self, instance, validated_data):
|
nipy__nipype-2852 | nipype/conftest.py should be excluded from API documentation
### Summary
The auto-generated API docs include `conftest.py`, which has a fixture. Pytest has turned calling a fixture directly into an error, and apparently the fixture is getting called when the docs are generated.
This is what's currently breaking the Circle builds.
| [
{
"content": "#!/usr/bin/env python\n# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n\"\"\"Script to auto-generate interface docs.\n\"\"\"\nfrom __future__ import print_function, unicode_literals\n# stdlib imports\nimport os\nimport sys\n\n# *****************************************************************************\nif __name__ == '__main__':\n nipypepath = os.path.abspath('..')\n sys.path.insert(1, nipypepath)\n # local imports\n from interfacedocgen import InterfaceHelpWriter\n package = 'nipype'\n outdir = os.path.join('interfaces', 'generated')\n docwriter = InterfaceHelpWriter(package)\n # Packages that should not be included in generated API docs.\n docwriter.package_skip_patterns += [\n '\\.external$',\n '\\.fixes$',\n '\\.utils$',\n '\\.pipeline',\n '\\.testing',\n '\\.caching',\n '\\.scripts',\n ]\n # Modules that should not be included in generated API docs.\n docwriter.module_skip_patterns += [\n '\\.version$',\n '\\.interfaces\\.base$',\n '\\.interfaces\\.matlab$',\n '\\.interfaces\\.rest$',\n '\\.interfaces\\.pymvpa$',\n '\\.interfaces\\.slicer\\.generate_classes$',\n '\\.interfaces\\.spm\\.base$',\n '\\.interfaces\\.traits',\n '\\.pipeline\\.alloy$',\n '\\.pipeline\\.s3_node_wrapper$',\n '\\.testing',\n '\\.scripts',\n ]\n docwriter.class_skip_patterns += [\n 'AFNICommand',\n 'ANTS',\n 'FSLCommand',\n 'FS',\n 'Info',\n '^SPM',\n 'Tester',\n 'Spec$',\n 'Numpy'\n # NipypeTester raises an\n # exception when instantiated in\n # InterfaceHelpWriter.generate_api_doc\n 'NipypeTester',\n ]\n docwriter.write_api_docs(outdir)\n docwriter.write_index(outdir, 'gen', relative_to='interfaces')\n print('%d files written' % len(docwriter.written_modules))\n",
"path": "tools/build_interface_docs.py"
}
] | [
{
"content": "#!/usr/bin/env python\n# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n\"\"\"Script to auto-generate interface docs.\n\"\"\"\nfrom __future__ import print_function, unicode_literals\n# stdlib imports\nimport os\nimport sys\n\n# *****************************************************************************\nif __name__ == '__main__':\n nipypepath = os.path.abspath('..')\n sys.path.insert(1, nipypepath)\n # local imports\n from interfacedocgen import InterfaceHelpWriter\n package = 'nipype'\n outdir = os.path.join('interfaces', 'generated')\n docwriter = InterfaceHelpWriter(package)\n # Packages that should not be included in generated API docs.\n docwriter.package_skip_patterns += [\n '\\.external$',\n '\\.fixes$',\n '\\.utils$',\n '\\.pipeline',\n '\\.testing',\n '\\.caching',\n '\\.scripts',\n ]\n # Modules that should not be included in generated API docs.\n docwriter.module_skip_patterns += [\n '\\.version$',\n '\\.interfaces\\.base$',\n '\\.interfaces\\.matlab$',\n '\\.interfaces\\.rest$',\n '\\.interfaces\\.pymvpa$',\n '\\.interfaces\\.slicer\\.generate_classes$',\n '\\.interfaces\\.spm\\.base$',\n '\\.interfaces\\.traits',\n '\\.pipeline\\.alloy$',\n '\\.pipeline\\.s3_node_wrapper$',\n '\\.testing',\n '\\.scripts',\n '\\.conftest',\n ]\n docwriter.class_skip_patterns += [\n 'AFNICommand',\n 'ANTS',\n 'FSLCommand',\n 'FS',\n 'Info',\n '^SPM',\n 'Tester',\n 'Spec$',\n 'Numpy'\n # NipypeTester raises an\n # exception when instantiated in\n # InterfaceHelpWriter.generate_api_doc\n 'NipypeTester',\n ]\n docwriter.write_api_docs(outdir)\n docwriter.write_index(outdir, 'gen', relative_to='interfaces')\n print('%d files written' % len(docwriter.written_modules))\n",
"path": "tools/build_interface_docs.py"
}
] | diff --git a/tools/build_interface_docs.py b/tools/build_interface_docs.py
index 6fa518381e..37b99cb476 100755
--- a/tools/build_interface_docs.py
+++ b/tools/build_interface_docs.py
@@ -41,6 +41,7 @@
'\.pipeline\.s3_node_wrapper$',
'\.testing',
'\.scripts',
+ '\.conftest',
]
docwriter.class_skip_patterns += [
'AFNICommand',
|
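For the nipype record above: the build break happens because the API-doc generator imports `nipype/conftest.py` and ends up invoking its fixture as a plain function, which recent pytest releases turn into a hard error. A minimal standalone sketch of that failure mode, with a made-up fixture name standing in for the real one:

```
import pytest


@pytest.fixture
def example_fixture():  # hypothetical stand-in for the fixture in nipype/conftest.py
    return 42


# On recent pytest releases, calling the decorated function directly raises an
# error along the lines of "Fixtures are not meant to be called directly",
# which is effectively what naive doc introspection ends up doing.
try:
    example_fixture()
except BaseException as exc:  # pytest's failure exception derives from BaseException
    print(type(exc).__name__, exc)
```

The merged fix above avoids importing the file at all by adding `'\.conftest'` to `module_skip_patterns`.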
akvo__akvo-rsr-3132 | When logged in, the landing page should be "myRSR"
| [
{
"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Akvo RSR is covered by the GNU Affero General Public License.\n\nSee more details in the license.txt file located at the root folder of the Akvo RSR module.\nFor additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\"\"\"\n\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponseRedirect\n\n\ndef index(request):\n \"\"\".\"\"\"\n return HttpResponseRedirect(reverse('project-directory', args=[]))\n",
"path": "akvo/rsr/views/__init__.py"
}
] | [
{
"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Akvo RSR is covered by the GNU Affero General Public License.\n\nSee more details in the license.txt file located at the root folder of the Akvo RSR module.\nFor additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\"\"\"\n\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponseRedirect\n\n\ndef index(request):\n \"\"\"Redirect user to project directory or My RSR.\"\"\"\n\n redirect_url = 'project-directory' if request.user.is_anonymous() else 'my_rsr'\n return HttpResponseRedirect(reverse(redirect_url, args=[]))\n",
"path": "akvo/rsr/views/__init__.py"
}
] | diff --git a/akvo/rsr/views/__init__.py b/akvo/rsr/views/__init__.py
index 517aa420d7..a1f5dbf4ca 100644
--- a/akvo/rsr/views/__init__.py
+++ b/akvo/rsr/views/__init__.py
@@ -11,5 +11,7 @@
def index(request):
- """."""
- return HttpResponseRedirect(reverse('project-directory', args=[]))
+ """Redirect user to project directory or My RSR."""
+
+ redirect_url = 'project-directory' if request.user.is_anonymous() else 'my_rsr'
+ return HttpResponseRedirect(reverse(redirect_url, args=[]))
|
pulp__pulpcore-4727 | pulp file python package reporting wrongly
Starting with pulpcore 3.40, the pulp_file plugin's Python package started reporting itself as pulp_file instead of pulp-file.
| [
{
"content": "from pulpcore.plugin import PulpPluginAppConfig\n\n\nclass PulpFilePluginAppConfig(PulpPluginAppConfig):\n \"\"\"\n Entry point for pulp_file plugin.\n \"\"\"\n\n name = \"pulp_file.app\"\n label = \"file\"\n version = \"3.41.1.dev\"\n python_package_name = \"pulp_file\" # TODO Add python_module_name\n domain_compatible = True\n",
"path": "pulp_file/app/__init__.py"
}
] | [
{
"content": "from pulpcore.plugin import PulpPluginAppConfig\n\n\nclass PulpFilePluginAppConfig(PulpPluginAppConfig):\n \"\"\"\n Entry point for pulp_file plugin.\n \"\"\"\n\n name = \"pulp_file.app\"\n label = \"file\"\n version = \"3.41.1.dev\"\n python_package_name = \"pulp-file\" # TODO Add python_module_name\n domain_compatible = True\n",
"path": "pulp_file/app/__init__.py"
}
] | diff --git a/CHANGES/4724.bugfix b/CHANGES/4724.bugfix
new file mode 100644
index 0000000000..f5de72e61d
--- /dev/null
+++ b/CHANGES/4724.bugfix
@@ -0,0 +1 @@
+Fixed that `pulp_file` presented its `python_package` as `pulp_file` instead of `pulp-file`.
diff --git a/pulp_file/app/__init__.py b/pulp_file/app/__init__.py
index 7ed000a8e8..d1bb39be6d 100644
--- a/pulp_file/app/__init__.py
+++ b/pulp_file/app/__init__.py
@@ -9,5 +9,5 @@ class PulpFilePluginAppConfig(PulpPluginAppConfig):
name = "pulp_file.app"
label = "file"
version = "3.41.1.dev"
- python_package_name = "pulp_file" # TODO Add python_module_name
+ python_package_name = "pulp-file" # TODO Add python_module_name
domain_compatible = True
|
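On the pulp_file record above: the mix-up is between the importable module name (underscore) and the published distribution name (dash). PEP 503 normalization treats the two spellings as the same project, but the `python_package_name` field is meant to hold the distribution name (the in-code TODO even notes that a separate `python_module_name` is still missing), hence the one-character fix. A tiny illustration using the `packaging` library:

```
from packaging.utils import canonicalize_name

# Both spellings normalize to the same canonical project name...
assert canonicalize_name("pulp-file") == canonicalize_name("pulp_file") == "pulp-file"

# ...but the two identifiers are used in different places:
#   pip install pulp-file   (distribution name, dashed)
#   import pulp_file        (module name, underscored)
```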
cocotb__cocotb-1298 | Change setup.py to list the version as 1.x-dev for versions installed from github
As suggested by @themperek, it would be neat if cocotb behaved like this:
```
> pip install git+https://github.com/cocotb/cocotb
> python -c "import cocotb; print(cocotb.__version__)"
1.4.0-dev
```
| [
{
"content": "# Package versioning solution originally found here:\n# http://stackoverflow.com/q/458550\n\n# Store the version here so:\n# 1) we don't load dependencies by storing it in __init__.py\n# 2) we can import it in setup.py for the same reason\n# 3) we can import it into your module\n__version__ = '1.3.0'\n",
"path": "cocotb/_version.py"
}
] | [
{
"content": "# Package versioning solution originally found here:\n# http://stackoverflow.com/q/458550\n\n# Store the version here so:\n# 1) we don't load dependencies by storing it in __init__.py\n# 2) we can import it in setup.py for the same reason\n# 3) we can import it into your module\n__version__ = '1.4.0.dev0'\n",
"path": "cocotb/_version.py"
}
] | diff --git a/cocotb/_version.py b/cocotb/_version.py
index e88ee234c7..46ea3afc99 100644
--- a/cocotb/_version.py
+++ b/cocotb/_version.py
@@ -5,4 +5,4 @@
# 1) we don't load dependencies by storing it in __init__.py
# 2) we can import it in setup.py for the same reason
# 3) we can import it into your module
-__version__ = '1.3.0'
+__version__ = '1.4.0.dev0'
|
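For the cocotb record above, the merged change uses the PEP 440 spelling `1.4.0.dev0` rather than the `1.4.0-dev` string from the request; both normalize to the same version, and dev releases order before the final release. A quick check with the `packaging` library (illustrative only, not part of the patch):

```
from packaging.version import Version

v = Version("1.4.0.dev0")
print(v.is_devrelease)            # True
print(v < Version("1.4.0"))       # True: dev builds sort before the release
print(Version("1.4.0-dev") == v)  # True: PEP 440 normalizes the dashed spelling
```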
locustio__locust-1760 | Locust stopped working after Flask 2.0 got released
in setup.py I can see:
` "flask>=1.1.2", `
I guess it should be hardcoded to ==1.1.2 for now.
it crashes with:
```
File "/root/.local/share/virtualenvs/xxxxxxx/lib/python3.6/site-packages/locust/web.py", line 102, in __init__
app.jinja_options["extensions"].append("jinja2.ext.do")
KeyError: 'extensions'
```
| [
{
"content": "# -*- coding: utf-8 -*-\nimport ast\nimport os\nimport re\nimport sys\n\nfrom setuptools import find_packages, setup\n\nROOT_PATH = os.path.abspath(os.path.dirname(__file__))\n\n# parse version from locust/__init__.py\n_version_re = re.compile(r\"__version__\\s+=\\s+(.*)\")\n_init_file = os.path.join(ROOT_PATH, \"locust\", \"__init__.py\")\nwith open(_init_file, \"rb\") as f:\n version = str(ast.literal_eval(_version_re.search(f.read().decode(\"utf-8\")).group(1)))\n\nsetup(\n name=\"locust\",\n version=version,\n install_requires=[\n \"gevent>=20.9.0\",\n \"flask>=1.1.2\",\n \"Werkzeug>=1.0.1\",\n \"requests>=2.9.1\",\n \"msgpack>=0.6.2\",\n \"pyzmq>=16.0.2\",\n \"geventhttpclient>=1.4.4\",\n \"ConfigArgParse>=1.0\",\n \"psutil>=5.6.7\",\n \"Flask-BasicAuth>=0.2.0\",\n ],\n test_suite=\"locust.test\",\n tests_require=[\n \"cryptography\",\n \"mock\",\n \"pyquery\",\n ],\n extras_require={\n \":sys_platform == 'win32'\": [\"pywin32\"],\n },\n)\n",
"path": "setup.py"
}
] | [
{
"content": "# -*- coding: utf-8 -*-\nimport ast\nimport os\nimport re\nimport sys\n\nfrom setuptools import find_packages, setup\n\nROOT_PATH = os.path.abspath(os.path.dirname(__file__))\n\n# parse version from locust/__init__.py\n_version_re = re.compile(r\"__version__\\s+=\\s+(.*)\")\n_init_file = os.path.join(ROOT_PATH, \"locust\", \"__init__.py\")\nwith open(_init_file, \"rb\") as f:\n version = str(ast.literal_eval(_version_re.search(f.read().decode(\"utf-8\")).group(1)))\n\nsetup(\n name=\"locust\",\n version=version,\n install_requires=[\n \"gevent>=20.9.0\",\n \"flask==1.1.2\",\n \"Werkzeug>=1.0.1\",\n \"requests>=2.9.1\",\n \"msgpack>=0.6.2\",\n \"pyzmq>=16.0.2\",\n \"geventhttpclient>=1.4.4\",\n \"ConfigArgParse>=1.0\",\n \"psutil>=5.6.7\",\n \"Flask-BasicAuth>=0.2.0\",\n ],\n test_suite=\"locust.test\",\n tests_require=[\n \"cryptography\",\n \"mock\",\n \"pyquery\",\n ],\n extras_require={\n \":sys_platform == 'win32'\": [\"pywin32\"],\n },\n)\n",
"path": "setup.py"
}
] | diff --git a/setup.py b/setup.py
index c2596f5e5e..a03f1a3f03 100644
--- a/setup.py
+++ b/setup.py
@@ -19,7 +19,7 @@
version=version,
install_requires=[
"gevent>=20.9.0",
- "flask>=1.1.2",
+ "flask==1.1.2",
"Werkzeug>=1.0.1",
"requests>=2.9.1",
"msgpack>=0.6.2",
|
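Regarding the locust record above: the merged change pins `flask==1.1.2`, which sidesteps the breakage; the underlying cause is that Flask 2.0 no longer pre-populates `jinja_options` with an `extensions` list, so indexing it raises the KeyError shown in the traceback. A version-agnostic rewrite of the failing line is also possible (a sketch only, not the fix that was merged):

```
from flask import Flask

app = Flask(__name__)

# Works on Flask 1.x and 2.x: rebuild jinja_options instead of assuming an
# "extensions" key is already present, then append the extra Jinja extension.
app.jinja_options = {
    **app.jinja_options,
    "extensions": [*app.jinja_options.get("extensions", []), "jinja2.ext.do"],
}
```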
quantumlib__Cirq-1160 | Broken Hadamard gate decomposition
Steps to reproduce:
```
In [1]: import cirq
In [2]: q = cirq.NamedQubit('q')
In [3]: cirq.Circuit.from_ops(cirq.decompose([cirq.H(q)]))._unitary_()
Out[3]:
array([[ 0.5+0.5j,  0.5+0.5j],
       [ 0.5+0.5j, -0.5-0.5j]])
In [4]: cirq.Circuit.from_ops([cirq.H(q)])._unitary_()
Out[4]:
array([[ 0.70710678+0.j,  0.70710678+0.j],
       [ 0.70710678+0.j, -0.70710678+0.j]])
```
Note that exponentiating the gate to a power different from 1.0 makes this work, suggesting a special casing of a decomposition is the culprit.
This affects other gates whose decomposition includes Hadamards (e.g. iSwaps).
There is a unit test that compares the unitary given by the gate and by its decomposition, but the assert it uses makes the comparison merely up to global phase. I think this is incorrect. Consider two qubits q0 and q1 and a circuit that applies U0 to q0 and U1 to q1. Suppose that the decomposition of U0 yields a unitary that is consistent with U0 merely up to global phase. What happens when you replace U0 with its decomposition? Well, this alters the *relative* phase between q0 and q1, producing an observable effect.
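A quick numeric check of the reproduction above (plain numpy, no cirq required) confirms that the two reported matrices differ by exactly a global phase of exp(i*pi/4):

```
import numpy as np

H = np.array([[1, 1], [1, -1]]) / np.sqrt(2)        # matrix reported for cirq.H
decomposed = np.array([[0.5 + 0.5j, 0.5 + 0.5j],
                       [0.5 + 0.5j, -0.5 - 0.5j]])  # matrix reported for the decomposition

phase = np.exp(1j * np.pi / 4)
assert np.allclose(decomposed, phase * H)
print("global phase factor:", phase)
```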
| [
{
"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Quantum gates that are commonly used in the literature.\"\"\"\nfrom typing import (\n Union, Tuple, Optional, List, Callable, cast, Iterable, Sequence,\n Any)\n\nimport numpy as np\n\nfrom cirq import linalg, protocols, value\nfrom cirq.ops import (\n gate_features,\n eigen_gate,\n raw_types,\n gate_operation,\n)\nfrom cirq.type_workarounds import NotImplementedType\n\n# Note: avoiding 'from/as' because it creates a circular dependency in python 2.\nimport cirq.ops.phased_x_gate\n\n\nclass CZPowGate(eigen_gate.EigenGate,\n gate_features.TwoQubitGate,\n gate_features.InterchangeableQubitsGate):\n \"\"\"Phases the |11⟩ state of two adjacent qubits by a fixed amount.\n\n A ParameterizedCZGate guaranteed to not be using the parameter key field.\n \"\"\"\n\n def _eigen_components(self):\n return [\n (0, np.diag([1, 1, 1, 0])),\n (1, np.diag([0, 0, 0, 1])),\n ]\n\n def _apply_unitary_to_tensor_(self,\n target_tensor: np.ndarray,\n available_buffer: np.ndarray,\n axes: Sequence[int],\n ) -> Union[np.ndarray, NotImplementedType]:\n if protocols.is_parameterized(self):\n return NotImplemented\n\n c = 1j**(2 * self._exponent)\n one_one = linalg.slice_for_qubits_equal_to(axes, 0b11)\n target_tensor[one_one] *= c\n p = 1j**(2 * self._exponent * self._global_shift)\n if p != 1:\n target_tensor *= p\n return target_tensor\n\n def _phase_by_(self, phase_turns, qubit_index):\n return self\n\n def _circuit_diagram_info_(self, args: protocols.CircuitDiagramInfoArgs\n ) -> protocols.CircuitDiagramInfo:\n return protocols.CircuitDiagramInfo(\n wire_symbols=('@', '@'),\n exponent=self._diagram_exponent(args))\n\n def _qasm_(self,\n args: protocols.QasmArgs,\n qubits: Tuple[raw_types.QubitId, ...]) -> Optional[str]:\n if self._exponent != 1:\n return None # Don't have an equivalent gate in QASM\n args.validate_version('2.0')\n return args.format('cz {0},{1};\\n', qubits[0], qubits[1])\n\n def __str__(self) -> str:\n if self._exponent == 1:\n return 'CZ'\n return 'CZ**{!r}'.format(self._exponent)\n\n def __repr__(self) -> str:\n if self._exponent == 1:\n return 'cirq.CZ'\n return '(cirq.CZ**{!r})'.format(self._exponent)\n\n\ndef _rads_func_symbol(func_name: str,\n args: protocols.CircuitDiagramInfoArgs,\n half_turns: Any) -> str:\n unit = 'π' if args.use_unicode_characters else 'pi'\n if half_turns == 1:\n return '{}({})'.format(func_name, unit)\n if half_turns == -1:\n return '{}(-{})'.format(func_name, unit)\n return '{}({}{})'.format(func_name, half_turns, unit)\n\n\nclass XPowGate(eigen_gate.EigenGate,\n gate_features.SingleQubitGate):\n \"\"\"Fixed rotation around the X axis of the Bloch sphere.\"\"\"\n\n def _apply_unitary_to_tensor_(self,\n target_tensor: np.ndarray,\n available_buffer: np.ndarray,\n axes: Sequence[int],\n ) -> Union[np.ndarray, NotImplementedType]:\n if self._exponent != 1:\n return NotImplemented\n zero = linalg.slice_for_qubits_equal_to(axes, 0)\n one = 
linalg.slice_for_qubits_equal_to(axes, 1)\n available_buffer[zero] = target_tensor[one]\n available_buffer[one] = target_tensor[zero]\n p = 1j**(2 * self._exponent * self._global_shift)\n if p != 1:\n available_buffer *= p\n return available_buffer\n\n def _eigen_components(self):\n return [\n (0, np.array([[0.5, 0.5], [0.5, 0.5]])),\n (1, np.array([[0.5, -0.5], [-0.5, 0.5]])),\n ]\n\n def _circuit_diagram_info_(self, args: protocols.CircuitDiagramInfoArgs\n ) -> Union[str, protocols.CircuitDiagramInfo]:\n if self._global_shift == -0.5:\n return _rads_func_symbol(\n 'Rx',\n args,\n self._diagram_exponent(args, ignore_global_phase=False))\n\n return protocols.CircuitDiagramInfo(\n wire_symbols=('X',),\n exponent=self._diagram_exponent(args))\n\n def _qasm_(self,\n args: protocols.QasmArgs,\n qubits: Tuple[raw_types.QubitId, ...]) -> Optional[str]:\n args.validate_version('2.0')\n if self._exponent == 1:\n return args.format('x {0};\\n', qubits[0])\n else:\n return args.format('rx({0:half_turns}) {1};\\n',\n self._exponent, qubits[0])\n\n def _phase_by_(self, phase_turns, qubit_index):\n \"\"\"See `cirq.SupportsPhase`.\"\"\"\n return cirq.ops.phased_x_gate.PhasedXPowGate(\n exponent=self._exponent,\n phase_exponent=phase_turns * 2)\n\n def __str__(self) -> str:\n if self._exponent == 1:\n return 'X'\n return 'X**{!r}'.format(self._exponent)\n\n def __repr__(self) -> str:\n if self._global_shift == -0.5:\n return 'cirq.Rx(np.pi*{!r})'.format(self._exponent)\n if self._global_shift == 0:\n if self._exponent == 1:\n return 'cirq.X'\n return '(cirq.X**{!r})'.format(self._exponent)\n return (\n 'cirq.XPowGate(exponent={!r}, '\n 'global_shift={!r})'\n ).format(self._exponent, self._global_shift)\n\n\nclass YPowGate(eigen_gate.EigenGate,\n gate_features.SingleQubitGate):\n \"\"\"Fixed rotation around the Y axis of the Bloch sphere.\"\"\"\n\n def _eigen_components(self):\n return [\n (0, np.array([[0.5, -0.5j], [0.5j, 0.5]])),\n (1, np.array([[0.5, 0.5j], [-0.5j, 0.5]])),\n ]\n\n def _circuit_diagram_info_(self, args: protocols.CircuitDiagramInfoArgs\n ) -> Union[str, protocols.CircuitDiagramInfo]:\n if self._global_shift == -0.5:\n return _rads_func_symbol(\n 'Ry',\n args,\n self._diagram_exponent(args, ignore_global_phase=False))\n\n return protocols.CircuitDiagramInfo(\n wire_symbols=('Y',),\n exponent=self._diagram_exponent(args))\n\n def _qasm_(self,\n args: protocols.QasmArgs,\n qubits: Tuple[raw_types.QubitId, ...]) -> Optional[str]:\n args.validate_version('2.0')\n if self._exponent == 1:\n return args.format('y {0};\\n', qubits[0])\n else:\n return args.format('ry({0:half_turns}) {1};\\n',\n self._exponent, qubits[0])\n\n def _phase_by_(self, phase_turns, qubit_index):\n \"\"\"See `cirq.SupportsPhase`.\"\"\"\n return cirq.ops.phased_x_gate.PhasedXPowGate(\n exponent=self._exponent,\n phase_exponent=0.5 + phase_turns * 2)\n\n def __str__(self) -> str:\n if self._exponent == 1:\n return 'Y'\n return 'Y**{!r}'.format(self._exponent)\n\n def __repr__(self) -> str:\n if self._global_shift == -0.5:\n return 'cirq.Ry(np.pi*{!r})'.format(self._exponent)\n if self._global_shift == 0:\n if self._exponent == 1:\n return 'cirq.Y'\n return '(cirq.Y**{!r})'.format(self._exponent)\n return (\n 'cirq.YPowGate(exponent={!r}, '\n 'global_shift={!r})'\n ).format(self._exponent, self._global_shift)\n\n\nclass ZPowGate(eigen_gate.EigenGate,\n gate_features.SingleQubitGate):\n \"\"\"Fixed rotation around the Z axis of the Bloch sphere.\"\"\"\n\n def _apply_unitary_to_tensor_(self,\n target_tensor: np.ndarray,\n 
available_buffer: np.ndarray,\n axes: Sequence[int],\n ) -> Union[np.ndarray, NotImplementedType]:\n if protocols.is_parameterized(self):\n return NotImplemented\n\n one = linalg.slice_for_qubits_equal_to(axes, 1)\n c = 1j**(self._exponent * 2)\n target_tensor[one] *= c\n p = 1j**(2 * self._exponent * self._global_shift)\n if p != 1:\n target_tensor *= p\n return target_tensor\n\n def _eigen_components(self):\n return [\n (0, np.diag([1, 0])),\n (1, np.diag([0, 1])),\n ]\n\n def _phase_by_(self, phase_turns: float, qubit_index: int):\n return self\n\n def _circuit_diagram_info_(self, args: protocols.CircuitDiagramInfoArgs\n ) -> Union[str, protocols.CircuitDiagramInfo]:\n if self._global_shift == -0.5:\n return _rads_func_symbol(\n 'Rz',\n args,\n self._diagram_exponent(args, ignore_global_phase=False))\n\n e = self._diagram_exponent(args)\n if e in [-0.25, 0.25]:\n return protocols.CircuitDiagramInfo(\n wire_symbols=('T',),\n exponent=cast(float, e) * 4)\n\n if e in [-0.5, 0.5]:\n return protocols.CircuitDiagramInfo(\n wire_symbols=('S',),\n exponent=cast(float, e) * 2)\n\n return protocols.CircuitDiagramInfo(\n wire_symbols=('Z',),\n exponent=e)\n\n def _qasm_(self,\n args: protocols.QasmArgs,\n qubits: Tuple[raw_types.QubitId, ...]) -> Optional[str]:\n args.validate_version('2.0')\n if self._exponent == 1:\n return args.format('z {0};\\n', qubits[0])\n else:\n return args.format('rz({0:half_turns}) {1};\\n',\n self._exponent, qubits[0])\n\n def __str__(self) -> str:\n if self._exponent == 0.25:\n return 'T'\n if self._exponent == -0.25:\n return 'T**-1'\n if self._exponent == 0.5:\n return 'S'\n if self._exponent == -0.5:\n return 'S**-1'\n if self._exponent == 1:\n return 'Z'\n return 'Z**{}'.format(self._exponent)\n\n def __repr__(self) -> str:\n if self._global_shift == -0.5:\n return 'cirq.Rz(np.pi*{!r})'.format(self._exponent)\n if self._global_shift == 0:\n if self._exponent == 0.25:\n return 'cirq.T'\n if self._exponent == -0.25:\n return '(cirq.T**-1)'\n if self._exponent == 0.5:\n return 'cirq.S'\n if self._exponent == -0.5:\n return '(cirq.S**-1)'\n if self._exponent == 1:\n return 'cirq.Z'\n return '(cirq.Z**{!r})'.format(self._exponent)\n return (\n 'cirq.ZPowGate(exponent={!r}, '\n 'global_shift={!r})'\n ).format(self._exponent, self._global_shift)\n\n\[email protected]_equality\nclass MeasurementGate(raw_types.Gate):\n \"\"\"Indicates that qubits should be measured plus a key to identify results.\n\n Attributes:\n key: The string key of the measurement.\n invert_mask: A list of values indicating whether the corresponding\n qubits should be flipped. The list's length must not be longer than\n the number of qubits, but it is permitted to be shorted.\n Qubits with indices past the end of the mask are not flipped.\n \"\"\"\n\n def __init__(self,\n key: str = '',\n invert_mask: Tuple[bool, ...] 
= ()) -> None:\n self.key = key\n self.invert_mask = invert_mask or ()\n\n @staticmethod\n def is_measurement(op: Union[raw_types.Gate, raw_types.Operation]) -> bool:\n if isinstance(op, MeasurementGate):\n return True\n if (isinstance(op, gate_operation.GateOperation) and\n isinstance(op.gate, MeasurementGate)):\n return True\n return False\n\n def with_bits_flipped(self, *bit_positions: int) -> 'MeasurementGate':\n \"\"\"Toggles whether or not the measurement inverts various outputs.\"\"\"\n old_mask = self.invert_mask or ()\n n = max(len(old_mask) - 1, *bit_positions) + 1\n new_mask = [k < len(old_mask) and old_mask[k] for k in range(n)]\n for b in bit_positions:\n new_mask[b] = not new_mask[b]\n return MeasurementGate(key=self.key, invert_mask=tuple(new_mask))\n\n def validate_args(self, qubits):\n if (self.invert_mask is not None and\n len(self.invert_mask) > len(qubits)):\n raise ValueError('len(invert_mask) > len(qubits)')\n\n def _circuit_diagram_info_(self, args: protocols.CircuitDiagramInfoArgs\n ) -> protocols.CircuitDiagramInfo:\n n = (max(1, len(self.invert_mask))\n if args.known_qubit_count is None\n else args.known_qubit_count)\n symbols = ['M'] * n\n\n # Show which output bits are negated.\n if self.invert_mask:\n for i, b in enumerate(self.invert_mask):\n if b:\n symbols[i] = '!M'\n\n # Mention the measurement key.\n if (not args.known_qubits or\n self.key != _default_measurement_key(args.known_qubits)):\n symbols[0] += \"('{}')\".format(self.key)\n\n return protocols.CircuitDiagramInfo(tuple(symbols))\n\n def _qasm_(self,\n args: protocols.QasmArgs,\n qubits: Tuple[raw_types.QubitId, ...]) -> Optional[str]:\n args.validate_version('2.0')\n invert_mask = self.invert_mask\n if len(invert_mask) < len(qubits):\n invert_mask = (invert_mask\n + (False,) * (len(qubits) - len(invert_mask)))\n lines = []\n for i, (qubit, inv) in enumerate(zip(qubits, invert_mask)):\n if inv:\n lines.append(args.format(\n 'x {0}; // Invert the following measurement\\n', qubit))\n lines.append(args.format('measure {0} -> {1:meas}[{2}];\\n',\n qubit, self.key, i))\n return ''.join(lines)\n\n def __repr__(self):\n return 'cirq.MeasurementGate({}, {})'.format(repr(self.key),\n repr(self.invert_mask))\n\n def _value_equality_values_(self):\n return self.key, self.invert_mask\n\n\ndef _default_measurement_key(qubits: Iterable[raw_types.QubitId]) -> str:\n return ','.join(str(q) for q in qubits)\n\n\ndef measure(*qubits: raw_types.QubitId,\n key: Optional[str] = None,\n invert_mask: Tuple[bool, ...] = ()\n ) -> gate_operation.GateOperation:\n \"\"\"Returns a single MeasurementGate applied to all the given qubits.\n\n The qubits are measured in the computational basis.\n\n Args:\n *qubits: The qubits that the measurement gate should measure.\n key: The string key of the measurement. If this is None, it defaults\n to a comma-separated list of the target qubits' str values.\n invert_mask: A list of Truthy or Falsey values indicating whether\n the corresponding qubits should be flipped. None indicates no\n inverting should be done.\n\n Returns:\n An operation targeting the given qubits with a measurement.\n\n Raises:\n ValueError if the qubits are not instances of QubitId.\n \"\"\"\n for qubit in qubits:\n if isinstance(qubit, np.ndarray):\n raise ValueError(\n 'measure() was called a numpy ndarray. 
Perhaps you meant '\n 'to call measure_state_vector on numpy array?'\n )\n elif not isinstance(qubit, raw_types.QubitId):\n raise ValueError(\n 'measure() was called with type different than QubitId.')\n\n if key is None:\n key = _default_measurement_key(qubits)\n return MeasurementGate(key, invert_mask).on(*qubits)\n\n\ndef measure_each(*qubits: raw_types.QubitId,\n key_func: Callable[[raw_types.QubitId], str] = str\n ) -> List[gate_operation.GateOperation]:\n \"\"\"Returns a list of operations individually measuring the given qubits.\n\n The qubits are measured in the computational basis.\n\n Args:\n *qubits: The qubits to measure.\n key_func: Determines the key of the measurements of each qubit. Takes\n the qubit and returns the key for that qubit. Defaults to str.\n\n Returns:\n A list of operations individually measuring the given qubits.\n \"\"\"\n return [MeasurementGate(key_func(q)).on(q) for q in qubits]\n\n\nX = XPowGate() # Pauli X gate.\nY = YPowGate() # Pauli Y gate.\nZ = ZPowGate() # Pauli Z gate.\nCZ = CZPowGate() # Negates the amplitude of the |11⟩ state.\n\nS = Z**0.5\nT = Z**0.25\n\n\nclass HPowGate(eigen_gate.EigenGate, gate_features.SingleQubitGate):\n \"\"\"Rotation around the X+Z axis of the Bloch sphere.\"\"\"\n\n def _eigen_components(self):\n s = np.sqrt(2)\n\n component0 = np.array([\n [3 + 2 * s, 1 + s],\n [1 + s, 1]\n ]) / (4 + 2 * s)\n\n component1 = np.array([\n [3 - 2 * s, 1 - s],\n [1 - s, 1]\n ]) / (4 - 2 * s)\n\n return [(0, component0), (1, component1)]\n\n def _apply_unitary_to_tensor_(self,\n target_tensor: np.ndarray,\n available_buffer: np.ndarray,\n axes: Sequence[int],\n ) -> Union[np.ndarray, NotImplementedType]:\n if self._exponent != 1:\n return NotImplemented\n\n zero = linalg.slice_for_qubits_equal_to(axes, 0)\n one = linalg.slice_for_qubits_equal_to(axes, 1)\n target_tensor[one] -= target_tensor[zero]\n target_tensor[one] *= -0.5\n target_tensor[zero] -= target_tensor[one]\n p = 1j**(2 * self._exponent * self._global_shift)\n target_tensor *= np.sqrt(2) * p\n return target_tensor\n\n def _decompose_(self, qubits):\n q = qubits[0]\n\n if self._exponent == 1:\n yield Y(q)**0.5, X(q)\n return\n\n yield Y(q)**0.25\n yield X(q)**self._exponent\n yield Y(q)**-0.25\n\n def _circuit_diagram_info_(self, args: protocols.CircuitDiagramInfoArgs\n ) -> protocols.CircuitDiagramInfo:\n return protocols.CircuitDiagramInfo(('H',))\n\n def _qasm_(self,\n args: protocols.QasmArgs,\n qubits: Tuple[raw_types.QubitId, ...]) -> Optional[str]:\n args.validate_version('2.0')\n if self._exponent == 1:\n return args.format('h {0};\\n', qubits[0])\n else:\n return args.format('ry({0:half_turns}) {3};\\n'\n 'rx({1:half_turns}) {3};\\n'\n 'ry({2:half_turns}) {3};\\n',\n 0.25, self._exponent, -0.25, qubits[0])\n\n def __str__(self):\n if self._exponent == 1:\n return 'H'\n return 'H^{}'.format(self._exponent)\n\n def __repr__(self):\n if self._exponent == 1:\n return 'cirq.H'\n return '(cirq.H**{!r})'.format(self._exponent)\n\n\nH = HPowGate() # Hadamard gate.\n\n\nclass CNotPowGate(eigen_gate.EigenGate, gate_features.TwoQubitGate):\n \"\"\"The controlled-not gate, possibly raised to a power.\n\n When applying CNOT (controlled-not) to QuBits, you can either use\n positional arguments CNOT(q1, q2), where q2 is toggled when q1 is on,\n or named arguments CNOT(control=q1, target=q2).\n (Mixing the two is not permitted.)\n \"\"\"\n\n def _decompose_(self, qubits):\n c, t = qubits\n yield Y(t)**-0.5\n yield CZ(c, t)**self._exponent\n yield Y(t)**0.5\n\n def 
_eigen_components(self):\n return [\n (0, np.array([[1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 0.5, 0.5],\n [0, 0, 0.5, 0.5]])),\n (1, np.array([[0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0.5, -0.5],\n [0, 0, -0.5, 0.5]])),\n ]\n\n def _circuit_diagram_info_(self, args: protocols.CircuitDiagramInfoArgs\n ) -> protocols.CircuitDiagramInfo:\n return protocols.CircuitDiagramInfo(\n wire_symbols=('@', 'X'),\n exponent=self._diagram_exponent(args))\n\n def _apply_unitary_to_tensor_(self,\n target_tensor: np.ndarray,\n available_buffer: np.ndarray,\n axes: Sequence[int],\n ) -> Union[np.ndarray, NotImplementedType]:\n if self._exponent != 1:\n return NotImplemented\n\n oo = linalg.slice_for_qubits_equal_to(axes, 0b11)\n zo = linalg.slice_for_qubits_equal_to(axes, 0b01)\n available_buffer[oo] = target_tensor[oo]\n target_tensor[oo] = target_tensor[zo]\n target_tensor[zo] = available_buffer[oo]\n p = 1j**(2 * self._exponent * self._global_shift)\n if p != 1:\n target_tensor *= p\n return target_tensor\n\n def _qasm_(self,\n args: protocols.QasmArgs,\n qubits: Tuple[raw_types.QubitId, ...]) -> Optional[str]:\n if self._exponent != 1:\n return None # Don't have an equivalent gate in QASM\n args.validate_version('2.0')\n return args.format('cx {0},{1};\\n', qubits[0], qubits[1])\n\n def __str__(self) -> str:\n if self._exponent == 1:\n return 'CNOT'\n return 'CNOT**{!r}'.format(self._exponent)\n\n def __repr__(self) -> str:\n if self._exponent == 1:\n return 'cirq.CNOT'\n return '(cirq.CNOT**{!r})'.format(self._exponent)\n\n def on(self, *args: raw_types.QubitId,\n **kwargs: raw_types.QubitId) -> gate_operation.GateOperation:\n if not kwargs:\n return super().on(*args)\n if not args and set(kwargs.keys()) == {'control', 'target'}:\n return super().on(kwargs['control'], kwargs['target'])\n raise ValueError(\n \"Expected two positional argument or else 'target' AND 'control' \"\n \"keyword arguments. But got args={!r}, kwargs={!r}.\".format(\n args, kwargs))\n\n\nCNOT = CNotPowGate() # Controlled Not Gate.\n\n\nclass SwapPowGate(eigen_gate.EigenGate,\n gate_features.TwoQubitGate,\n gate_features.InterchangeableQubitsGate):\n \"\"\"The SWAP gate, possibly raised to a power. 
Exchanges qubits.\"\"\"\n\n def _decompose_(self, qubits):\n \"\"\"See base class.\"\"\"\n a, b = qubits\n yield CNOT(a, b)\n yield CNOT(b, a) ** self._exponent\n yield CNOT(a, b)\n\n def _eigen_components(self):\n return [\n (0, np.array([[1, 0, 0, 0],\n [0, 0.5, 0.5, 0],\n [0, 0.5, 0.5, 0],\n [0, 0, 0, 1]])),\n (1, np.array([[0, 0, 0, 0],\n [0, 0.5, -0.5, 0],\n [0, -0.5, 0.5, 0],\n [0, 0, 0, 0]])),\n ]\n\n def _apply_unitary_to_tensor_(self,\n target_tensor: np.ndarray,\n available_buffer: np.ndarray,\n axes: Sequence[int],\n ) -> Union[np.ndarray, NotImplementedType]:\n if self._exponent != 1:\n return NotImplemented\n\n zo = linalg.slice_for_qubits_equal_to(axes, 0b01)\n oz = linalg.slice_for_qubits_equal_to(axes, 0b10)\n available_buffer[zo] = target_tensor[zo]\n target_tensor[zo] = target_tensor[oz]\n target_tensor[oz] = available_buffer[zo]\n p = 1j**(2 * self._exponent * self._global_shift)\n if p != 1:\n target_tensor *= p\n return target_tensor\n\n def _circuit_diagram_info_(self, args: protocols.CircuitDiagramInfoArgs\n ) -> protocols.CircuitDiagramInfo:\n if not args.use_unicode_characters:\n return protocols.CircuitDiagramInfo(\n wire_symbols=('swap', 'swap'),\n exponent=self._diagram_exponent(args))\n return protocols.CircuitDiagramInfo(\n wire_symbols=('×', '×'),\n exponent=self._diagram_exponent(args))\n\n def _qasm_(self,\n args: protocols.QasmArgs,\n qubits: Tuple[raw_types.QubitId, ...]) -> Optional[str]:\n if self._exponent != 1:\n return None # Don't have an equivalent gate in QASM\n args.validate_version('2.0')\n return args.format('swap {0},{1};\\n', qubits[0], qubits[1])\n\n def __str__(self) -> str:\n if self._exponent == 1:\n return 'SWAP'\n return 'SWAP**{!r}'.format(self._exponent)\n\n def __repr__(self) -> str:\n if self._exponent == 1:\n return 'cirq.SWAP'\n return '(cirq.SWAP**{!r})'.format(self._exponent)\n\n\nSWAP = SwapPowGate() # Exchanges two qubits' states.\n\n\nclass ISwapPowGate(eigen_gate.EigenGate,\n gate_features.InterchangeableQubitsGate,\n gate_features.TwoQubitGate):\n \"\"\"Rotates the |01⟩-vs-|10⟩ subspace of two qubits around its Bloch X-axis.\n\n When exponent=1, swaps the two qubits and phases |01⟩ and |10⟩ by i. 
More\n generally, this gate's matrix is defined as follows:\n\n ISWAP**t ≡ exp(+i π t (X⊗X + Y⊗Y) / 4)\n ≡ [1 0 0 0]\n [0 cos(π·t/2) i·sin(π·t/2) 0]\n [0 i·sin(π·t/2) cos(π·t/2) 0]\n [0 0 0 1]\n \"\"\"\n\n def _eigen_components(self):\n return [\n (0, np.diag([1, 0, 0, 1])),\n (+0.5, np.array([[0, 0, 0, 0],\n [0, 0.5, 0.5, 0],\n [0, 0.5, 0.5, 0],\n [0, 0, 0, 0]])),\n (-0.5, np.array([[0, 0, 0, 0],\n [0, 0.5, -0.5, 0],\n [0, -0.5, 0.5, 0],\n [0, 0, 0, 0]])),\n ]\n\n def _decompose_(self, qubits):\n a, b = qubits\n\n yield CNOT(a, b)\n yield H(a)\n yield CNOT(b, a)\n yield S(a)**self._exponent\n yield CNOT(b, a)\n yield S(a)**-self._exponent\n yield H(a)\n yield CNOT(a, b)\n\n def _apply_unitary_to_tensor_(self,\n target_tensor: np.ndarray,\n available_buffer: np.ndarray,\n axes: Sequence[int],\n ) -> Union[np.ndarray, NotImplementedType]:\n if self._exponent != 1:\n return NotImplemented\n\n zo = linalg.slice_for_qubits_equal_to(axes, 0b01)\n oz = linalg.slice_for_qubits_equal_to(axes, 0b10)\n available_buffer[zo] = target_tensor[zo]\n target_tensor[zo] = target_tensor[oz]\n target_tensor[oz] = available_buffer[zo]\n target_tensor[zo] *= 1j\n target_tensor[oz] *= 1j\n p = 1j**(2 * self._exponent * self._global_shift)\n if p != 1:\n target_tensor *= p\n return target_tensor\n\n def _circuit_diagram_info_(self, args: protocols.CircuitDiagramInfoArgs\n ) -> protocols.CircuitDiagramInfo:\n return protocols.CircuitDiagramInfo(\n wire_symbols=('iSwap', 'iSwap'),\n exponent=self._diagram_exponent(args))\n\n def __str__(self) -> str:\n if self._exponent == 1:\n return 'ISWAP'\n return 'ISWAP**{!r}'.format(self._exponent)\n\n def __repr__(self):\n if self._exponent == 1:\n return 'cirq.ISWAP'\n return '(cirq.ISWAP**{!r})'.format(self._exponent)\n\n\n# Swaps two qubits while phasing the swapped subspace by i.\nISWAP = ISwapPowGate()\n\n\ndef Rx(rads: float) -> XPowGate:\n \"\"\"Returns a gate with the matrix e^{-i X rads / 2}.\"\"\"\n return XPowGate(exponent=rads / np.pi, global_shift=-0.5)\n\n\ndef Ry(rads: float) -> YPowGate:\n \"\"\"Returns a gate with the matrix e^{-i Y rads / 2}.\"\"\"\n return YPowGate(exponent=rads / np.pi, global_shift=-0.5)\n\n\ndef Rz(rads: float) -> ZPowGate:\n \"\"\"Returns a gate with the matrix e^{-i Z rads / 2}.\"\"\"\n return ZPowGate(exponent=rads / np.pi, global_shift=-0.5)\n",
"path": "cirq/ops/common_gates.py"
}
] | [
{
"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Quantum gates that are commonly used in the literature.\"\"\"\nfrom typing import (\n Union, Tuple, Optional, List, Callable, cast, Iterable, Sequence,\n Any)\n\nimport numpy as np\n\nfrom cirq import linalg, protocols, value\nfrom cirq.ops import (\n gate_features,\n eigen_gate,\n raw_types,\n gate_operation,\n)\nfrom cirq.type_workarounds import NotImplementedType\n\n# Note: avoiding 'from/as' because it creates a circular dependency in python 2.\nimport cirq.ops.phased_x_gate\n\n\nclass CZPowGate(eigen_gate.EigenGate,\n gate_features.TwoQubitGate,\n gate_features.InterchangeableQubitsGate):\n \"\"\"Phases the |11⟩ state of two adjacent qubits by a fixed amount.\n\n A ParameterizedCZGate guaranteed to not be using the parameter key field.\n \"\"\"\n\n def _eigen_components(self):\n return [\n (0, np.diag([1, 1, 1, 0])),\n (1, np.diag([0, 0, 0, 1])),\n ]\n\n def _apply_unitary_to_tensor_(self,\n target_tensor: np.ndarray,\n available_buffer: np.ndarray,\n axes: Sequence[int],\n ) -> Union[np.ndarray, NotImplementedType]:\n if protocols.is_parameterized(self):\n return NotImplemented\n\n c = 1j**(2 * self._exponent)\n one_one = linalg.slice_for_qubits_equal_to(axes, 0b11)\n target_tensor[one_one] *= c\n p = 1j**(2 * self._exponent * self._global_shift)\n if p != 1:\n target_tensor *= p\n return target_tensor\n\n def _phase_by_(self, phase_turns, qubit_index):\n return self\n\n def _circuit_diagram_info_(self, args: protocols.CircuitDiagramInfoArgs\n ) -> protocols.CircuitDiagramInfo:\n return protocols.CircuitDiagramInfo(\n wire_symbols=('@', '@'),\n exponent=self._diagram_exponent(args))\n\n def _qasm_(self,\n args: protocols.QasmArgs,\n qubits: Tuple[raw_types.QubitId, ...]) -> Optional[str]:\n if self._exponent != 1:\n return None # Don't have an equivalent gate in QASM\n args.validate_version('2.0')\n return args.format('cz {0},{1};\\n', qubits[0], qubits[1])\n\n def __str__(self) -> str:\n if self._exponent == 1:\n return 'CZ'\n return 'CZ**{!r}'.format(self._exponent)\n\n def __repr__(self) -> str:\n if self._exponent == 1:\n return 'cirq.CZ'\n return '(cirq.CZ**{!r})'.format(self._exponent)\n\n\ndef _rads_func_symbol(func_name: str,\n args: protocols.CircuitDiagramInfoArgs,\n half_turns: Any) -> str:\n unit = 'π' if args.use_unicode_characters else 'pi'\n if half_turns == 1:\n return '{}({})'.format(func_name, unit)\n if half_turns == -1:\n return '{}(-{})'.format(func_name, unit)\n return '{}({}{})'.format(func_name, half_turns, unit)\n\n\nclass XPowGate(eigen_gate.EigenGate,\n gate_features.SingleQubitGate):\n \"\"\"Fixed rotation around the X axis of the Bloch sphere.\"\"\"\n\n def _apply_unitary_to_tensor_(self,\n target_tensor: np.ndarray,\n available_buffer: np.ndarray,\n axes: Sequence[int],\n ) -> Union[np.ndarray, NotImplementedType]:\n if self._exponent != 1:\n return NotImplemented\n zero = linalg.slice_for_qubits_equal_to(axes, 0)\n one = 
linalg.slice_for_qubits_equal_to(axes, 1)\n available_buffer[zero] = target_tensor[one]\n available_buffer[one] = target_tensor[zero]\n p = 1j**(2 * self._exponent * self._global_shift)\n if p != 1:\n available_buffer *= p\n return available_buffer\n\n def _eigen_components(self):\n return [\n (0, np.array([[0.5, 0.5], [0.5, 0.5]])),\n (1, np.array([[0.5, -0.5], [-0.5, 0.5]])),\n ]\n\n def _circuit_diagram_info_(self, args: protocols.CircuitDiagramInfoArgs\n ) -> Union[str, protocols.CircuitDiagramInfo]:\n if self._global_shift == -0.5:\n return _rads_func_symbol(\n 'Rx',\n args,\n self._diagram_exponent(args, ignore_global_phase=False))\n\n return protocols.CircuitDiagramInfo(\n wire_symbols=('X',),\n exponent=self._diagram_exponent(args))\n\n def _qasm_(self,\n args: protocols.QasmArgs,\n qubits: Tuple[raw_types.QubitId, ...]) -> Optional[str]:\n args.validate_version('2.0')\n if self._exponent == 1:\n return args.format('x {0};\\n', qubits[0])\n else:\n return args.format('rx({0:half_turns}) {1};\\n',\n self._exponent, qubits[0])\n\n def _phase_by_(self, phase_turns, qubit_index):\n \"\"\"See `cirq.SupportsPhase`.\"\"\"\n return cirq.ops.phased_x_gate.PhasedXPowGate(\n exponent=self._exponent,\n phase_exponent=phase_turns * 2)\n\n def __str__(self) -> str:\n if self._exponent == 1:\n return 'X'\n return 'X**{!r}'.format(self._exponent)\n\n def __repr__(self) -> str:\n if self._global_shift == -0.5:\n return 'cirq.Rx(np.pi*{!r})'.format(self._exponent)\n if self._global_shift == 0:\n if self._exponent == 1:\n return 'cirq.X'\n return '(cirq.X**{!r})'.format(self._exponent)\n return (\n 'cirq.XPowGate(exponent={!r}, '\n 'global_shift={!r})'\n ).format(self._exponent, self._global_shift)\n\n\nclass YPowGate(eigen_gate.EigenGate,\n gate_features.SingleQubitGate):\n \"\"\"Fixed rotation around the Y axis of the Bloch sphere.\"\"\"\n\n def _eigen_components(self):\n return [\n (0, np.array([[0.5, -0.5j], [0.5j, 0.5]])),\n (1, np.array([[0.5, 0.5j], [-0.5j, 0.5]])),\n ]\n\n def _circuit_diagram_info_(self, args: protocols.CircuitDiagramInfoArgs\n ) -> Union[str, protocols.CircuitDiagramInfo]:\n if self._global_shift == -0.5:\n return _rads_func_symbol(\n 'Ry',\n args,\n self._diagram_exponent(args, ignore_global_phase=False))\n\n return protocols.CircuitDiagramInfo(\n wire_symbols=('Y',),\n exponent=self._diagram_exponent(args))\n\n def _qasm_(self,\n args: protocols.QasmArgs,\n qubits: Tuple[raw_types.QubitId, ...]) -> Optional[str]:\n args.validate_version('2.0')\n if self._exponent == 1:\n return args.format('y {0};\\n', qubits[0])\n else:\n return args.format('ry({0:half_turns}) {1};\\n',\n self._exponent, qubits[0])\n\n def _phase_by_(self, phase_turns, qubit_index):\n \"\"\"See `cirq.SupportsPhase`.\"\"\"\n return cirq.ops.phased_x_gate.PhasedXPowGate(\n exponent=self._exponent,\n phase_exponent=0.5 + phase_turns * 2)\n\n def __str__(self) -> str:\n if self._exponent == 1:\n return 'Y'\n return 'Y**{!r}'.format(self._exponent)\n\n def __repr__(self) -> str:\n if self._global_shift == -0.5:\n return 'cirq.Ry(np.pi*{!r})'.format(self._exponent)\n if self._global_shift == 0:\n if self._exponent == 1:\n return 'cirq.Y'\n return '(cirq.Y**{!r})'.format(self._exponent)\n return (\n 'cirq.YPowGate(exponent={!r}, '\n 'global_shift={!r})'\n ).format(self._exponent, self._global_shift)\n\n\nclass ZPowGate(eigen_gate.EigenGate,\n gate_features.SingleQubitGate):\n \"\"\"Fixed rotation around the Z axis of the Bloch sphere.\"\"\"\n\n def _apply_unitary_to_tensor_(self,\n target_tensor: np.ndarray,\n 
available_buffer: np.ndarray,\n axes: Sequence[int],\n ) -> Union[np.ndarray, NotImplementedType]:\n if protocols.is_parameterized(self):\n return NotImplemented\n\n one = linalg.slice_for_qubits_equal_to(axes, 1)\n c = 1j**(self._exponent * 2)\n target_tensor[one] *= c\n p = 1j**(2 * self._exponent * self._global_shift)\n if p != 1:\n target_tensor *= p\n return target_tensor\n\n def _eigen_components(self):\n return [\n (0, np.diag([1, 0])),\n (1, np.diag([0, 1])),\n ]\n\n def _phase_by_(self, phase_turns: float, qubit_index: int):\n return self\n\n def _circuit_diagram_info_(self, args: protocols.CircuitDiagramInfoArgs\n ) -> Union[str, protocols.CircuitDiagramInfo]:\n if self._global_shift == -0.5:\n return _rads_func_symbol(\n 'Rz',\n args,\n self._diagram_exponent(args, ignore_global_phase=False))\n\n e = self._diagram_exponent(args)\n if e in [-0.25, 0.25]:\n return protocols.CircuitDiagramInfo(\n wire_symbols=('T',),\n exponent=cast(float, e) * 4)\n\n if e in [-0.5, 0.5]:\n return protocols.CircuitDiagramInfo(\n wire_symbols=('S',),\n exponent=cast(float, e) * 2)\n\n return protocols.CircuitDiagramInfo(\n wire_symbols=('Z',),\n exponent=e)\n\n def _qasm_(self,\n args: protocols.QasmArgs,\n qubits: Tuple[raw_types.QubitId, ...]) -> Optional[str]:\n args.validate_version('2.0')\n if self._exponent == 1:\n return args.format('z {0};\\n', qubits[0])\n else:\n return args.format('rz({0:half_turns}) {1};\\n',\n self._exponent, qubits[0])\n\n def __str__(self) -> str:\n if self._exponent == 0.25:\n return 'T'\n if self._exponent == -0.25:\n return 'T**-1'\n if self._exponent == 0.5:\n return 'S'\n if self._exponent == -0.5:\n return 'S**-1'\n if self._exponent == 1:\n return 'Z'\n return 'Z**{}'.format(self._exponent)\n\n def __repr__(self) -> str:\n if self._global_shift == -0.5:\n return 'cirq.Rz(np.pi*{!r})'.format(self._exponent)\n if self._global_shift == 0:\n if self._exponent == 0.25:\n return 'cirq.T'\n if self._exponent == -0.25:\n return '(cirq.T**-1)'\n if self._exponent == 0.5:\n return 'cirq.S'\n if self._exponent == -0.5:\n return '(cirq.S**-1)'\n if self._exponent == 1:\n return 'cirq.Z'\n return '(cirq.Z**{!r})'.format(self._exponent)\n return (\n 'cirq.ZPowGate(exponent={!r}, '\n 'global_shift={!r})'\n ).format(self._exponent, self._global_shift)\n\n\[email protected]_equality\nclass MeasurementGate(raw_types.Gate):\n \"\"\"Indicates that qubits should be measured plus a key to identify results.\n\n Attributes:\n key: The string key of the measurement.\n invert_mask: A list of values indicating whether the corresponding\n qubits should be flipped. The list's length must not be longer than\n the number of qubits, but it is permitted to be shorted.\n Qubits with indices past the end of the mask are not flipped.\n \"\"\"\n\n def __init__(self,\n key: str = '',\n invert_mask: Tuple[bool, ...] 
= ()) -> None:\n self.key = key\n self.invert_mask = invert_mask or ()\n\n @staticmethod\n def is_measurement(op: Union[raw_types.Gate, raw_types.Operation]) -> bool:\n if isinstance(op, MeasurementGate):\n return True\n if (isinstance(op, gate_operation.GateOperation) and\n isinstance(op.gate, MeasurementGate)):\n return True\n return False\n\n def with_bits_flipped(self, *bit_positions: int) -> 'MeasurementGate':\n \"\"\"Toggles whether or not the measurement inverts various outputs.\"\"\"\n old_mask = self.invert_mask or ()\n n = max(len(old_mask) - 1, *bit_positions) + 1\n new_mask = [k < len(old_mask) and old_mask[k] for k in range(n)]\n for b in bit_positions:\n new_mask[b] = not new_mask[b]\n return MeasurementGate(key=self.key, invert_mask=tuple(new_mask))\n\n def validate_args(self, qubits):\n if (self.invert_mask is not None and\n len(self.invert_mask) > len(qubits)):\n raise ValueError('len(invert_mask) > len(qubits)')\n\n def _circuit_diagram_info_(self, args: protocols.CircuitDiagramInfoArgs\n ) -> protocols.CircuitDiagramInfo:\n n = (max(1, len(self.invert_mask))\n if args.known_qubit_count is None\n else args.known_qubit_count)\n symbols = ['M'] * n\n\n # Show which output bits are negated.\n if self.invert_mask:\n for i, b in enumerate(self.invert_mask):\n if b:\n symbols[i] = '!M'\n\n # Mention the measurement key.\n if (not args.known_qubits or\n self.key != _default_measurement_key(args.known_qubits)):\n symbols[0] += \"('{}')\".format(self.key)\n\n return protocols.CircuitDiagramInfo(tuple(symbols))\n\n def _qasm_(self,\n args: protocols.QasmArgs,\n qubits: Tuple[raw_types.QubitId, ...]) -> Optional[str]:\n args.validate_version('2.0')\n invert_mask = self.invert_mask\n if len(invert_mask) < len(qubits):\n invert_mask = (invert_mask\n + (False,) * (len(qubits) - len(invert_mask)))\n lines = []\n for i, (qubit, inv) in enumerate(zip(qubits, invert_mask)):\n if inv:\n lines.append(args.format(\n 'x {0}; // Invert the following measurement\\n', qubit))\n lines.append(args.format('measure {0} -> {1:meas}[{2}];\\n',\n qubit, self.key, i))\n return ''.join(lines)\n\n def __repr__(self):\n return 'cirq.MeasurementGate({}, {})'.format(repr(self.key),\n repr(self.invert_mask))\n\n def _value_equality_values_(self):\n return self.key, self.invert_mask\n\n\ndef _default_measurement_key(qubits: Iterable[raw_types.QubitId]) -> str:\n return ','.join(str(q) for q in qubits)\n\n\ndef measure(*qubits: raw_types.QubitId,\n key: Optional[str] = None,\n invert_mask: Tuple[bool, ...] = ()\n ) -> gate_operation.GateOperation:\n \"\"\"Returns a single MeasurementGate applied to all the given qubits.\n\n The qubits are measured in the computational basis.\n\n Args:\n *qubits: The qubits that the measurement gate should measure.\n key: The string key of the measurement. If this is None, it defaults\n to a comma-separated list of the target qubits' str values.\n invert_mask: A list of Truthy or Falsey values indicating whether\n the corresponding qubits should be flipped. None indicates no\n inverting should be done.\n\n Returns:\n An operation targeting the given qubits with a measurement.\n\n Raises:\n ValueError if the qubits are not instances of QubitId.\n \"\"\"\n for qubit in qubits:\n if isinstance(qubit, np.ndarray):\n raise ValueError(\n 'measure() was called a numpy ndarray. 
Perhaps you meant '\n 'to call measure_state_vector on numpy array?'\n )\n elif not isinstance(qubit, raw_types.QubitId):\n raise ValueError(\n 'measure() was called with type different than QubitId.')\n\n if key is None:\n key = _default_measurement_key(qubits)\n return MeasurementGate(key, invert_mask).on(*qubits)\n\n\ndef measure_each(*qubits: raw_types.QubitId,\n key_func: Callable[[raw_types.QubitId], str] = str\n ) -> List[gate_operation.GateOperation]:\n \"\"\"Returns a list of operations individually measuring the given qubits.\n\n The qubits are measured in the computational basis.\n\n Args:\n *qubits: The qubits to measure.\n key_func: Determines the key of the measurements of each qubit. Takes\n the qubit and returns the key for that qubit. Defaults to str.\n\n Returns:\n A list of operations individually measuring the given qubits.\n \"\"\"\n return [MeasurementGate(key_func(q)).on(q) for q in qubits]\n\n\nX = XPowGate() # Pauli X gate.\nY = YPowGate() # Pauli Y gate.\nZ = ZPowGate() # Pauli Z gate.\nCZ = CZPowGate() # Negates the amplitude of the |11⟩ state.\n\nS = Z**0.5\nT = Z**0.25\n\n\nclass HPowGate(eigen_gate.EigenGate, gate_features.SingleQubitGate):\n \"\"\"Rotation around the X+Z axis of the Bloch sphere.\"\"\"\n\n def _eigen_components(self):\n s = np.sqrt(2)\n\n component0 = np.array([\n [3 + 2 * s, 1 + s],\n [1 + s, 1]\n ]) / (4 + 2 * s)\n\n component1 = np.array([\n [3 - 2 * s, 1 - s],\n [1 - s, 1]\n ]) / (4 - 2 * s)\n\n return [(0, component0), (1, component1)]\n\n def _apply_unitary_to_tensor_(self,\n target_tensor: np.ndarray,\n available_buffer: np.ndarray,\n axes: Sequence[int],\n ) -> Union[np.ndarray, NotImplementedType]:\n if self._exponent != 1:\n return NotImplemented\n\n zero = linalg.slice_for_qubits_equal_to(axes, 0)\n one = linalg.slice_for_qubits_equal_to(axes, 1)\n target_tensor[one] -= target_tensor[zero]\n target_tensor[one] *= -0.5\n target_tensor[zero] -= target_tensor[one]\n p = 1j**(2 * self._exponent * self._global_shift)\n target_tensor *= np.sqrt(2) * p\n return target_tensor\n\n def _decompose_(self, qubits):\n q = qubits[0]\n\n if self._exponent == 1:\n yield cirq.Y(q)**0.5\n yield cirq.XPowGate(global_shift=-0.25).on(q)\n return\n\n yield Y(q)**0.25\n yield X(q)**self._exponent\n yield Y(q)**-0.25\n\n def _circuit_diagram_info_(self, args: protocols.CircuitDiagramInfoArgs\n ) -> protocols.CircuitDiagramInfo:\n return protocols.CircuitDiagramInfo(('H',))\n\n def _qasm_(self,\n args: protocols.QasmArgs,\n qubits: Tuple[raw_types.QubitId, ...]) -> Optional[str]:\n args.validate_version('2.0')\n if self._exponent == 1:\n return args.format('h {0};\\n', qubits[0])\n else:\n return args.format('ry({0:half_turns}) {3};\\n'\n 'rx({1:half_turns}) {3};\\n'\n 'ry({2:half_turns}) {3};\\n',\n 0.25, self._exponent, -0.25, qubits[0])\n\n def __str__(self):\n if self._exponent == 1:\n return 'H'\n return 'H^{}'.format(self._exponent)\n\n def __repr__(self):\n if self._exponent == 1:\n return 'cirq.H'\n return '(cirq.H**{!r})'.format(self._exponent)\n\n\nH = HPowGate() # Hadamard gate.\n\n\nclass CNotPowGate(eigen_gate.EigenGate, gate_features.TwoQubitGate):\n \"\"\"The controlled-not gate, possibly raised to a power.\n\n When applying CNOT (controlled-not) to QuBits, you can either use\n positional arguments CNOT(q1, q2), where q2 is toggled when q1 is on,\n or named arguments CNOT(control=q1, target=q2).\n (Mixing the two is not permitted.)\n \"\"\"\n\n def _decompose_(self, qubits):\n c, t = qubits\n yield Y(t)**-0.5\n yield CZ(c, 
t)**self._exponent\n yield Y(t)**0.5\n\n def _eigen_components(self):\n return [\n (0, np.array([[1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 0.5, 0.5],\n [0, 0, 0.5, 0.5]])),\n (1, np.array([[0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0.5, -0.5],\n [0, 0, -0.5, 0.5]])),\n ]\n\n def _circuit_diagram_info_(self, args: protocols.CircuitDiagramInfoArgs\n ) -> protocols.CircuitDiagramInfo:\n return protocols.CircuitDiagramInfo(\n wire_symbols=('@', 'X'),\n exponent=self._diagram_exponent(args))\n\n def _apply_unitary_to_tensor_(self,\n target_tensor: np.ndarray,\n available_buffer: np.ndarray,\n axes: Sequence[int],\n ) -> Union[np.ndarray, NotImplementedType]:\n if self._exponent != 1:\n return NotImplemented\n\n oo = linalg.slice_for_qubits_equal_to(axes, 0b11)\n zo = linalg.slice_for_qubits_equal_to(axes, 0b01)\n available_buffer[oo] = target_tensor[oo]\n target_tensor[oo] = target_tensor[zo]\n target_tensor[zo] = available_buffer[oo]\n p = 1j**(2 * self._exponent * self._global_shift)\n if p != 1:\n target_tensor *= p\n return target_tensor\n\n def _qasm_(self,\n args: protocols.QasmArgs,\n qubits: Tuple[raw_types.QubitId, ...]) -> Optional[str]:\n if self._exponent != 1:\n return None # Don't have an equivalent gate in QASM\n args.validate_version('2.0')\n return args.format('cx {0},{1};\\n', qubits[0], qubits[1])\n\n def __str__(self) -> str:\n if self._exponent == 1:\n return 'CNOT'\n return 'CNOT**{!r}'.format(self._exponent)\n\n def __repr__(self) -> str:\n if self._exponent == 1:\n return 'cirq.CNOT'\n return '(cirq.CNOT**{!r})'.format(self._exponent)\n\n def on(self, *args: raw_types.QubitId,\n **kwargs: raw_types.QubitId) -> gate_operation.GateOperation:\n if not kwargs:\n return super().on(*args)\n if not args and set(kwargs.keys()) == {'control', 'target'}:\n return super().on(kwargs['control'], kwargs['target'])\n raise ValueError(\n \"Expected two positional argument or else 'target' AND 'control' \"\n \"keyword arguments. But got args={!r}, kwargs={!r}.\".format(\n args, kwargs))\n\n\nCNOT = CNotPowGate() # Controlled Not Gate.\n\n\nclass SwapPowGate(eigen_gate.EigenGate,\n gate_features.TwoQubitGate,\n gate_features.InterchangeableQubitsGate):\n \"\"\"The SWAP gate, possibly raised to a power. 
Exchanges qubits.\"\"\"\n\n def _decompose_(self, qubits):\n \"\"\"See base class.\"\"\"\n a, b = qubits\n yield CNOT(a, b)\n yield CNOT(b, a) ** self._exponent\n yield CNOT(a, b)\n\n def _eigen_components(self):\n return [\n (0, np.array([[1, 0, 0, 0],\n [0, 0.5, 0.5, 0],\n [0, 0.5, 0.5, 0],\n [0, 0, 0, 1]])),\n (1, np.array([[0, 0, 0, 0],\n [0, 0.5, -0.5, 0],\n [0, -0.5, 0.5, 0],\n [0, 0, 0, 0]])),\n ]\n\n def _apply_unitary_to_tensor_(self,\n target_tensor: np.ndarray,\n available_buffer: np.ndarray,\n axes: Sequence[int],\n ) -> Union[np.ndarray, NotImplementedType]:\n if self._exponent != 1:\n return NotImplemented\n\n zo = linalg.slice_for_qubits_equal_to(axes, 0b01)\n oz = linalg.slice_for_qubits_equal_to(axes, 0b10)\n available_buffer[zo] = target_tensor[zo]\n target_tensor[zo] = target_tensor[oz]\n target_tensor[oz] = available_buffer[zo]\n p = 1j**(2 * self._exponent * self._global_shift)\n if p != 1:\n target_tensor *= p\n return target_tensor\n\n def _circuit_diagram_info_(self, args: protocols.CircuitDiagramInfoArgs\n ) -> protocols.CircuitDiagramInfo:\n if not args.use_unicode_characters:\n return protocols.CircuitDiagramInfo(\n wire_symbols=('swap', 'swap'),\n exponent=self._diagram_exponent(args))\n return protocols.CircuitDiagramInfo(\n wire_symbols=('×', '×'),\n exponent=self._diagram_exponent(args))\n\n def _qasm_(self,\n args: protocols.QasmArgs,\n qubits: Tuple[raw_types.QubitId, ...]) -> Optional[str]:\n if self._exponent != 1:\n return None # Don't have an equivalent gate in QASM\n args.validate_version('2.0')\n return args.format('swap {0},{1};\\n', qubits[0], qubits[1])\n\n def __str__(self) -> str:\n if self._exponent == 1:\n return 'SWAP'\n return 'SWAP**{!r}'.format(self._exponent)\n\n def __repr__(self) -> str:\n if self._exponent == 1:\n return 'cirq.SWAP'\n return '(cirq.SWAP**{!r})'.format(self._exponent)\n\n\nSWAP = SwapPowGate() # Exchanges two qubits' states.\n\n\nclass ISwapPowGate(eigen_gate.EigenGate,\n gate_features.InterchangeableQubitsGate,\n gate_features.TwoQubitGate):\n \"\"\"Rotates the |01⟩-vs-|10⟩ subspace of two qubits around its Bloch X-axis.\n\n When exponent=1, swaps the two qubits and phases |01⟩ and |10⟩ by i. 
More\n generally, this gate's matrix is defined as follows:\n\n ISWAP**t ≡ exp(+i π t (X⊗X + Y⊗Y) / 4)\n ≡ [1 0 0 0]\n [0 cos(π·t/2) i·sin(π·t/2) 0]\n [0 i·sin(π·t/2) cos(π·t/2) 0]\n [0 0 0 1]\n \"\"\"\n\n def _eigen_components(self):\n return [\n (0, np.diag([1, 0, 0, 1])),\n (+0.5, np.array([[0, 0, 0, 0],\n [0, 0.5, 0.5, 0],\n [0, 0.5, 0.5, 0],\n [0, 0, 0, 0]])),\n (-0.5, np.array([[0, 0, 0, 0],\n [0, 0.5, -0.5, 0],\n [0, -0.5, 0.5, 0],\n [0, 0, 0, 0]])),\n ]\n\n def _decompose_(self, qubits):\n a, b = qubits\n\n yield CNOT(a, b)\n yield H(a)\n yield CNOT(b, a)\n yield S(a)**self._exponent\n yield CNOT(b, a)\n yield S(a)**-self._exponent\n yield H(a)\n yield CNOT(a, b)\n\n def _apply_unitary_to_tensor_(self,\n target_tensor: np.ndarray,\n available_buffer: np.ndarray,\n axes: Sequence[int],\n ) -> Union[np.ndarray, NotImplementedType]:\n if self._exponent != 1:\n return NotImplemented\n\n zo = linalg.slice_for_qubits_equal_to(axes, 0b01)\n oz = linalg.slice_for_qubits_equal_to(axes, 0b10)\n available_buffer[zo] = target_tensor[zo]\n target_tensor[zo] = target_tensor[oz]\n target_tensor[oz] = available_buffer[zo]\n target_tensor[zo] *= 1j\n target_tensor[oz] *= 1j\n p = 1j**(2 * self._exponent * self._global_shift)\n if p != 1:\n target_tensor *= p\n return target_tensor\n\n def _circuit_diagram_info_(self, args: protocols.CircuitDiagramInfoArgs\n ) -> protocols.CircuitDiagramInfo:\n return protocols.CircuitDiagramInfo(\n wire_symbols=('iSwap', 'iSwap'),\n exponent=self._diagram_exponent(args))\n\n def __str__(self) -> str:\n if self._exponent == 1:\n return 'ISWAP'\n return 'ISWAP**{!r}'.format(self._exponent)\n\n def __repr__(self):\n if self._exponent == 1:\n return 'cirq.ISWAP'\n return '(cirq.ISWAP**{!r})'.format(self._exponent)\n\n\n# Swaps two qubits while phasing the swapped subspace by i.\nISWAP = ISwapPowGate()\n\n\ndef Rx(rads: float) -> XPowGate:\n \"\"\"Returns a gate with the matrix e^{-i X rads / 2}.\"\"\"\n return XPowGate(exponent=rads / np.pi, global_shift=-0.5)\n\n\ndef Ry(rads: float) -> YPowGate:\n \"\"\"Returns a gate with the matrix e^{-i Y rads / 2}.\"\"\"\n return YPowGate(exponent=rads / np.pi, global_shift=-0.5)\n\n\ndef Rz(rads: float) -> ZPowGate:\n \"\"\"Returns a gate with the matrix e^{-i Z rads / 2}.\"\"\"\n return ZPowGate(exponent=rads / np.pi, global_shift=-0.5)\n",
"path": "cirq/ops/common_gates.py"
}
] | diff --git a/cirq/ops/common_gates.py b/cirq/ops/common_gates.py
index 0ab85680bdc..0c8bc36a218 100644
--- a/cirq/ops/common_gates.py
+++ b/cirq/ops/common_gates.py
@@ -518,7 +518,8 @@ def _decompose_(self, qubits):
q = qubits[0]
if self._exponent == 1:
- yield Y(q)**0.5, X(q)
+ yield cirq.Y(q)**0.5
+ yield cirq.XPowGate(global_shift=-0.25).on(q)
return
yield Y(q)**0.25
diff --git a/cirq/testing/consistent_decomposition.py b/cirq/testing/consistent_decomposition.py
index aac0f041788..90dc3b877bc 100644
--- a/cirq/testing/consistent_decomposition.py
+++ b/cirq/testing/consistent_decomposition.py
@@ -14,8 +14,9 @@
from typing import Any
+import numpy as np
+
from cirq import protocols, ops, line, circuits
-from cirq.testing import lin_alg_utils
def assert_decompose_is_consistent_with_unitary(val: Any):
@@ -36,6 +37,4 @@ def assert_decompose_is_consistent_with_unitary(val: Any):
actual = circuits.Circuit.from_ops(dec).to_unitary_matrix(
qubit_order=qubits)
- lin_alg_utils.assert_allclose_up_to_global_phase(actual,
- expected,
- atol=1e-8)
+ assert np.allclose(actual, expected, atol=1e-8)
|
PaddlePaddle__PaddleSpeech-19 | Fix some problems in the ctc beam search decoder
- [x] Make the characters' indices in the FST start from one; otherwise wrong decoding results are produced, especially when space is the first character in the vocabulary (see the sketch after this list);
- [x] Add a version check in the setup script;
- [x] Remove unused code.
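A rough Python illustration of the first and second items (the real changes live in the C++ scorer and in setup.sh; the vocabulary below is made up):

```python
# Sketch of the index shift: characters are mapped starting from 1 so that
# index 0 never conflicts with the FST's initial state.
char_list = [" ", "a", "b", "c"]  # hypothetical vocabulary; space comes first
char_map = {ch: i + 1 for i, ch in enumerate(char_list)}
assert char_map[" "] == 1  # space no longer maps to 0

# Version check used by setup.sh so a stale decoder build gets rebuilt:
import pkg_resources
pkg_resources.require("swig_decoders==1.1")
```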
| [
{
"content": "\"\"\"Script to build and install decoder package.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom setuptools import setup, Extension, distutils\nimport glob\nimport platform\nimport os, sys\nimport multiprocessing.pool\nimport argparse\n\nparser = argparse.ArgumentParser(description=__doc__)\nparser.add_argument(\n \"--num_processes\",\n default=1,\n type=int,\n help=\"Number of cpu processes to build package. (default: %(default)d)\")\nargs = parser.parse_known_args()\n\n# reconstruct sys.argv to pass to setup below\nsys.argv = [sys.argv[0]] + args[1]\n\n\n# monkey-patch for parallel compilation\n# See: https://stackoverflow.com/a/13176803\ndef parallelCCompile(self,\n sources,\n output_dir=None,\n macros=None,\n include_dirs=None,\n debug=0,\n extra_preargs=None,\n extra_postargs=None,\n depends=None):\n # those lines are copied from distutils.ccompiler.CCompiler directly\n macros, objects, extra_postargs, pp_opts, build = self._setup_compile(\n output_dir, macros, include_dirs, sources, depends, extra_postargs)\n cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)\n\n # parallel code\n def _single_compile(obj):\n try:\n src, ext = build[obj]\n except KeyError:\n return\n self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)\n\n # convert to list, imap is evaluated on-demand\n thread_pool = multiprocessing.pool.ThreadPool(args[0].num_processes)\n list(thread_pool.imap(_single_compile, objects))\n return objects\n\n\ndef compile_test(header, library):\n dummy_path = os.path.join(os.path.dirname(__file__), \"dummy\")\n command = \"bash -c \\\"g++ -include \" + header \\\n + \" -l\" + library + \" -x c++ - <<<'int main() {}' -o \" \\\n + dummy_path + \" >/dev/null 2>/dev/null && rm \" \\\n + dummy_path + \" 2>/dev/null\\\"\"\n return os.system(command) == 0\n\n\n# hack compile to support parallel compiling\ndistutils.ccompiler.CCompiler.compile = parallelCCompile\n\nFILES = glob.glob('kenlm/util/*.cc') \\\n + glob.glob('kenlm/lm/*.cc') \\\n + glob.glob('kenlm/util/double-conversion/*.cc')\n\nFILES += glob.glob('openfst-1.6.3/src/lib/*.cc')\n\nFILES = [\n fn for fn in FILES\n if not (fn.endswith('main.cc') or fn.endswith('test.cc') or fn.endswith(\n 'unittest.cc'))\n]\n\nLIBS = ['stdc++']\nif platform.system() != 'Darwin':\n LIBS.append('rt')\n\nARGS = ['-O3', '-DNDEBUG', '-DKENLM_MAX_ORDER=6', '-std=c++11']\n\nif compile_test('zlib.h', 'z'):\n ARGS.append('-DHAVE_ZLIB')\n LIBS.append('z')\n\nif compile_test('bzlib.h', 'bz2'):\n ARGS.append('-DHAVE_BZLIB')\n LIBS.append('bz2')\n\nif compile_test('lzma.h', 'lzma'):\n ARGS.append('-DHAVE_XZLIB')\n LIBS.append('lzma')\n\nos.system('swig -python -c++ ./decoders.i')\n\ndecoders_module = [\n Extension(\n name='_swig_decoders',\n sources=FILES + glob.glob('*.cxx') + glob.glob('*.cpp'),\n language='c++',\n include_dirs=[\n '.',\n 'kenlm',\n 'openfst-1.6.3/src/include',\n 'ThreadPool',\n ],\n libraries=LIBS,\n extra_compile_args=ARGS)\n]\n\nsetup(\n name='swig_decoders',\n version='1.0',\n description=\"\"\"CTC decoders\"\"\",\n ext_modules=decoders_module,\n py_modules=['swig_decoders'], )\n",
"path": "decoders/swig/setup.py"
}
] | [
{
"content": "\"\"\"Script to build and install decoder package.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom setuptools import setup, Extension, distutils\nimport glob\nimport platform\nimport os, sys\nimport multiprocessing.pool\nimport argparse\n\nparser = argparse.ArgumentParser(description=__doc__)\nparser.add_argument(\n \"--num_processes\",\n default=1,\n type=int,\n help=\"Number of cpu processes to build package. (default: %(default)d)\")\nargs = parser.parse_known_args()\n\n# reconstruct sys.argv to pass to setup below\nsys.argv = [sys.argv[0]] + args[1]\n\n\n# monkey-patch for parallel compilation\n# See: https://stackoverflow.com/a/13176803\ndef parallelCCompile(self,\n sources,\n output_dir=None,\n macros=None,\n include_dirs=None,\n debug=0,\n extra_preargs=None,\n extra_postargs=None,\n depends=None):\n # those lines are copied from distutils.ccompiler.CCompiler directly\n macros, objects, extra_postargs, pp_opts, build = self._setup_compile(\n output_dir, macros, include_dirs, sources, depends, extra_postargs)\n cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)\n\n # parallel code\n def _single_compile(obj):\n try:\n src, ext = build[obj]\n except KeyError:\n return\n self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)\n\n # convert to list, imap is evaluated on-demand\n thread_pool = multiprocessing.pool.ThreadPool(args[0].num_processes)\n list(thread_pool.imap(_single_compile, objects))\n return objects\n\n\ndef compile_test(header, library):\n dummy_path = os.path.join(os.path.dirname(__file__), \"dummy\")\n command = \"bash -c \\\"g++ -include \" + header \\\n + \" -l\" + library + \" -x c++ - <<<'int main() {}' -o \" \\\n + dummy_path + \" >/dev/null 2>/dev/null && rm \" \\\n + dummy_path + \" 2>/dev/null\\\"\"\n return os.system(command) == 0\n\n\n# hack compile to support parallel compiling\ndistutils.ccompiler.CCompiler.compile = parallelCCompile\n\nFILES = glob.glob('kenlm/util/*.cc') \\\n + glob.glob('kenlm/lm/*.cc') \\\n + glob.glob('kenlm/util/double-conversion/*.cc')\n\nFILES += glob.glob('openfst-1.6.3/src/lib/*.cc')\n\nFILES = [\n fn for fn in FILES\n if not (fn.endswith('main.cc') or fn.endswith('test.cc') or fn.endswith(\n 'unittest.cc'))\n]\n\nLIBS = ['stdc++']\nif platform.system() != 'Darwin':\n LIBS.append('rt')\n\nARGS = ['-O3', '-DNDEBUG', '-DKENLM_MAX_ORDER=6', '-std=c++11']\n\nif compile_test('zlib.h', 'z'):\n ARGS.append('-DHAVE_ZLIB')\n LIBS.append('z')\n\nif compile_test('bzlib.h', 'bz2'):\n ARGS.append('-DHAVE_BZLIB')\n LIBS.append('bz2')\n\nif compile_test('lzma.h', 'lzma'):\n ARGS.append('-DHAVE_XZLIB')\n LIBS.append('lzma')\n\nos.system('swig -python -c++ ./decoders.i')\n\ndecoders_module = [\n Extension(\n name='_swig_decoders',\n sources=FILES + glob.glob('*.cxx') + glob.glob('*.cpp'),\n language='c++',\n include_dirs=[\n '.',\n 'kenlm',\n 'openfst-1.6.3/src/include',\n 'ThreadPool',\n ],\n libraries=LIBS,\n extra_compile_args=ARGS)\n]\n\nsetup(\n name='swig_decoders',\n version='1.1',\n description=\"\"\"CTC decoders\"\"\",\n ext_modules=decoders_module,\n py_modules=['swig_decoders'], )\n",
"path": "decoders/swig/setup.py"
}
] | diff --git a/decoders/swig/path_trie.cpp b/decoders/swig/path_trie.cpp
index 40d90970556..152efa82c64 100644
--- a/decoders/swig/path_trie.cpp
+++ b/decoders/swig/path_trie.cpp
@@ -52,7 +52,7 @@ PathTrie* PathTrie::get_path_trie(int new_char, bool reset) {
} else {
if (has_dictionary_) {
matcher_->SetState(dictionary_state_);
- bool found = matcher_->Find(new_char);
+ bool found = matcher_->Find(new_char + 1);
if (!found) {
// Adding this character causes word outside dictionary
auto FSTZERO = fst::TropicalWeight::Zero();
diff --git a/decoders/swig/scorer.cpp b/decoders/swig/scorer.cpp
index 686c67c77e1..27b61cd033e 100644
--- a/decoders/swig/scorer.cpp
+++ b/decoders/swig/scorer.cpp
@@ -149,13 +149,15 @@ void Scorer::set_char_map(const std::vector<std::string>& char_list) {
char_list_ = char_list;
char_map_.clear();
+ // Set the char map for the FST for spelling correction
for (size_t i = 0; i < char_list_.size(); i++) {
if (char_list_[i] == " ") {
SPACE_ID_ = i;
- char_map_[' '] = i;
- } else if (char_list_[i].size() == 1) {
- char_map_[char_list_[i][0]] = i;
}
+ // The initial state of FST is state 0, hence the index of chars in
+ // the FST should start from 1 to avoid the conflict with the initial
+ // state, otherwise wrong decoding results would be given.
+ char_map_[char_list_[i]] = i + 1;
}
}
@@ -193,17 +195,11 @@ std::vector<std::string> Scorer::make_ngram(PathTrie* prefix) {
void Scorer::fill_dictionary(bool add_space) {
fst::StdVectorFst dictionary;
- // First reverse char_list so ints can be accessed by chars
- std::unordered_map<std::string, int> char_map;
- for (size_t i = 0; i < char_list_.size(); i++) {
- char_map[char_list_[i]] = i;
- }
-
// For each unigram convert to ints and put in trie
int dict_size = 0;
for (const auto& word : vocabulary_) {
bool added = add_word_to_dictionary(
- word, char_map, add_space, SPACE_ID_, &dictionary);
+ word, char_map_, add_space, SPACE_ID_ + 1, &dictionary);
dict_size += added ? 1 : 0;
}
diff --git a/decoders/swig/scorer.h b/decoders/swig/scorer.h
index 61836463597..5ebc719c701 100644
--- a/decoders/swig/scorer.h
+++ b/decoders/swig/scorer.h
@@ -104,7 +104,7 @@ class Scorer {
int SPACE_ID_;
std::vector<std::string> char_list_;
- std::unordered_map<char, int> char_map_;
+ std::unordered_map<std::string, int> char_map_;
std::vector<std::string> vocabulary_;
};
diff --git a/decoders/swig/setup.py b/decoders/swig/setup.py
index b6bc0ca06af..a4bb2e9dadb 100644
--- a/decoders/swig/setup.py
+++ b/decoders/swig/setup.py
@@ -113,7 +113,7 @@ def compile_test(header, library):
setup(
name='swig_decoders',
- version='1.0',
+ version='1.1',
description="""CTC decoders""",
ext_modules=decoders_module,
py_modules=['swig_decoders'], )
diff --git a/setup.sh b/setup.sh
index 7c40415db32..ec5e47ec8fc 100644
--- a/setup.sh
+++ b/setup.sh
@@ -27,7 +27,7 @@ if [ $? != 0 ]; then
fi
# install decoders
-python -c "import swig_decoders"
+python -c "import pkg_resources; pkg_resources.require(\"swig_decoders==1.1\")"
if [ $? != 0 ]; then
cd decoders/swig > /dev/null
sh setup.sh
|
iterative__dvc-7234 | dvc.fs.Path.parts wrong results
**EDIT**: This issue will only cover the first problem, handling a sep at the end of a path. I made the Windows-style path problem a separate issue: #7233
When a path ends with the path sep, the `parts` function doesn't split. It returns a tuple with a single item:
```python
from dvc.fs.path import Path
Path('/').parts('/a/b/c/')
```
```python
('/a/b/c',)
```
A second problem occurs with Windows-style paths: the sep shows up as its own element between the drive and the rest of the path:
```python
Path('\\').parts('c:\\a')
```
```python
('c:', '\\', 'a')
```
The first problem could be solved by simply stripping the final sep:
```python
drive, path = self.flavour.splitdrive(path.rstrip(self.flavour.sep))
```
but the second problem would still exist.
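For reference, both behaviours can be reproduced with the stdlib modules that `Path.flavour` delegates to (a quick illustration, not part of the proposed fix):

```python
import ntpath
import posixpath

# Problem 1: a trailing sep makes split() return an empty last component,
# which is why stripping the sep before splitting fixes it.
posixpath.split('/a/b/c/')               # ('/a/b/c', '')
posixpath.split('/a/b/c/'.rstrip('/'))   # ('/a/b', 'c')

# Problem 2: ntpath.splitdrive() leaves the sep attached to the tail,
# so it later becomes its own element when the tail is split.
ntpath.splitdrive('c:\\a')               # ('c:', '\\a')
```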
We should really get these results:
```python
Path('/').parts('/a/b/c/')
```
```python
('/', 'a', 'b', 'c')
```
and
```python
Path('\\').parts('c:\\a')
```
```python
('c:', 'a')
```
Note that the second case still differs slightly from pathlib, which includes the sep with the drive:
```python
from pathlib import PureWindowsPath
PureWindowsPath('c:\\a').parts
```
```python
('c:\\', 'a')
```
but this is probably more in line with fsspec, which essentially treats the drive letter as the first element of a relative path:
```python
fsspec.AbstractFileSystem._parent('c:/a')
```
```python
'c:'
```
version info:
```
DVC version: 2.9.4.dev28+gd90fe54d.d20220106
---------------------------------
Platform: Python 3.10.1 on Linux-5.15.11-arch2-1-x86_64-with-glibc2.33
Supports:
azure (adlfs = 2021.10.0, knack = 0.9.0, azure-identity = 1.7.1),
gdrive (pydrive2 = 1.10.0),
gs (gcsfs = 2021.11.1),
hdfs (fsspec = 2021.11.1, pyarrow = 6.0.1),
webhdfs (fsspec = 2021.11.1),
http (aiohttp = 3.8.1, aiohttp-retry = 2.4.6),
https (aiohttp = 3.8.1, aiohttp-retry = 2.4.6),
s3 (s3fs = 2021.11.1, boto3 = 1.19.8),
ssh (sshfs = 2021.11.2),
oss (ossfs = 2021.8.0),
webdav (webdav4 = 0.9.3),
webdavs (webdav4 = 0.9.3)
Cache types: <https://error.dvc.org/no-dvc-cache>
Caches: local
Remotes: https
Workspace directory: btrfs on /dev/mapper/nvme0n1p3_crypt
Repo: dvc, git
```
| [
{
"content": "import ntpath\nimport posixpath\n\n\nclass Path:\n def __init__(self, sep):\n if sep == posixpath.sep:\n self.flavour = posixpath\n elif sep == ntpath.sep:\n self.flavour = ntpath\n else:\n raise ValueError(f\"unsupported separator '{sep}'\")\n\n def join(self, *parts):\n return self.flavour.join(*parts)\n\n def parts(self, path):\n drive, path = self.flavour.splitdrive(path)\n\n ret = []\n while True:\n path, part = self.flavour.split(path)\n\n if part:\n ret.append(part)\n continue\n\n if path:\n ret.append(path)\n\n break\n\n ret.reverse()\n\n if drive:\n ret = [drive] + ret\n\n return tuple(ret)\n\n def parent(self, path):\n return self.flavour.dirname(path)\n\n def parents(self, path):\n parts = self.parts(path)\n return tuple(\n self.join(*parts[:length])\n for length in range(len(parts) - 1, 0, -1)\n )\n\n def name(self, path):\n return self.parts(path)[-1]\n\n def suffix(self, path):\n name = self.name(path)\n _, dot, suffix = name.partition(\".\")\n return dot + suffix\n\n def with_name(self, path, name):\n parts = list(self.parts(path))\n parts[-1] = name\n return self.join(*parts)\n\n def with_suffix(self, path, suffix):\n parts = list(self.parts(path))\n real_path, _, _ = parts[-1].partition(\".\")\n parts[-1] = real_path + suffix\n return self.join(*parts)\n\n def isin(self, left, right):\n left_parts = self.parts(left)\n right_parts = self.parts(right)\n left_len = len(left_parts)\n right_len = len(right_parts)\n return left_len > right_len and left_parts[:right_len] == right_parts\n\n def isin_or_eq(self, left, right):\n return left == right or self.isin(left, right)\n\n def overlaps(self, left, right):\n # pylint: disable=arguments-out-of-order\n return self.isin_or_eq(left, right) or self.isin(right, left)\n\n def relpath(self, path, start):\n assert start\n return self.flavour.relpath(path, start=start)\n\n def relparts(self, path, base):\n return self.parts(self.relpath(path, base))\n\n def as_posix(self, path):\n return path.replace(self.flavour.sep, posixpath.sep)\n",
"path": "dvc/fs/path.py"
}
] | [
{
"content": "import ntpath\nimport posixpath\n\n\nclass Path:\n def __init__(self, sep):\n if sep == posixpath.sep:\n self.flavour = posixpath\n elif sep == ntpath.sep:\n self.flavour = ntpath\n else:\n raise ValueError(f\"unsupported separator '{sep}'\")\n\n def join(self, *parts):\n return self.flavour.join(*parts)\n\n def parts(self, path):\n drive, path = self.flavour.splitdrive(path.rstrip(self.flavour.sep))\n\n ret = []\n while True:\n path, part = self.flavour.split(path)\n\n if part:\n ret.append(part)\n continue\n\n if path:\n ret.append(path)\n\n break\n\n ret.reverse()\n\n if drive:\n ret = [drive] + ret\n\n return tuple(ret)\n\n def parent(self, path):\n return self.flavour.dirname(path)\n\n def parents(self, path):\n parts = self.parts(path)\n return tuple(\n self.join(*parts[:length])\n for length in range(len(parts) - 1, 0, -1)\n )\n\n def name(self, path):\n return self.parts(path)[-1]\n\n def suffix(self, path):\n name = self.name(path)\n _, dot, suffix = name.partition(\".\")\n return dot + suffix\n\n def with_name(self, path, name):\n parts = list(self.parts(path))\n parts[-1] = name\n return self.join(*parts)\n\n def with_suffix(self, path, suffix):\n parts = list(self.parts(path))\n real_path, _, _ = parts[-1].partition(\".\")\n parts[-1] = real_path + suffix\n return self.join(*parts)\n\n def isin(self, left, right):\n left_parts = self.parts(left)\n right_parts = self.parts(right)\n left_len = len(left_parts)\n right_len = len(right_parts)\n return left_len > right_len and left_parts[:right_len] == right_parts\n\n def isin_or_eq(self, left, right):\n return left == right or self.isin(left, right)\n\n def overlaps(self, left, right):\n # pylint: disable=arguments-out-of-order\n return self.isin_or_eq(left, right) or self.isin(right, left)\n\n def relpath(self, path, start):\n assert start\n return self.flavour.relpath(path, start=start)\n\n def relparts(self, path, base):\n return self.parts(self.relpath(path, base))\n\n def as_posix(self, path):\n return path.replace(self.flavour.sep, posixpath.sep)\n",
"path": "dvc/fs/path.py"
}
] | diff --git a/dvc/fs/path.py b/dvc/fs/path.py
index b0c02db8c7..7545fb7a88 100644
--- a/dvc/fs/path.py
+++ b/dvc/fs/path.py
@@ -15,7 +15,7 @@ def join(self, *parts):
return self.flavour.join(*parts)
def parts(self, path):
- drive, path = self.flavour.splitdrive(path)
+ drive, path = self.flavour.splitdrive(path.rstrip(self.flavour.sep))
ret = []
while True:
diff --git a/tests/unit/fs/test_path.py b/tests/unit/fs/test_path.py
new file mode 100644
index 0000000000..caf41342c6
--- /dev/null
+++ b/tests/unit/fs/test_path.py
@@ -0,0 +1,29 @@
+import pytest
+
+from dvc.fs.path import Path
+
+
[email protected]("prefix", ["", "/"])
[email protected]("postfix", ["", "/"])
[email protected](
+ "path,expected",
+ [
+ ("path", ("path",)),
+ ("some/path", ("some", "path")),
+ ],
+)
+def test_parts_posix(prefix, postfix, path, expected):
+ assert Path("/").parts(prefix + path + postfix) == tuple(prefix) + expected
+
+
[email protected]("postfix", ["", "\\"])
[email protected](
+ "path,expected",
+ [
+ ("path", ("path",)),
+ ("c:\\path", ("c:", "\\", "path")),
+ ("some\\path", ("some", "path")),
+ ],
+)
+def test_parts_nt(postfix, path, expected):
+ assert Path("\\").parts(path + postfix) == expected
|
inventree__InvenTree-4843 | PanelMixin get_custom_panels not getting called for part list view
### Please verify that this bug has NOT been raised before.
- [X] I checked and didn't find a similar issue
### Describe the bug*
I want to add a custom part import panel, so I'm trying to use the PanelMixin in my plugin.
But I realized that the part list view "http://inventree_server/part/" ignores the plugin and never calls the get_custom_panels method.
### Steps to Reproduce
Create a plugin with PanelMixin that always returns a test panel (a minimal sketch is shown below).
Open the part list; the panel doesn't show.
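A minimal sketch of such a plugin, assuming the standard PanelMixin API (the class name and panel contents are made up for illustration):

```python
from plugin import InvenTreePlugin
from plugin.mixins import PanelMixin


class PanelTestPlugin(PanelMixin, InvenTreePlugin):
    """Always returns a single test panel, regardless of the view."""

    NAME = "PanelTestPlugin"
    SLUG = "paneltest"
    TITLE = "Panel Test Plugin"

    def get_custom_panels(self, view, request):
        # Return one panel unconditionally, so it should appear on every
        # view that supports custom panels.
        return [{
            'title': 'Test Panel',
            'icon': 'fa-box',
            'content': '<p>If you can read this, the panel was rendered.</p>',
        }]
```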
### Expected behaviour
The panel should be shown on the part list view.
### Deployment Method
- [ ] Docker
- [X] Bare metal
### Version Information
# Version Information:
InvenTree-Version: 0.10.0
Django Version: 3.2.16
Database: sqlite3
Debug-Mode: False
Deployed using Docker: False
Active plugins: [{'name': 'InvenTreeBarcode', 'slug': 'inventreebarcode', 'version': '2.0.0'}, {'name': 'InvenTreeCoreNotificationsPlugin', 'slug': 'inventreecorenotificationsplugin', 'version': '1.0.0'}, {'name': 'EMEImport', 'slug': 'emeimport', 'version': '0.0.1'}]
### Relevant log output
_No response_
| [
{
"content": "\"\"\"Django views for interacting with Part app.\"\"\"\n\nimport os\nfrom decimal import Decimal\n\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.core.exceptions import ValidationError\nfrom django.shortcuts import HttpResponseRedirect, get_object_or_404\nfrom django.urls import reverse\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views.generic import DetailView, ListView\n\nfrom common.files import FileManager\nfrom common.models import InvenTreeSetting\nfrom common.views import FileManagementAjaxView, FileManagementFormView\nfrom company.models import SupplierPart\nfrom InvenTree.helpers import str2bool, str2int\nfrom InvenTree.views import AjaxUpdateView, AjaxView, InvenTreeRoleMixin\nfrom plugin.views import InvenTreePluginViewMixin\nfrom stock.models import StockItem, StockLocation\n\nfrom . import forms as part_forms\nfrom . import settings as part_settings\nfrom .bom import ExportBom, IsValidBOMFormat, MakeBomTemplate\nfrom .models import Part, PartCategory\nfrom .part import MakePartTemplate\n\n\nclass PartIndex(InvenTreeRoleMixin, ListView):\n \"\"\"View for displaying list of Part objects.\"\"\"\n\n model = Part\n template_name = 'part/category.html'\n context_object_name = 'parts'\n\n def get_queryset(self):\n \"\"\"Custom queryset lookup to prefetch related fields\"\"\"\n return Part.objects.all().select_related('category')\n\n def get_context_data(self, **kwargs):\n \"\"\"Returns custom context data for the PartIndex view:\n\n - children: Number of child categories\n - category_count: Number of child categories\n - part_count: Number of parts contained\n \"\"\"\n context = super().get_context_data(**kwargs).copy()\n\n # View top-level categories\n children = PartCategory.objects.filter(parent=None)\n\n context['children'] = children\n context['category_count'] = PartCategory.objects.count()\n context['part_count'] = Part.objects.count()\n\n return context\n\n\nclass PartImport(FileManagementFormView):\n \"\"\"Part: Upload file, match to fields and import parts(using multi-Step form)\"\"\"\n permission_required = 'part.add'\n\n class PartFileManager(FileManager):\n \"\"\"Import field definitions\"\"\"\n REQUIRED_HEADERS = [\n 'Name',\n 'Description',\n ]\n\n OPTIONAL_MATCH_HEADERS = [\n 'Category',\n 'default_location',\n 'default_supplier',\n 'variant_of',\n ]\n\n OPTIONAL_HEADERS = [\n 'Keywords',\n 'IPN',\n 'Revision',\n 'Link',\n 'default_expiry',\n 'minimum_stock',\n 'Units',\n 'Notes',\n 'Active',\n 'base_cost',\n 'Multiple',\n 'Assembly',\n 'Component',\n 'is_template',\n 'Purchasable',\n 'Salable',\n 'Trackable',\n 'Virtual',\n 'Stock',\n 'Image',\n ]\n\n name = 'part'\n form_steps_template = [\n 'part/import_wizard/part_upload.html',\n 'part/import_wizard/match_fields.html',\n 'part/import_wizard/match_references.html',\n ]\n form_steps_description = [\n _(\"Upload File\"),\n _(\"Match Fields\"),\n _(\"Match References\"),\n ]\n\n form_field_map = {\n 'name': 'name',\n 'description': 'description',\n 'keywords': 'keywords',\n 'ipn': 'ipn',\n 'revision': 'revision',\n 'link': 'link',\n 'default_expiry': 'default_expiry',\n 'minimum_stock': 'minimum_stock',\n 'units': 'units',\n 'notes': 'notes',\n 'category': 'category',\n 'default_location': 'default_location',\n 'default_supplier': 'default_supplier',\n 'variant_of': 'variant_of',\n 'active': 'active',\n 'base_cost': 'base_cost',\n 'multiple': 'multiple',\n 'assembly': 'assembly',\n 'component': 'component',\n 'is_template': 'is_template',\n 
'purchaseable': 'purchaseable',\n 'salable': 'salable',\n 'trackable': 'trackable',\n 'virtual': 'virtual',\n 'stock': 'stock',\n 'image': 'image',\n }\n file_manager_class = PartFileManager\n\n def get_field_selection(self):\n \"\"\"Fill the form fields for step 3.\"\"\"\n # fetch available elements\n self.allowed_items = {}\n self.matches = {}\n\n self.allowed_items['Category'] = PartCategory.objects.all().exclude(structural=True)\n self.matches['Category'] = ['name__icontains']\n self.allowed_items['default_location'] = StockLocation.objects.all().exclude(structural=True)\n self.matches['default_location'] = ['name__icontains']\n self.allowed_items['default_supplier'] = SupplierPart.objects.all()\n self.matches['default_supplier'] = ['SKU__icontains']\n self.allowed_items['variant_of'] = Part.objects.all().exclude(is_template=False)\n self.matches['variant_of'] = ['name__icontains']\n\n # setup\n self.file_manager.setup()\n # collect submitted column indexes\n col_ids = {}\n for col in self.file_manager.HEADERS:\n index = self.get_column_index(col)\n if index >= 0:\n col_ids[col] = index\n\n # parse all rows\n for row in self.rows:\n # check each submitted column\n for idx in col_ids:\n data = row['data'][col_ids[idx]]['cell']\n\n if idx in self.file_manager.OPTIONAL_MATCH_HEADERS:\n try:\n exact_match = self.allowed_items[idx].get(**{a: data for a in self.matches[idx]})\n except (ValueError, self.allowed_items[idx].model.DoesNotExist, self.allowed_items[idx].model.MultipleObjectsReturned):\n exact_match = None\n\n row['match_options_' + idx] = self.allowed_items[idx]\n row['match_' + idx] = exact_match\n continue\n\n # general fields\n row[idx.lower()] = data\n\n def done(self, form_list, **kwargs):\n \"\"\"Create items.\"\"\"\n items = self.get_clean_items()\n\n import_done = 0\n import_error = []\n\n # Create Part instances\n for part_data in items.values():\n\n # set related parts\n optional_matches = {}\n for idx in self.file_manager.OPTIONAL_MATCH_HEADERS:\n if idx.lower() in part_data:\n try:\n optional_matches[idx] = self.allowed_items[idx].get(pk=int(part_data[idx.lower()]))\n except (ValueError, self.allowed_items[idx].model.DoesNotExist, self.allowed_items[idx].model.MultipleObjectsReturned):\n optional_matches[idx] = None\n else:\n optional_matches[idx] = None\n\n # add part\n new_part = Part(\n name=part_data.get('name', ''),\n description=part_data.get('description', ''),\n keywords=part_data.get('keywords', None),\n IPN=part_data.get('ipn', None),\n revision=part_data.get('revision', None),\n link=part_data.get('link', None),\n default_expiry=str2int(part_data.get('default_expiry'), 0),\n minimum_stock=str2int(part_data.get('minimum_stock'), 0),\n units=part_data.get('units', None),\n notes=part_data.get('notes', None),\n category=optional_matches['Category'],\n default_location=optional_matches['default_location'],\n default_supplier=optional_matches['default_supplier'],\n variant_of=optional_matches['variant_of'],\n active=str2bool(part_data.get('active', True)),\n base_cost=str2int(part_data.get('base_cost'), 0),\n multiple=str2int(part_data.get('multiple'), 1),\n assembly=str2bool(part_data.get('assembly', part_settings.part_assembly_default())),\n component=str2bool(part_data.get('component', part_settings.part_component_default())),\n is_template=str2bool(part_data.get('is_template', part_settings.part_template_default())),\n purchaseable=str2bool(part_data.get('purchaseable', part_settings.part_purchaseable_default())),\n salable=str2bool(part_data.get('salable', 
part_settings.part_salable_default())),\n trackable=str2bool(part_data.get('trackable', part_settings.part_trackable_default())),\n virtual=str2bool(part_data.get('virtual', part_settings.part_virtual_default())),\n image=part_data.get('image', None),\n )\n\n # check if theres a category assigned, if not skip this part or else bad things happen\n if not optional_matches['Category']:\n import_error.append(_(\"Can't import part {name} because there is no category assigned\").format(name=new_part.name))\n continue\n\n try:\n new_part.save()\n\n # add stock item if set\n if part_data.get('stock', None):\n stock = StockItem(\n part=new_part,\n location=new_part.default_location,\n quantity=int(part_data.get('stock', 1)),\n )\n stock.save()\n\n import_done += 1\n except ValidationError as _e:\n import_error.append(', '.join(set(_e.messages)))\n\n # Set alerts\n if import_done:\n alert = f\"<strong>{_('Part-Import')}</strong><br>{_('Imported {n} parts').format(n=import_done)}\"\n messages.success(self.request, alert)\n if import_error:\n error_text = '\\n'.join([f'<li><strong>{import_error.count(a)}</strong>: {a}</li>' for a in set(import_error)])\n messages.error(self.request, f\"<strong>{_('Some errors occured:')}</strong><br><ul>{error_text}</ul>\")\n\n return HttpResponseRedirect(reverse('part-index'))\n\n\nclass PartImportTemplate(AjaxView):\n \"\"\"Provide a part import template file for download.\n\n - Generates a template file in the provided format e.g. ?format=csv\n \"\"\"\n\n def get(self, request, *args, **kwargs):\n \"\"\"Perform a GET request to download the 'Part import' template\"\"\"\n export_format = request.GET.get('format', 'csv')\n\n return MakePartTemplate(export_format)\n\n\nclass PartImportAjax(FileManagementAjaxView, PartImport):\n \"\"\"Multi-step form wizard for importing Part data\"\"\"\n ajax_form_steps_template = [\n 'part/import_wizard/ajax_part_upload.html',\n 'part/import_wizard/ajax_match_fields.html',\n 'part/import_wizard/ajax_match_references.html',\n ]\n\n def validate(self, obj, form, **kwargs):\n \"\"\"Validation is performed based on the current form step\"\"\"\n return PartImport.validate(self, self.steps.current, form, **kwargs)\n\n\nclass PartDetail(InvenTreeRoleMixin, InvenTreePluginViewMixin, DetailView):\n \"\"\"Detail view for Part object.\"\"\"\n\n context_object_name = 'part'\n queryset = Part.objects.all().select_related('category')\n template_name = 'part/detail.html'\n form_class = part_forms.PartPriceForm\n\n # Add in some extra context information based on query params\n def get_context_data(self, **kwargs):\n \"\"\"Provide extra context data to template.\"\"\"\n context = super().get_context_data(**kwargs)\n\n part = self.get_object()\n\n ctx = part.get_context_data(self.request)\n\n context.update(**ctx)\n\n return context\n\n def get_quantity(self):\n \"\"\"Return set quantity in decimal format.\"\"\"\n return Decimal(self.request.POST.get('quantity', 1))\n\n def get_part(self):\n \"\"\"Return the Part instance associated with this view\"\"\"\n return self.get_object()\n\n def get_initials(self):\n \"\"\"Returns initials for form.\"\"\"\n return {'quantity': self.get_quantity()}\n\n def post(self, request, *args, **kwargs):\n \"\"\"POST action performs as a GET action\"\"\"\n self.object = self.get_object()\n kwargs['object'] = self.object\n ctx = self.get_context_data(**kwargs)\n return self.get(request, context=ctx)\n\n\nclass PartDetailFromIPN(PartDetail):\n \"\"\"Part detail view using the IPN (internal part number) of the Part as the 
lookup field\"\"\"\n\n slug_field = 'IPN'\n slug_url_kwarg = 'slug'\n\n def get_object(self):\n \"\"\"Return Part object which IPN field matches the slug value.\"\"\"\n queryset = self.get_queryset()\n # Get slug\n slug = self.kwargs.get(self.slug_url_kwarg)\n\n if slug is not None:\n slug_field = self.get_slug_field()\n # Filter by the slug value\n queryset = queryset.filter(**{slug_field: slug})\n\n try:\n # Get unique part from queryset\n part = queryset.get()\n # Return Part object\n return part\n except queryset.model.MultipleObjectsReturned:\n pass\n except queryset.model.DoesNotExist:\n pass\n\n return None\n\n def get(self, request, *args, **kwargs):\n \"\"\"Attempt to match slug to a Part, else redirect to PartIndex view.\"\"\"\n self.object = self.get_object()\n\n if not self.object:\n return HttpResponseRedirect(reverse('part-index'))\n\n return super(PartDetailFromIPN, self).get(request, *args, **kwargs)\n\n\nclass PartImageSelect(AjaxUpdateView):\n \"\"\"View for selecting Part image from existing images.\"\"\"\n\n model = Part\n ajax_template_name = 'part/select_image.html'\n ajax_form_title = _('Select Part Image')\n\n fields = [\n 'image',\n ]\n\n def post(self, request, *args, **kwargs):\n \"\"\"Perform POST action to assign selected image to the Part instance\"\"\"\n part = self.get_object()\n form = self.get_form()\n\n img = request.POST.get('image', '')\n\n img = os.path.basename(img)\n\n data = {}\n\n if img:\n img_path = settings.MEDIA_ROOT.joinpath('part_images', img)\n\n # Ensure that the image already exists\n if os.path.exists(img_path):\n\n part.image = os.path.join('part_images', img)\n part.save()\n\n data['success'] = _('Updated part image')\n\n if 'success' not in data:\n data['error'] = _('Part image not found')\n\n return self.renderJsonResponse(request, form, data)\n\n\nclass BomUpload(InvenTreeRoleMixin, DetailView):\n \"\"\"View for uploading a BOM file, and handling BOM data importing.\"\"\"\n\n context_object_name = 'part'\n queryset = Part.objects.all()\n template_name = 'part/upload_bom.html'\n\n\nclass BomUploadTemplate(AjaxView):\n \"\"\"Provide a BOM upload template file for download.\n\n - Generates a template file in the provided format e.g. ?format=csv\n \"\"\"\n\n def get(self, request, *args, **kwargs):\n \"\"\"Perform a GET request to download the 'BOM upload' template\"\"\"\n export_format = request.GET.get('format', 'csv')\n\n return MakeBomTemplate(export_format)\n\n\nclass BomDownload(AjaxView):\n \"\"\"Provide raw download of a BOM file.\n\n - File format should be passed as a query param e.g. 
?format=csv\n \"\"\"\n\n role_required = 'part.view'\n\n model = Part\n\n def get(self, request, *args, **kwargs):\n \"\"\"Perform GET request to download BOM data\"\"\"\n part = get_object_or_404(Part, pk=self.kwargs['pk'])\n\n export_format = request.GET.get('format', 'csv')\n\n cascade = str2bool(request.GET.get('cascade', False))\n\n parameter_data = str2bool(request.GET.get('parameter_data', False))\n\n substitute_part_data = str2bool(request.GET.get('substitute_part_data', False))\n\n stock_data = str2bool(request.GET.get('stock_data', False))\n\n supplier_data = str2bool(request.GET.get('supplier_data', False))\n\n manufacturer_data = str2bool(request.GET.get('manufacturer_data', False))\n\n pricing_data = str2bool(request.GET.get('pricing_data', False))\n\n levels = request.GET.get('levels', None)\n\n if levels is not None:\n try:\n levels = int(levels)\n\n if levels <= 0:\n levels = None\n\n except ValueError:\n levels = None\n\n if not IsValidBOMFormat(export_format):\n export_format = 'csv'\n\n return ExportBom(part,\n fmt=export_format,\n cascade=cascade,\n max_levels=levels,\n parameter_data=parameter_data,\n stock_data=stock_data,\n supplier_data=supplier_data,\n manufacturer_data=manufacturer_data,\n pricing_data=pricing_data,\n substitute_part_data=substitute_part_data,\n )\n\n def get_data(self):\n \"\"\"Return a custom message\"\"\"\n return {\n 'info': 'Exported BOM'\n }\n\n\nclass PartPricing(AjaxView):\n \"\"\"View for inspecting part pricing information.\"\"\"\n\n model = Part\n ajax_template_name = \"part/part_pricing.html\"\n ajax_form_title = _(\"Part Pricing\")\n form_class = part_forms.PartPriceForm\n\n role_required = ['sales_order.view', 'part.view']\n\n def get_quantity(self):\n \"\"\"Return set quantity in decimal format.\"\"\"\n return Decimal(self.request.POST.get('quantity', 1))\n\n def get_part(self):\n \"\"\"Return the Part instance associated with this view\"\"\"\n try:\n return Part.objects.get(id=self.kwargs['pk'])\n except Part.DoesNotExist:\n return None\n\n def get_pricing(self, quantity=1, currency=None):\n \"\"\"Returns context with pricing information.\"\"\"\n if quantity <= 0:\n quantity = 1\n\n # TODO - Capacity for price comparison in different currencies\n currency = None\n\n # Currency scaler\n scaler = Decimal(1.0)\n\n part = self.get_part()\n\n ctx = {\n 'part': part,\n 'quantity': quantity,\n 'currency': currency,\n }\n\n if part is None:\n return ctx\n\n # Supplier pricing information\n if part.supplier_count > 0:\n buy_price = part.get_supplier_price_range(quantity)\n\n if buy_price is not None:\n min_buy_price, max_buy_price = buy_price\n\n min_buy_price /= scaler\n max_buy_price /= scaler\n\n min_unit_buy_price = round(min_buy_price / quantity, 3)\n max_unit_buy_price = round(max_buy_price / quantity, 3)\n\n min_buy_price = round(min_buy_price, 3)\n max_buy_price = round(max_buy_price, 3)\n\n if min_buy_price:\n ctx['min_total_buy_price'] = min_buy_price\n ctx['min_unit_buy_price'] = min_unit_buy_price\n\n if max_buy_price:\n ctx['max_total_buy_price'] = max_buy_price\n ctx['max_unit_buy_price'] = max_unit_buy_price\n\n # BOM pricing information\n if part.bom_count > 0:\n\n use_internal = InvenTreeSetting.get_setting('PART_BOM_USE_INTERNAL_PRICE', False)\n bom_price = part.get_bom_price_range(quantity, internal=use_internal)\n purchase_price = part.get_bom_price_range(quantity, purchase=True)\n\n if bom_price is not None:\n min_bom_price, max_bom_price = bom_price\n\n min_bom_price /= scaler\n max_bom_price /= scaler\n\n if 
min_bom_price:\n ctx['min_total_bom_price'] = round(min_bom_price, 3)\n ctx['min_unit_bom_price'] = round(min_bom_price / quantity, 3)\n\n if max_bom_price:\n ctx['max_total_bom_price'] = round(max_bom_price, 3)\n ctx['max_unit_bom_price'] = round(max_bom_price / quantity, 3)\n\n if purchase_price is not None:\n min_bom_purchase_price, max_bom_purchase_price = purchase_price\n\n min_bom_purchase_price /= scaler\n max_bom_purchase_price /= scaler\n if min_bom_purchase_price:\n ctx['min_total_bom_purchase_price'] = round(min_bom_purchase_price, 3)\n ctx['min_unit_bom_purchase_price'] = round(min_bom_purchase_price / quantity, 3)\n\n if max_bom_purchase_price:\n ctx['max_total_bom_purchase_price'] = round(max_bom_purchase_price, 3)\n ctx['max_unit_bom_purchase_price'] = round(max_bom_purchase_price / quantity, 3)\n\n # internal part pricing information\n internal_part_price = part.get_internal_price(quantity)\n if internal_part_price is not None:\n ctx['total_internal_part_price'] = round(internal_part_price, 3)\n ctx['unit_internal_part_price'] = round(internal_part_price / quantity, 3)\n\n # part pricing information\n part_price = part.get_price(quantity)\n if part_price is not None:\n ctx['total_part_price'] = round(part_price, 3)\n ctx['unit_part_price'] = round(part_price / quantity, 3)\n\n return ctx\n\n def get_initials(self):\n \"\"\"Returns initials for form.\"\"\"\n return {'quantity': self.get_quantity()}\n\n def get(self, request, *args, **kwargs):\n \"\"\"Perform custom GET action for this view\"\"\"\n init = self.get_initials()\n qty = self.get_quantity()\n\n return self.renderJsonResponse(request, self.form_class(initial=init), context=self.get_pricing(qty))\n\n def post(self, request, *args, **kwargs):\n \"\"\"Perform custom POST action for this view\"\"\"\n currency = None\n\n quantity = self.get_quantity()\n\n # Retain quantity value set by user\n form = self.form_class(initial=self.get_initials())\n\n # TODO - How to handle pricing in different currencies?\n currency = None\n\n # check if data is set\n try:\n data = self.data\n except AttributeError:\n data = {}\n\n # Always mark the form as 'invalid' (the user may wish to keep getting pricing data)\n data['form_valid'] = False\n\n return self.renderJsonResponse(request, form, data=data, context=self.get_pricing(quantity, currency))\n\n\nclass CategoryDetail(InvenTreeRoleMixin, InvenTreePluginViewMixin, DetailView):\n \"\"\"Detail view for PartCategory.\"\"\"\n\n model = PartCategory\n context_object_name = 'category'\n queryset = PartCategory.objects.all().prefetch_related('children')\n template_name = 'part/category.html'\n\n def get_context_data(self, **kwargs):\n \"\"\"Returns custom context data for the CategoryDetail view:\n\n - part_count: Number of parts in this category\n - starred_directly: True if this category is starred directly by the requesting user\n - starred: True if this category is starred by the requesting user\n \"\"\"\n context = super().get_context_data(**kwargs).copy()\n\n try:\n context['part_count'] = kwargs['object'].partcount()\n except KeyError:\n context['part_count'] = 0\n\n # Get current category\n category = kwargs.get('object', None)\n\n if category:\n\n # Insert \"starred\" information\n context['starred_directly'] = category.is_starred_by(\n self.request.user,\n include_parents=False,\n )\n\n if context['starred_directly']:\n # Save a database lookup - if 'starred_directly' is True, we know 'starred' is also\n context['starred'] = True\n else:\n context['starred'] = 
category.is_starred_by(self.request.user)\n\n return context\n",
"path": "InvenTree/part/views.py"
}
] | [
{
"content": "\"\"\"Django views for interacting with Part app.\"\"\"\n\nimport os\nfrom decimal import Decimal\n\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.core.exceptions import ValidationError\nfrom django.shortcuts import HttpResponseRedirect, get_object_or_404\nfrom django.urls import reverse\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views.generic import DetailView, ListView\n\nfrom common.files import FileManager\nfrom common.models import InvenTreeSetting\nfrom common.views import FileManagementAjaxView, FileManagementFormView\nfrom company.models import SupplierPart\nfrom InvenTree.helpers import str2bool, str2int\nfrom InvenTree.views import AjaxUpdateView, AjaxView, InvenTreeRoleMixin\nfrom plugin.views import InvenTreePluginViewMixin\nfrom stock.models import StockItem, StockLocation\n\nfrom . import forms as part_forms\nfrom . import settings as part_settings\nfrom .bom import ExportBom, IsValidBOMFormat, MakeBomTemplate\nfrom .models import Part, PartCategory\nfrom .part import MakePartTemplate\n\n\nclass PartIndex(InvenTreeRoleMixin, InvenTreePluginViewMixin, ListView):\n \"\"\"View for displaying list of Part objects.\"\"\"\n\n model = Part\n template_name = 'part/category.html'\n context_object_name = 'parts'\n\n def get_queryset(self):\n \"\"\"Custom queryset lookup to prefetch related fields\"\"\"\n return Part.objects.all().select_related('category')\n\n def get_context_data(self, **kwargs):\n \"\"\"Returns custom context data for the PartIndex view:\n\n - children: Number of child categories\n - category_count: Number of child categories\n - part_count: Number of parts contained\n \"\"\"\n context = super().get_context_data(**kwargs).copy()\n\n # View top-level categories\n children = PartCategory.objects.filter(parent=None)\n\n context['children'] = children\n context['category_count'] = PartCategory.objects.count()\n context['part_count'] = Part.objects.count()\n\n return context\n\n\nclass PartImport(FileManagementFormView):\n \"\"\"Part: Upload file, match to fields and import parts(using multi-Step form)\"\"\"\n permission_required = 'part.add'\n\n class PartFileManager(FileManager):\n \"\"\"Import field definitions\"\"\"\n REQUIRED_HEADERS = [\n 'Name',\n 'Description',\n ]\n\n OPTIONAL_MATCH_HEADERS = [\n 'Category',\n 'default_location',\n 'default_supplier',\n 'variant_of',\n ]\n\n OPTIONAL_HEADERS = [\n 'Keywords',\n 'IPN',\n 'Revision',\n 'Link',\n 'default_expiry',\n 'minimum_stock',\n 'Units',\n 'Notes',\n 'Active',\n 'base_cost',\n 'Multiple',\n 'Assembly',\n 'Component',\n 'is_template',\n 'Purchasable',\n 'Salable',\n 'Trackable',\n 'Virtual',\n 'Stock',\n 'Image',\n ]\n\n name = 'part'\n form_steps_template = [\n 'part/import_wizard/part_upload.html',\n 'part/import_wizard/match_fields.html',\n 'part/import_wizard/match_references.html',\n ]\n form_steps_description = [\n _(\"Upload File\"),\n _(\"Match Fields\"),\n _(\"Match References\"),\n ]\n\n form_field_map = {\n 'name': 'name',\n 'description': 'description',\n 'keywords': 'keywords',\n 'ipn': 'ipn',\n 'revision': 'revision',\n 'link': 'link',\n 'default_expiry': 'default_expiry',\n 'minimum_stock': 'minimum_stock',\n 'units': 'units',\n 'notes': 'notes',\n 'category': 'category',\n 'default_location': 'default_location',\n 'default_supplier': 'default_supplier',\n 'variant_of': 'variant_of',\n 'active': 'active',\n 'base_cost': 'base_cost',\n 'multiple': 'multiple',\n 'assembly': 'assembly',\n 'component': 'component',\n 
'is_template': 'is_template',\n 'purchaseable': 'purchaseable',\n 'salable': 'salable',\n 'trackable': 'trackable',\n 'virtual': 'virtual',\n 'stock': 'stock',\n 'image': 'image',\n }\n file_manager_class = PartFileManager\n\n def get_field_selection(self):\n \"\"\"Fill the form fields for step 3.\"\"\"\n # fetch available elements\n self.allowed_items = {}\n self.matches = {}\n\n self.allowed_items['Category'] = PartCategory.objects.all().exclude(structural=True)\n self.matches['Category'] = ['name__icontains']\n self.allowed_items['default_location'] = StockLocation.objects.all().exclude(structural=True)\n self.matches['default_location'] = ['name__icontains']\n self.allowed_items['default_supplier'] = SupplierPart.objects.all()\n self.matches['default_supplier'] = ['SKU__icontains']\n self.allowed_items['variant_of'] = Part.objects.all().exclude(is_template=False)\n self.matches['variant_of'] = ['name__icontains']\n\n # setup\n self.file_manager.setup()\n # collect submitted column indexes\n col_ids = {}\n for col in self.file_manager.HEADERS:\n index = self.get_column_index(col)\n if index >= 0:\n col_ids[col] = index\n\n # parse all rows\n for row in self.rows:\n # check each submitted column\n for idx in col_ids:\n data = row['data'][col_ids[idx]]['cell']\n\n if idx in self.file_manager.OPTIONAL_MATCH_HEADERS:\n try:\n exact_match = self.allowed_items[idx].get(**{a: data for a in self.matches[idx]})\n except (ValueError, self.allowed_items[idx].model.DoesNotExist, self.allowed_items[idx].model.MultipleObjectsReturned):\n exact_match = None\n\n row['match_options_' + idx] = self.allowed_items[idx]\n row['match_' + idx] = exact_match\n continue\n\n # general fields\n row[idx.lower()] = data\n\n def done(self, form_list, **kwargs):\n \"\"\"Create items.\"\"\"\n items = self.get_clean_items()\n\n import_done = 0\n import_error = []\n\n # Create Part instances\n for part_data in items.values():\n\n # set related parts\n optional_matches = {}\n for idx in self.file_manager.OPTIONAL_MATCH_HEADERS:\n if idx.lower() in part_data:\n try:\n optional_matches[idx] = self.allowed_items[idx].get(pk=int(part_data[idx.lower()]))\n except (ValueError, self.allowed_items[idx].model.DoesNotExist, self.allowed_items[idx].model.MultipleObjectsReturned):\n optional_matches[idx] = None\n else:\n optional_matches[idx] = None\n\n # add part\n new_part = Part(\n name=part_data.get('name', ''),\n description=part_data.get('description', ''),\n keywords=part_data.get('keywords', None),\n IPN=part_data.get('ipn', None),\n revision=part_data.get('revision', None),\n link=part_data.get('link', None),\n default_expiry=str2int(part_data.get('default_expiry'), 0),\n minimum_stock=str2int(part_data.get('minimum_stock'), 0),\n units=part_data.get('units', None),\n notes=part_data.get('notes', None),\n category=optional_matches['Category'],\n default_location=optional_matches['default_location'],\n default_supplier=optional_matches['default_supplier'],\n variant_of=optional_matches['variant_of'],\n active=str2bool(part_data.get('active', True)),\n base_cost=str2int(part_data.get('base_cost'), 0),\n multiple=str2int(part_data.get('multiple'), 1),\n assembly=str2bool(part_data.get('assembly', part_settings.part_assembly_default())),\n component=str2bool(part_data.get('component', part_settings.part_component_default())),\n is_template=str2bool(part_data.get('is_template', part_settings.part_template_default())),\n purchaseable=str2bool(part_data.get('purchaseable', part_settings.part_purchaseable_default())),\n 
salable=str2bool(part_data.get('salable', part_settings.part_salable_default())),\n trackable=str2bool(part_data.get('trackable', part_settings.part_trackable_default())),\n virtual=str2bool(part_data.get('virtual', part_settings.part_virtual_default())),\n image=part_data.get('image', None),\n )\n\n # check if theres a category assigned, if not skip this part or else bad things happen\n if not optional_matches['Category']:\n import_error.append(_(\"Can't import part {name} because there is no category assigned\").format(name=new_part.name))\n continue\n\n try:\n new_part.save()\n\n # add stock item if set\n if part_data.get('stock', None):\n stock = StockItem(\n part=new_part,\n location=new_part.default_location,\n quantity=int(part_data.get('stock', 1)),\n )\n stock.save()\n\n import_done += 1\n except ValidationError as _e:\n import_error.append(', '.join(set(_e.messages)))\n\n # Set alerts\n if import_done:\n alert = f\"<strong>{_('Part-Import')}</strong><br>{_('Imported {n} parts').format(n=import_done)}\"\n messages.success(self.request, alert)\n if import_error:\n error_text = '\\n'.join([f'<li><strong>{import_error.count(a)}</strong>: {a}</li>' for a in set(import_error)])\n messages.error(self.request, f\"<strong>{_('Some errors occured:')}</strong><br><ul>{error_text}</ul>\")\n\n return HttpResponseRedirect(reverse('part-index'))\n\n\nclass PartImportTemplate(AjaxView):\n \"\"\"Provide a part import template file for download.\n\n - Generates a template file in the provided format e.g. ?format=csv\n \"\"\"\n\n def get(self, request, *args, **kwargs):\n \"\"\"Perform a GET request to download the 'Part import' template\"\"\"\n export_format = request.GET.get('format', 'csv')\n\n return MakePartTemplate(export_format)\n\n\nclass PartImportAjax(FileManagementAjaxView, PartImport):\n \"\"\"Multi-step form wizard for importing Part data\"\"\"\n ajax_form_steps_template = [\n 'part/import_wizard/ajax_part_upload.html',\n 'part/import_wizard/ajax_match_fields.html',\n 'part/import_wizard/ajax_match_references.html',\n ]\n\n def validate(self, obj, form, **kwargs):\n \"\"\"Validation is performed based on the current form step\"\"\"\n return PartImport.validate(self, self.steps.current, form, **kwargs)\n\n\nclass PartDetail(InvenTreeRoleMixin, InvenTreePluginViewMixin, DetailView):\n \"\"\"Detail view for Part object.\"\"\"\n\n context_object_name = 'part'\n queryset = Part.objects.all().select_related('category')\n template_name = 'part/detail.html'\n form_class = part_forms.PartPriceForm\n\n # Add in some extra context information based on query params\n def get_context_data(self, **kwargs):\n \"\"\"Provide extra context data to template.\"\"\"\n context = super().get_context_data(**kwargs)\n\n part = self.get_object()\n\n ctx = part.get_context_data(self.request)\n\n context.update(**ctx)\n\n return context\n\n def get_quantity(self):\n \"\"\"Return set quantity in decimal format.\"\"\"\n return Decimal(self.request.POST.get('quantity', 1))\n\n def get_part(self):\n \"\"\"Return the Part instance associated with this view\"\"\"\n return self.get_object()\n\n def get_initials(self):\n \"\"\"Returns initials for form.\"\"\"\n return {'quantity': self.get_quantity()}\n\n def post(self, request, *args, **kwargs):\n \"\"\"POST action performs as a GET action\"\"\"\n self.object = self.get_object()\n kwargs['object'] = self.object\n ctx = self.get_context_data(**kwargs)\n return self.get(request, context=ctx)\n\n\nclass PartDetailFromIPN(PartDetail):\n \"\"\"Part detail view using the IPN 
(internal part number) of the Part as the lookup field\"\"\"\n\n slug_field = 'IPN'\n slug_url_kwarg = 'slug'\n\n def get_object(self):\n \"\"\"Return Part object which IPN field matches the slug value.\"\"\"\n queryset = self.get_queryset()\n # Get slug\n slug = self.kwargs.get(self.slug_url_kwarg)\n\n if slug is not None:\n slug_field = self.get_slug_field()\n # Filter by the slug value\n queryset = queryset.filter(**{slug_field: slug})\n\n try:\n # Get unique part from queryset\n part = queryset.get()\n # Return Part object\n return part\n except queryset.model.MultipleObjectsReturned:\n pass\n except queryset.model.DoesNotExist:\n pass\n\n return None\n\n def get(self, request, *args, **kwargs):\n \"\"\"Attempt to match slug to a Part, else redirect to PartIndex view.\"\"\"\n self.object = self.get_object()\n\n if not self.object:\n return HttpResponseRedirect(reverse('part-index'))\n\n return super(PartDetailFromIPN, self).get(request, *args, **kwargs)\n\n\nclass PartImageSelect(AjaxUpdateView):\n \"\"\"View for selecting Part image from existing images.\"\"\"\n\n model = Part\n ajax_template_name = 'part/select_image.html'\n ajax_form_title = _('Select Part Image')\n\n fields = [\n 'image',\n ]\n\n def post(self, request, *args, **kwargs):\n \"\"\"Perform POST action to assign selected image to the Part instance\"\"\"\n part = self.get_object()\n form = self.get_form()\n\n img = request.POST.get('image', '')\n\n img = os.path.basename(img)\n\n data = {}\n\n if img:\n img_path = settings.MEDIA_ROOT.joinpath('part_images', img)\n\n # Ensure that the image already exists\n if os.path.exists(img_path):\n\n part.image = os.path.join('part_images', img)\n part.save()\n\n data['success'] = _('Updated part image')\n\n if 'success' not in data:\n data['error'] = _('Part image not found')\n\n return self.renderJsonResponse(request, form, data)\n\n\nclass BomUpload(InvenTreeRoleMixin, DetailView):\n \"\"\"View for uploading a BOM file, and handling BOM data importing.\"\"\"\n\n context_object_name = 'part'\n queryset = Part.objects.all()\n template_name = 'part/upload_bom.html'\n\n\nclass BomUploadTemplate(AjaxView):\n \"\"\"Provide a BOM upload template file for download.\n\n - Generates a template file in the provided format e.g. ?format=csv\n \"\"\"\n\n def get(self, request, *args, **kwargs):\n \"\"\"Perform a GET request to download the 'BOM upload' template\"\"\"\n export_format = request.GET.get('format', 'csv')\n\n return MakeBomTemplate(export_format)\n\n\nclass BomDownload(AjaxView):\n \"\"\"Provide raw download of a BOM file.\n\n - File format should be passed as a query param e.g. 
?format=csv\n \"\"\"\n\n role_required = 'part.view'\n\n model = Part\n\n def get(self, request, *args, **kwargs):\n \"\"\"Perform GET request to download BOM data\"\"\"\n part = get_object_or_404(Part, pk=self.kwargs['pk'])\n\n export_format = request.GET.get('format', 'csv')\n\n cascade = str2bool(request.GET.get('cascade', False))\n\n parameter_data = str2bool(request.GET.get('parameter_data', False))\n\n substitute_part_data = str2bool(request.GET.get('substitute_part_data', False))\n\n stock_data = str2bool(request.GET.get('stock_data', False))\n\n supplier_data = str2bool(request.GET.get('supplier_data', False))\n\n manufacturer_data = str2bool(request.GET.get('manufacturer_data', False))\n\n pricing_data = str2bool(request.GET.get('pricing_data', False))\n\n levels = request.GET.get('levels', None)\n\n if levels is not None:\n try:\n levels = int(levels)\n\n if levels <= 0:\n levels = None\n\n except ValueError:\n levels = None\n\n if not IsValidBOMFormat(export_format):\n export_format = 'csv'\n\n return ExportBom(part,\n fmt=export_format,\n cascade=cascade,\n max_levels=levels,\n parameter_data=parameter_data,\n stock_data=stock_data,\n supplier_data=supplier_data,\n manufacturer_data=manufacturer_data,\n pricing_data=pricing_data,\n substitute_part_data=substitute_part_data,\n )\n\n def get_data(self):\n \"\"\"Return a custom message\"\"\"\n return {\n 'info': 'Exported BOM'\n }\n\n\nclass PartPricing(AjaxView):\n \"\"\"View for inspecting part pricing information.\"\"\"\n\n model = Part\n ajax_template_name = \"part/part_pricing.html\"\n ajax_form_title = _(\"Part Pricing\")\n form_class = part_forms.PartPriceForm\n\n role_required = ['sales_order.view', 'part.view']\n\n def get_quantity(self):\n \"\"\"Return set quantity in decimal format.\"\"\"\n return Decimal(self.request.POST.get('quantity', 1))\n\n def get_part(self):\n \"\"\"Return the Part instance associated with this view\"\"\"\n try:\n return Part.objects.get(id=self.kwargs['pk'])\n except Part.DoesNotExist:\n return None\n\n def get_pricing(self, quantity=1, currency=None):\n \"\"\"Returns context with pricing information.\"\"\"\n if quantity <= 0:\n quantity = 1\n\n # TODO - Capacity for price comparison in different currencies\n currency = None\n\n # Currency scaler\n scaler = Decimal(1.0)\n\n part = self.get_part()\n\n ctx = {\n 'part': part,\n 'quantity': quantity,\n 'currency': currency,\n }\n\n if part is None:\n return ctx\n\n # Supplier pricing information\n if part.supplier_count > 0:\n buy_price = part.get_supplier_price_range(quantity)\n\n if buy_price is not None:\n min_buy_price, max_buy_price = buy_price\n\n min_buy_price /= scaler\n max_buy_price /= scaler\n\n min_unit_buy_price = round(min_buy_price / quantity, 3)\n max_unit_buy_price = round(max_buy_price / quantity, 3)\n\n min_buy_price = round(min_buy_price, 3)\n max_buy_price = round(max_buy_price, 3)\n\n if min_buy_price:\n ctx['min_total_buy_price'] = min_buy_price\n ctx['min_unit_buy_price'] = min_unit_buy_price\n\n if max_buy_price:\n ctx['max_total_buy_price'] = max_buy_price\n ctx['max_unit_buy_price'] = max_unit_buy_price\n\n # BOM pricing information\n if part.bom_count > 0:\n\n use_internal = InvenTreeSetting.get_setting('PART_BOM_USE_INTERNAL_PRICE', False)\n bom_price = part.get_bom_price_range(quantity, internal=use_internal)\n purchase_price = part.get_bom_price_range(quantity, purchase=True)\n\n if bom_price is not None:\n min_bom_price, max_bom_price = bom_price\n\n min_bom_price /= scaler\n max_bom_price /= scaler\n\n if 
min_bom_price:\n ctx['min_total_bom_price'] = round(min_bom_price, 3)\n ctx['min_unit_bom_price'] = round(min_bom_price / quantity, 3)\n\n if max_bom_price:\n ctx['max_total_bom_price'] = round(max_bom_price, 3)\n ctx['max_unit_bom_price'] = round(max_bom_price / quantity, 3)\n\n if purchase_price is not None:\n min_bom_purchase_price, max_bom_purchase_price = purchase_price\n\n min_bom_purchase_price /= scaler\n max_bom_purchase_price /= scaler\n if min_bom_purchase_price:\n ctx['min_total_bom_purchase_price'] = round(min_bom_purchase_price, 3)\n ctx['min_unit_bom_purchase_price'] = round(min_bom_purchase_price / quantity, 3)\n\n if max_bom_purchase_price:\n ctx['max_total_bom_purchase_price'] = round(max_bom_purchase_price, 3)\n ctx['max_unit_bom_purchase_price'] = round(max_bom_purchase_price / quantity, 3)\n\n # internal part pricing information\n internal_part_price = part.get_internal_price(quantity)\n if internal_part_price is not None:\n ctx['total_internal_part_price'] = round(internal_part_price, 3)\n ctx['unit_internal_part_price'] = round(internal_part_price / quantity, 3)\n\n # part pricing information\n part_price = part.get_price(quantity)\n if part_price is not None:\n ctx['total_part_price'] = round(part_price, 3)\n ctx['unit_part_price'] = round(part_price / quantity, 3)\n\n return ctx\n\n def get_initials(self):\n \"\"\"Returns initials for form.\"\"\"\n return {'quantity': self.get_quantity()}\n\n def get(self, request, *args, **kwargs):\n \"\"\"Perform custom GET action for this view\"\"\"\n init = self.get_initials()\n qty = self.get_quantity()\n\n return self.renderJsonResponse(request, self.form_class(initial=init), context=self.get_pricing(qty))\n\n def post(self, request, *args, **kwargs):\n \"\"\"Perform custom POST action for this view\"\"\"\n currency = None\n\n quantity = self.get_quantity()\n\n # Retain quantity value set by user\n form = self.form_class(initial=self.get_initials())\n\n # TODO - How to handle pricing in different currencies?\n currency = None\n\n # check if data is set\n try:\n data = self.data\n except AttributeError:\n data = {}\n\n # Always mark the form as 'invalid' (the user may wish to keep getting pricing data)\n data['form_valid'] = False\n\n return self.renderJsonResponse(request, form, data=data, context=self.get_pricing(quantity, currency))\n\n\nclass CategoryDetail(InvenTreeRoleMixin, InvenTreePluginViewMixin, DetailView):\n \"\"\"Detail view for PartCategory.\"\"\"\n\n model = PartCategory\n context_object_name = 'category'\n queryset = PartCategory.objects.all().prefetch_related('children')\n template_name = 'part/category.html'\n\n def get_context_data(self, **kwargs):\n \"\"\"Returns custom context data for the CategoryDetail view:\n\n - part_count: Number of parts in this category\n - starred_directly: True if this category is starred directly by the requesting user\n - starred: True if this category is starred by the requesting user\n \"\"\"\n context = super().get_context_data(**kwargs).copy()\n\n try:\n context['part_count'] = kwargs['object'].partcount()\n except KeyError:\n context['part_count'] = 0\n\n # Get current category\n category = kwargs.get('object', None)\n\n if category:\n\n # Insert \"starred\" information\n context['starred_directly'] = category.is_starred_by(\n self.request.user,\n include_parents=False,\n )\n\n if context['starred_directly']:\n # Save a database lookup - if 'starred_directly' is True, we know 'starred' is also\n context['starred'] = True\n else:\n context['starred'] = 
category.is_starred_by(self.request.user)\n\n return context\n",
"path": "InvenTree/part/views.py"
}
] | diff --git a/InvenTree/part/views.py b/InvenTree/part/views.py
index 93b088716494..db1cf52df206 100644
--- a/InvenTree/part/views.py
+++ b/InvenTree/part/views.py
@@ -27,7 +27,7 @@
from .part import MakePartTemplate
-class PartIndex(InvenTreeRoleMixin, ListView):
+class PartIndex(InvenTreeRoleMixin, InvenTreePluginViewMixin, ListView):
"""View for displaying list of Part objects."""
model = Part
|
LibraryOfCongress__concordia-463 | Pagination and filtering don't work together
**What behavior did you observe? Please describe the bug**
The filter became unset and the view went back to showing all images.
**How can we reproduce the bug?**
Steps to reproduce the behavior:
1. Go to an item that has several assets in open and submitted states.
2. Use the filter to view only the assets that are submitted for review.
3. Scroll down and click page 2.
**What is the expected behavior?**
When I click page 2, the filter should be maintained.
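
For context, the pagination links are built as bare `?page=N` URLs, so every other query parameter (including the status filter) is dropped when a page link is clicked. The accompanying diff fixes this by rewriting those links with the `qs_alter` tag loaded from `bittersweet_querystring`, which keeps the rest of the query string intact. A minimal standalone sketch of the same idea (the tag name `replace_page` is made up for illustration; it is not the tag the project ships):

```python
from django import template

register = template.Library()


@register.simple_tag
def replace_page(query_params, page_number):
    """Return the current query string with only ``page`` swapped out,
    so any active filters survive pagination clicks."""
    params = query_params.copy()   # request.GET is immutable; copy() is not
    params["page"] = page_number
    return params.urlencode()
```

A pagination link would then be written as `<a href="?{% replace_page request.GET page_obj.next_page_number %}">` rather than `<a href="?page={{ page_obj.next_page_number }}">`.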
| [
{
"content": "# TODO: use correct copyright header\nimport os\n\nfrom django.contrib import messages\nfrom dotenv import load_dotenv\n\n# Build paths inside the project like this: os.path.join(SITE_ROOT_DIR, ...)\nCONCORDIA_APP_DIR = os.path.abspath(os.path.dirname(__file__))\nSITE_ROOT_DIR = os.path.dirname(CONCORDIA_APP_DIR)\n\n# Build path for and load .env file.\ndotenv_path = os.path.join(SITE_ROOT_DIR, \".env\")\nload_dotenv(dotenv_path)\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = \"django-secret-key\"\n\nCONCORDIA_ENVIRONMENT = os.environ.get(\"CONCORDIA_ENVIRONMENT\", \"development\")\n\n# Optional SMTP authentication information for EMAIL_HOST.\nEMAIL_HOST_USER = \"\"\nEMAIL_HOST_PASSWORD = \"\"\nEMAIL_USE_TLS = False\nDEFAULT_FROM_EMAIL = \"[email protected]\"\n\nALLOWED_HOSTS = [\"*\"]\n\nDEBUG = False\nCSRF_COOKIE_SECURE = False\n\nAUTH_PASSWORD_VALIDATORS = []\nEMAIL_BACKEND = \"django.core.mail.backends.filebased.EmailBackend\"\n# EMAIL_FILE_PATH = os.path.join(SITE_ROOT_DIR, 'emails')\nEMAIL_HOST = \"localhost\"\nEMAIL_PORT = 25\nLANGUAGE_CODE = \"en-us\"\nLOGIN_REDIRECT_URL = \"/\"\nLOGOUT_REDIRECT_URL = \"/\"\nROOT_URLCONF = \"concordia.urls\"\nSTATIC_ROOT = \"static\"\nSTATIC_URL = \"/static/\"\nSTATICFILES_DIRS = [\n os.path.join(CONCORDIA_APP_DIR, \"static\"),\n os.path.join(\"/\".join(CONCORDIA_APP_DIR.split(\"/\")[:-1]), \"concordia/static\"),\n]\nSTATICFILES_DIRS = [os.path.join(CONCORDIA_APP_DIR, \"static\")]\nTEMPLATE_DEBUG = False\nTIME_ZONE = \"UTC\"\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\nWSGI_APPLICATION = \"concordia.wsgi.application\"\n\nADMIN_SITE = {\"site_header\": \"Concordia Admin\", \"site_title\": \"Concordia\"}\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.postgresql\",\n \"NAME\": \"concordia\",\n \"USER\": \"concordia\",\n \"PASSWORD\": os.getenv(\"POSTGRESQL_PW\"),\n \"HOST\": os.getenv(\"POSTGRESQL_HOST\", \"localhost\"),\n \"PORT\": \"5432\",\n \"CONN_MAX_AGE\": 15 * 60, # Keep database connections open for 15 minutes\n }\n}\n\n\nINSTALLED_APPS = [\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.humanize\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.sites\",\n \"django.contrib.staticfiles\",\n \"raven.contrib.django.raven_compat\",\n \"maintenance_mode\",\n \"bootstrap4\",\n \"concordia.apps.ConcordiaAppConfig\",\n \"exporter\",\n \"importer\",\n \"captcha\",\n \"django_prometheus_metrics\",\n \"robots\",\n]\n\nif DEBUG:\n INSTALLED_APPS += [\"django_extensions\"]\n INSTALLED_APPS += [\"kombu.transport\"]\n\n\nMIDDLEWARE = [\n \"django_prometheus_metrics.middleware.PrometheusBeforeMiddleware\",\n \"django.middleware.security.SecurityMiddleware\",\n # WhiteNoise serves static files efficiently:\n \"whitenoise.middleware.WhiteNoiseMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n \"maintenance_mode.middleware.MaintenanceModeMiddleware\",\n]\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [\n os.path.join(SITE_ROOT_DIR, \"templates\"),\n os.path.join(CONCORDIA_APP_DIR, \"templates\"),\n ],\n \"OPTIONS\": {\n 
\"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n \"django.template.context_processors.media\",\n # Concordia\n \"concordia.context_processors.system_configuration\",\n \"concordia.context_processors.site_navigation\",\n ],\n \"loaders\": [\n \"django.template.loaders.filesystem.Loader\",\n \"django.template.loaders.app_directories.Loader\",\n ],\n },\n }\n]\n\nCACHES = {\"default\": {\"BACKEND\": \"django.core.cache.backends.locmem.LocMemCache\"}}\n\nHAYSTACK_CONNECTIONS = {\n \"default\": {\n \"ENGINE\": \"haystack.backends.whoosh_backend.WhooshEngine\",\n \"PATH\": os.path.join(os.path.dirname(__file__), \"whoosh_index\"),\n }\n}\n\n# Celery settings\nCELERY_BROKER_URL = \"pyamqp://guest@rabbit\"\nCELERY_RESULT_BACKEND = \"rpc://\"\n\nCELERY_ACCEPT_CONTENT = [\"json\"]\nCELERY_TASK_SERIALIZER = \"json\"\nCELERY_IMPORTS = (\"importer.tasks\",)\n\nCELERY_BROKER_HEARTBEAT = 0\nCELERY_BROKER_TRANSPORT_OPTIONS = {\n \"confirm_publish\": True,\n \"max_retries\": 3,\n \"interval_start\": 0,\n \"interval_step\": 0.2,\n \"interval_max\": 0.5,\n}\n\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\n \"long\": {\n \"format\": \"[{asctime} {levelname} {name}:{lineno}] {message}\",\n \"datefmt\": \"%Y-%m-%dT%H:%M:%S\",\n \"style\": \"{\",\n },\n \"short\": {\n \"format\": \"[{levelname} {name}] {message}\",\n \"datefmt\": \"%Y-%m-%dT%H:%M:%S\",\n \"style\": \"{\",\n },\n },\n \"handlers\": {\n \"stream\": {\n \"class\": \"logging.StreamHandler\",\n \"level\": \"INFO\",\n \"formatter\": \"long\",\n },\n \"null\": {\"level\": \"DEBUG\", \"class\": \"logging.NullHandler\"},\n \"file\": {\n \"class\": \"logging.handlers.TimedRotatingFileHandler\",\n \"level\": \"DEBUG\",\n \"formatter\": \"long\",\n \"filename\": \"{}/logs/concordia.log\".format(SITE_ROOT_DIR),\n \"when\": \"H\",\n \"interval\": 3,\n \"backupCount\": 16,\n },\n \"celery\": {\n \"level\": \"DEBUG\",\n \"class\": \"logging.handlers.RotatingFileHandler\",\n \"filename\": \"{}/logs/celery.log\".format(SITE_ROOT_DIR),\n \"formatter\": \"long\",\n \"maxBytes\": 1024 * 1024 * 100, # 100 mb\n },\n \"sentry\": {\n \"level\": \"WARNING\",\n \"class\": \"raven.contrib.django.raven_compat.handlers.SentryHandler\",\n },\n },\n \"loggers\": {\n \"django\": {\"handlers\": [\"file\", \"stream\"], \"level\": \"DEBUG\", \"propagate\": True},\n \"celery\": {\"handlers\": [\"celery\", \"stream\"], \"level\": \"DEBUG\"},\n \"sentry.errors\": {\"level\": \"INFO\", \"handlers\": [\"stream\"], \"propagate\": False},\n },\n}\n\n\n################################################################################\n# Django-specific settings above\n################################################################################\n\nACCOUNT_ACTIVATION_DAYS = 7\n\nMEDIA_URL = \"/media/\"\nMEDIA_ROOT = os.path.join(SITE_ROOT_DIR, \"media\")\n\nLOGIN_URL = \"login\"\n\nPASSWORD_VALIDATOR = (\n \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\"\n)\n\nAUTH_PASSWORD_VALIDATORS = [\n {\"NAME\": PASSWORD_VALIDATOR},\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n \"OPTIONS\": {\"min_length\": 8},\n },\n {\"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\"},\n {\"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\"},\n {\"NAME\": 
\"concordia.validators.complexity\"},\n]\n\nAUTHENTICATION_BACKENDS = [\n \"concordia.email_username_backend.EmailOrUsernameModelBackend\"\n]\n\nCAPTCHA_CHALLENGE_FUNCT = \"captcha.helpers.random_char_challenge\"\n#: Anonymous sessions require captcha validation every day by default:\nANONYMOUS_CAPTCHA_VALIDATION_INTERVAL = 86400\n\nSTATICFILES_STORAGE = \"whitenoise.storage.CompressedManifestStaticFilesStorage\"\nWHITENOISE_ROOT = STATIC_ROOT\n\nPASSWORD_RESET_TIMEOUT_DAYS = 1\nACCOUNT_ACTIVATION_DAYS = 1\nREGISTRATION_OPEN = True # set to false to temporarily disable registrations\n\nMESSAGE_STORAGE = \"django.contrib.messages.storage.session.SessionStorage\"\n\nMESSAGE_TAGS = {messages.ERROR: \"danger\"}\n\nSENTRY_DSN = os.environ.get(\"SENTRY_DSN\", \"\")\nSENTRY_PUBLIC_DSN = os.environ.get(\"SENTRY_PUBLIC_DSN\", \"\")\n\nif SENTRY_DSN:\n RAVEN_CONFIG = {\"dsn\": SENTRY_DSN, \"environment\": CONCORDIA_ENVIRONMENT}\n\n# When the MAINTENANCE_MODE setting is true, this template will be used to\n# generate a 503 response:\nMAINTENANCE_MODE_TEMPLATE = \"maintenance-mode.html\"\n\n# Names of special django.auth Groups\nCOMMUNITY_MANAGER_GROUP_NAME = \"Community Managers\"\nNEWSLETTER_GROUP_NAME = \"Newsletter\"\n\n# Django sites framework setting\nSITE_ID = 1\nROBOTS_USE_SITEMAP = False\nROBOTS_USE_HOST = False\n\n# Transcription-related settings\n\n#: Number of seconds an asset reservation is valid for\nTRANSCRIPTION_RESERVATION_SECONDS = 5 * 60\n\n#: Web cache policy settings\nDEFAULT_PAGE_TTL = 5 * 60\n",
"path": "concordia/settings_template.py"
}
] | [
{
"content": "# TODO: use correct copyright header\nimport os\n\nfrom django.contrib import messages\nfrom dotenv import load_dotenv\n\n# Build paths inside the project like this: os.path.join(SITE_ROOT_DIR, ...)\nCONCORDIA_APP_DIR = os.path.abspath(os.path.dirname(__file__))\nSITE_ROOT_DIR = os.path.dirname(CONCORDIA_APP_DIR)\n\n# Build path for and load .env file.\ndotenv_path = os.path.join(SITE_ROOT_DIR, \".env\")\nload_dotenv(dotenv_path)\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = \"django-secret-key\"\n\nCONCORDIA_ENVIRONMENT = os.environ.get(\"CONCORDIA_ENVIRONMENT\", \"development\")\n\n# Optional SMTP authentication information for EMAIL_HOST.\nEMAIL_HOST_USER = \"\"\nEMAIL_HOST_PASSWORD = \"\"\nEMAIL_USE_TLS = False\nDEFAULT_FROM_EMAIL = \"[email protected]\"\n\nALLOWED_HOSTS = [\"*\"]\n\nDEBUG = False\nCSRF_COOKIE_SECURE = False\n\nAUTH_PASSWORD_VALIDATORS = []\nEMAIL_BACKEND = \"django.core.mail.backends.filebased.EmailBackend\"\n# EMAIL_FILE_PATH = os.path.join(SITE_ROOT_DIR, 'emails')\nEMAIL_HOST = \"localhost\"\nEMAIL_PORT = 25\nLANGUAGE_CODE = \"en-us\"\nLOGIN_REDIRECT_URL = \"/\"\nLOGOUT_REDIRECT_URL = \"/\"\nROOT_URLCONF = \"concordia.urls\"\nSTATIC_ROOT = \"static\"\nSTATIC_URL = \"/static/\"\nSTATICFILES_DIRS = [\n os.path.join(CONCORDIA_APP_DIR, \"static\"),\n os.path.join(\"/\".join(CONCORDIA_APP_DIR.split(\"/\")[:-1]), \"concordia/static\"),\n]\nSTATICFILES_DIRS = [os.path.join(CONCORDIA_APP_DIR, \"static\")]\nTEMPLATE_DEBUG = False\nTIME_ZONE = \"UTC\"\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\nWSGI_APPLICATION = \"concordia.wsgi.application\"\n\nADMIN_SITE = {\"site_header\": \"Concordia Admin\", \"site_title\": \"Concordia\"}\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.postgresql\",\n \"NAME\": \"concordia\",\n \"USER\": \"concordia\",\n \"PASSWORD\": os.getenv(\"POSTGRESQL_PW\"),\n \"HOST\": os.getenv(\"POSTGRESQL_HOST\", \"localhost\"),\n \"PORT\": \"5432\",\n \"CONN_MAX_AGE\": 15 * 60, # Keep database connections open for 15 minutes\n }\n}\n\n\nINSTALLED_APPS = [\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.humanize\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.sites\",\n \"django.contrib.staticfiles\",\n \"raven.contrib.django.raven_compat\",\n \"maintenance_mode\",\n \"bootstrap4\",\n \"bittersweet\",\n \"concordia.apps.ConcordiaAppConfig\",\n \"exporter\",\n \"importer\",\n \"captcha\",\n \"django_prometheus_metrics\",\n \"robots\",\n]\n\nif DEBUG:\n INSTALLED_APPS += [\"django_extensions\"]\n INSTALLED_APPS += [\"kombu.transport\"]\n\n\nMIDDLEWARE = [\n \"django_prometheus_metrics.middleware.PrometheusBeforeMiddleware\",\n \"django.middleware.security.SecurityMiddleware\",\n # WhiteNoise serves static files efficiently:\n \"whitenoise.middleware.WhiteNoiseMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n \"maintenance_mode.middleware.MaintenanceModeMiddleware\",\n]\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [\n os.path.join(SITE_ROOT_DIR, \"templates\"),\n os.path.join(CONCORDIA_APP_DIR, \"templates\"),\n ],\n \"OPTIONS\": {\n 
\"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n \"django.template.context_processors.media\",\n # Concordia\n \"concordia.context_processors.system_configuration\",\n \"concordia.context_processors.site_navigation\",\n ],\n \"loaders\": [\n \"django.template.loaders.filesystem.Loader\",\n \"django.template.loaders.app_directories.Loader\",\n ],\n },\n }\n]\n\nCACHES = {\"default\": {\"BACKEND\": \"django.core.cache.backends.locmem.LocMemCache\"}}\n\nHAYSTACK_CONNECTIONS = {\n \"default\": {\n \"ENGINE\": \"haystack.backends.whoosh_backend.WhooshEngine\",\n \"PATH\": os.path.join(os.path.dirname(__file__), \"whoosh_index\"),\n }\n}\n\n# Celery settings\nCELERY_BROKER_URL = \"pyamqp://guest@rabbit\"\nCELERY_RESULT_BACKEND = \"rpc://\"\n\nCELERY_ACCEPT_CONTENT = [\"json\"]\nCELERY_TASK_SERIALIZER = \"json\"\nCELERY_IMPORTS = (\"importer.tasks\",)\n\nCELERY_BROKER_HEARTBEAT = 0\nCELERY_BROKER_TRANSPORT_OPTIONS = {\n \"confirm_publish\": True,\n \"max_retries\": 3,\n \"interval_start\": 0,\n \"interval_step\": 0.2,\n \"interval_max\": 0.5,\n}\n\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\n \"long\": {\n \"format\": \"[{asctime} {levelname} {name}:{lineno}] {message}\",\n \"datefmt\": \"%Y-%m-%dT%H:%M:%S\",\n \"style\": \"{\",\n },\n \"short\": {\n \"format\": \"[{levelname} {name}] {message}\",\n \"datefmt\": \"%Y-%m-%dT%H:%M:%S\",\n \"style\": \"{\",\n },\n },\n \"handlers\": {\n \"stream\": {\n \"class\": \"logging.StreamHandler\",\n \"level\": \"INFO\",\n \"formatter\": \"long\",\n },\n \"null\": {\"level\": \"DEBUG\", \"class\": \"logging.NullHandler\"},\n \"file\": {\n \"class\": \"logging.handlers.TimedRotatingFileHandler\",\n \"level\": \"DEBUG\",\n \"formatter\": \"long\",\n \"filename\": \"{}/logs/concordia.log\".format(SITE_ROOT_DIR),\n \"when\": \"H\",\n \"interval\": 3,\n \"backupCount\": 16,\n },\n \"celery\": {\n \"level\": \"DEBUG\",\n \"class\": \"logging.handlers.RotatingFileHandler\",\n \"filename\": \"{}/logs/celery.log\".format(SITE_ROOT_DIR),\n \"formatter\": \"long\",\n \"maxBytes\": 1024 * 1024 * 100, # 100 mb\n },\n \"sentry\": {\n \"level\": \"WARNING\",\n \"class\": \"raven.contrib.django.raven_compat.handlers.SentryHandler\",\n },\n },\n \"loggers\": {\n \"django\": {\"handlers\": [\"file\", \"stream\"], \"level\": \"DEBUG\", \"propagate\": True},\n \"celery\": {\"handlers\": [\"celery\", \"stream\"], \"level\": \"DEBUG\"},\n \"sentry.errors\": {\"level\": \"INFO\", \"handlers\": [\"stream\"], \"propagate\": False},\n },\n}\n\n\n################################################################################\n# Django-specific settings above\n################################################################################\n\nACCOUNT_ACTIVATION_DAYS = 7\n\nMEDIA_URL = \"/media/\"\nMEDIA_ROOT = os.path.join(SITE_ROOT_DIR, \"media\")\n\nLOGIN_URL = \"login\"\n\nPASSWORD_VALIDATOR = (\n \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\"\n)\n\nAUTH_PASSWORD_VALIDATORS = [\n {\"NAME\": PASSWORD_VALIDATOR},\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n \"OPTIONS\": {\"min_length\": 8},\n },\n {\"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\"},\n {\"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\"},\n {\"NAME\": 
\"concordia.validators.complexity\"},\n]\n\nAUTHENTICATION_BACKENDS = [\n \"concordia.email_username_backend.EmailOrUsernameModelBackend\"\n]\n\nCAPTCHA_CHALLENGE_FUNCT = \"captcha.helpers.random_char_challenge\"\n#: Anonymous sessions require captcha validation every day by default:\nANONYMOUS_CAPTCHA_VALIDATION_INTERVAL = 86400\n\nSTATICFILES_STORAGE = \"whitenoise.storage.CompressedManifestStaticFilesStorage\"\nWHITENOISE_ROOT = STATIC_ROOT\n\nPASSWORD_RESET_TIMEOUT_DAYS = 1\nACCOUNT_ACTIVATION_DAYS = 1\nREGISTRATION_OPEN = True # set to false to temporarily disable registrations\n\nMESSAGE_STORAGE = \"django.contrib.messages.storage.session.SessionStorage\"\n\nMESSAGE_TAGS = {messages.ERROR: \"danger\"}\n\nSENTRY_DSN = os.environ.get(\"SENTRY_DSN\", \"\")\nSENTRY_PUBLIC_DSN = os.environ.get(\"SENTRY_PUBLIC_DSN\", \"\")\n\nif SENTRY_DSN:\n RAVEN_CONFIG = {\"dsn\": SENTRY_DSN, \"environment\": CONCORDIA_ENVIRONMENT}\n\n# When the MAINTENANCE_MODE setting is true, this template will be used to\n# generate a 503 response:\nMAINTENANCE_MODE_TEMPLATE = \"maintenance-mode.html\"\n\n# Names of special django.auth Groups\nCOMMUNITY_MANAGER_GROUP_NAME = \"Community Managers\"\nNEWSLETTER_GROUP_NAME = \"Newsletter\"\n\n# Django sites framework setting\nSITE_ID = 1\nROBOTS_USE_SITEMAP = False\nROBOTS_USE_HOST = False\n\n# Transcription-related settings\n\n#: Number of seconds an asset reservation is valid for\nTRANSCRIPTION_RESERVATION_SECONDS = 5 * 60\n\n#: Web cache policy settings\nDEFAULT_PAGE_TTL = 5 * 60\n",
"path": "concordia/settings_template.py"
}
] | diff --git a/concordia/settings_template.py b/concordia/settings_template.py
index 261edf2e4..58dd93e94 100755
--- a/concordia/settings_template.py
+++ b/concordia/settings_template.py
@@ -78,6 +78,7 @@
"raven.contrib.django.raven_compat",
"maintenance_mode",
"bootstrap4",
+ "bittersweet",
"concordia.apps.ConcordiaAppConfig",
"exporter",
"importer",
diff --git a/concordia/templates/standard-pagination.html b/concordia/templates/standard-pagination.html
index dfb470e05..830fd92a0 100644
--- a/concordia/templates/standard-pagination.html
+++ b/concordia/templates/standard-pagination.html
@@ -1,3 +1,5 @@
+{% load bittersweet_querystring %}
+
{% comment %}
This template fragment assumes that you are using Bootstrap's default pagination
with a Django ListView CBV or equivalent which has the default is_paginated,
@@ -9,7 +11,7 @@
<ul class="pagination mx-auto justify-content-center">
{% if page_obj.has_previous %}
<li class="page-item">
- <a class="page-link" href="?page={{ page_obj.previous_page_number }}" aria-title="Previous Page">←</a>
+ <a class="page-link" href="?{% qs_alter request.GET page=page_obj.previous_page_number %}" aria-title="Previous Page">←</a>
</li>
{% else %}
<li class="page-item disabled" aria-hidden="true">
@@ -19,7 +21,7 @@
{% if page_obj.number > 1 %}
<li class="page-item">
- <a class="page-link" href="?page=1">1</a>
+ <a class="page-link" href="?{% qs_alter request.GET page=1 %}">1</a>
</li>
{% endif %}
@@ -31,7 +33,7 @@
{% with page_obj.previous_page_number|add:-1 as second_previous_page %}
{% if second_previous_page > 1 %}
<li class="page-item">
- <a class="page-link" href="?page={{ second_previous_page }}">{{ second_previous_page }}</a>
+ <a class="page-link" href="?{% qs_alter request.GET page=second_previous_page %}">{{ second_previous_page }}</a>
</li>
{% endif %}
{% endwith %}
@@ -39,19 +41,19 @@
{% if page_obj.previous_page_number > 1 %}
<li class="page-item">
- <a class="page-link" href="?page={{ page_obj.previous_page_number }}">{{ page_obj.previous_page_number }}</a>
+ <a class="page-link" href="?{% qs_alter request.GET page=page_obj.previous_page_number %}">{{ page_obj.previous_page_number }}</a>
</li>
{% endif %}
<li class="page-item active">
- <a class="page-link" href="?page={{ page_obj.number }}">
+ <a class="page-link" href="?{% qs_alter request.GET page=page_obj.number %}">
{{ page_obj.number }}
</a>
</li>
{% if page_obj.next_page_number < paginator.num_pages %}
<li class="page-item">
- <a class="page-link" href="?page={{ page_obj.next_page_number }}">{{ page_obj.next_page_number }}</a>
+ <a class="page-link" href="?{% qs_alter request.GET page=page_obj.next_page_number %}">{{ page_obj.next_page_number }}</a>
</li>
{% endif %}
@@ -59,7 +61,7 @@
{% with page_obj.next_page_number|add:1 as second_next_page %}
{% if second_next_page < paginator.num_pages %}
<li class="page-item">
- <a class="page-link" href="?page={{ second_next_page }}">{{ second_next_page }}</a>
+ <a class="page-link" href="?{% qs_alter request.GET page=second_next_page %}">{{ second_next_page }}</a>
</li>
{% endif %}
{% endwith %}
@@ -71,13 +73,13 @@
{% if page_obj.number < paginator.num_pages %}
<li class="page-item">
- <a class="page-link" href="?page={{ paginator.num_pages }}">{{ paginator.num_pages }}</a>
+ <a class="page-link" href="?{% qs_alter request.GET page=paginator.num_pages %}">{{ paginator.num_pages }}</a>
</li>
{% endif %}
{% if page_obj.has_next %}
<li class="page-item">
- <a class="page-link" href="?page={{ page_obj.next_page_number }}">→</a>
+ <a class="page-link" href="?{% qs_alter request.GET page=page_obj.next_page_number %}">→</a>
</li>
{% else %}
<li class="page-item disabled" aria-hidden="true">
diff --git a/concordia/templates/transcriptions/item_detail.html b/concordia/templates/transcriptions/item_detail.html
index 505c629d6..8fba3d32c 100644
--- a/concordia/templates/transcriptions/item_detail.html
+++ b/concordia/templates/transcriptions/item_detail.html
@@ -63,7 +63,7 @@ <h1 class="m-3">{{ item.title }}</h1>
<div class="col-md-4">
<small>Contributors: {{ contributor_count|intcomma }}</small>
</div>
- </div>
+ </div>
</div>
</div>
<div class="card-deck justify-content-center">
|
LMFDB__lmfdb-4167 | Random link for Dirichlet characters is broken
https://www.lmfdb.org/Character/Dirichlet/random gives an invalid label error (two in fact).
Also, three error messages are displayed when you enter the label "banana". Only one should be displayed.
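
On the duplicated-messages symptom: in the view code included below, an invalid label can be flashed by `jump`, again by `url_for_label`, and once more by the page it redirects to, which is how a single bad input like "banana" ends up producing several messages. A hedged sketch of one possible shape for a fix (an assumption for illustration, not necessarily what was actually merged) is to validate once and stop the redirect chain there:

```python
def jump(info):
    label = info["jump"].strip()
    try:
        validate_label(label)  # raises ValueError on malformed input
    except ValueError as err:
        # Flash exactly once and bail out; downstream views never see the label.
        flash_error("%s is not a valid label: %s.", label, str(err))
        return redirect(url_for(".render_DirichletNavigation"))
    # Only valid labels reach url_for_label, so it no longer needs to flash.
    return redirect(url_for_label(label))
```

The broken `/random` link itself is the first part of this report and is not addressed by this sketch.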
| [
{
"content": "# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom lmfdb.app import app\nimport re\nfrom flask import render_template, url_for, request, redirect, abort\nfrom sage.all import gcd, euler_phi\nfrom lmfdb.utils import (\n to_dict, flash_error, SearchArray, YesNoBox, display_knowl, ParityBox,\n TextBox, CountBox, parse_bool, parse_ints, search_wrap,\n StatsDisplay, totaler, proportioners, comma)\nfrom lmfdb.utils.interesting import interesting_knowls\nfrom lmfdb.characters.utils import url_character\nfrom lmfdb.characters.web_character import (\n WebDirichletGroup,\n WebSmallDirichletGroup,\n WebDirichletCharacter,\n WebSmallDirichletCharacter,\n WebDBDirichletCharacter,\n WebDBDirichletGroup,\n)\nfrom lmfdb.characters.ListCharacters import get_character_modulus, get_character_conductor, get_character_order\nfrom lmfdb.characters import characters_page\nfrom sage.databases.cremona import class_to_int\nfrom lmfdb import db\n\n#### make url_character available from templates\[email protected]_processor\ndef ctx_characters():\n chardata = {}\n chardata['url_character'] = url_character\n return chardata\n\ndef bread(tail=[]):\n base = [('Characters',url_for(\".render_characterNavigation\")),\n ('Dirichlet', url_for(\".render_DirichletNavigation\"))]\n if not isinstance(tail, list):\n tail = [(tail, \" \")]\n return base + tail\n\ndef learn(current = None):\n r = []\n if current != 'extent':\n r.append( ('Completeness of the data', url_for(\".extent_page\")) )\n if current != 'source':\n r.append( ('Source of the data', url_for(\".how_computed_page\")) )\n if current != 'reliability':\n r.append( ('Reliability of the data', url_for(\".reliability\")) )\n if current != 'labels':\n r.append( ('Dirichlet character labels', url_for(\".labels_page\")) )\n return r\n\ndef credit():\n return \"Alex Best, Jonathan Boboer, David Lowry-Duda, and Andrew Sutherland\"\n\n###############################################################################\n# Route functions\n# Do not use url_for on these, use url_character defined in lmfdb.utils\n###############################################################################\n\n@characters_page.route(\"/\")\ndef render_characterNavigation():\n \"\"\"\n FIXME: replace query by ?browse=<key>&start=<int>&end=<int>\n \"\"\"\n return redirect(url_for(\".render_DirichletNavigation\"), 301)\n\nclass DirichSearchArray(SearchArray):\n noun = \"character\"\n plural_noun = \"characters\"\n jump_example = \"13.2\"\n jump_egspan = \"e.g. 
13.2 for the Dirichlet character \\(\\displaystyle\\chi_{13}(2,·)\\),or 13.f for its Galois orbit.\"\n jump_knowl=\"character.dirichlet.search_input\"\n jump_prompt=\"Label\"\n def __init__(self):\n modulus = TextBox(\n \"modulus\",\n knowl=\"character.dirichlet.modulus\",\n label=\"Modulus\",\n example=\"13\",\n example_span=\"13\",\n )\n conductor = TextBox(\n \"conductor\",\n knowl = \"character.dirichlet.conductor\",\n label = \"Conductor\",\n example = \"5\",\n example_span = \"5 or 10,20\",\n )\n order = TextBox(\n \"order\",\n label=\"Order\",\n knowl=\"character.dirichlet.order\",\n example=\"2\",\n example_span=\"2 or 3-5\"\n )\n parity = ParityBox(\n \"parity\",\n knowl=\"character.dirichlet.parity\",\n label=\"Parity\",\n example=\"odd\"\n )\n is_primitive = YesNoBox(\n \"is_primitive\",\n label=\"Primitive\",\n knowl=\"character.dirichlet.primitive\",\n example=\"yes\"\n )\n is_real = YesNoBox(\n \"is_real\",\n label=\"Real\",\n knowl=\"character.dirichlet.real\",\n example=\"yes\"\n )\n is_minimal = YesNoBox(\n \"is_minimal\",\n label=\"Minimal\",\n knowl=\"character.dirichlet.minimal\",\n example=\"yes\"\n )\n count = CountBox()\n\n self.refine_array = [\n [modulus, conductor, order, is_real], [parity, is_primitive, is_minimal, count],\n ]\n self.browse_array = [\n [modulus],\n [conductor],\n [order],\n [parity],\n [is_primitive],\n [is_real],\n [is_minimal],\n [count],\n ]\n\n def search_types(self, info):\n return self._search_again(info, [\n ('List', 'List of characters'),\n ('Random', 'Random character')])\n\ndef common_parse(info, query):\n parse_ints(info, query, \"modulus\", name=\"modulus\")\n parse_ints(info, query, \"conductor\", name=\"conductor\")\n parse_ints(info, query, \"order\", name=\"order\")\n if 'parity' in info:\n parity=info['parity']\n if parity == 'even':\n query['parity'] = 1\n elif parity == 'odd':\n query['parity'] = -1\n parse_bool(info, query, \"is_primitive\", name=\"is_primitive\")\n parse_bool(info, query, \"is_real\", name=\"is_real\")\n parse_bool(info, query, \"is_minimal\", name=\"is_minimal\")\n\ndef validate_label(label):\n modulus, number = label.split('.')\n modulus = int(modulus)\n numbers = label_to_number(modulus, number, all=True)\n if numbers == 0:\n raise ValueError(\"it must be of the form modulus.number, with modulus and number natural numbers\")\n return True\n\ndef jump(info):\n jump_box = info[\"jump\"].strip() # only called when this present\n try:\n validate_label(jump_box)\n except ValueError as err:\n flash_error(\"%s is not a valid label: %s.\", jump_box, str(err))\n return redirect(url_for_label(jump_box))\n\ndef url_for_label(label):\n label = label.replace(\" \", \"\")\n try:\n validate_label(label)\n except ValueError as err:\n flash_error(\"%s is not a valid label: %s.\", label, str(err))\n return redirect(url_for(\".render_DirichletNavigation\"))\n modulus, number = label.split(\".\")\n modulus = int(modulus)\n number = label_to_number(modulus, number)\n return url_for(\".render_Dirichletwebpage\", modulus=modulus, number=number)\n\n@search_wrap(\n template=\"character_search_results.html\",\n table=db.char_dir_orbits,\n title=\"Dirichlet character search results\",\n err_title=\"Dirichlet character search input error\",\n shortcuts={ \"jump\": jump },\n url_for_label=url_for_label,\n learnmore=learn,\n bread=lambda: bread(\"Search results\"),\n credit=credit,\n)\ndef dirichlet_character_search(info, query):\n common_parse(info, query)\n\ndef label_to_number(modulus, number, all=False):\n \"\"\"\n Takes the second 
part of a character label and converts it to the second\n part of a Conrey label. This could be trivial (just casting to an int)\n or could require converting from an orbit label to a number.\n\n If the label is invalid, returns 0.\n \"\"\"\n try:\n number = int(number)\n except ValueError:\n # encoding Galois orbit\n if modulus < 10000:\n try:\n orbit_label = '{0}.{1}'.format(modulus, 1 + class_to_int(number))\n except ValueError:\n return 0\n else:\n number = db.char_dir_orbits.lucky({'orbit_label':orbit_label}, 'galois_orbit')\n if number is None:\n return 0\n if not all:\n number = number[0]\n else:\n return 0\n else:\n if number <= 0 or gcd(modulus, number) != 1 or number > modulus:\n return 0\n return number\n\n@characters_page.route(\"/Dirichlet\")\n@characters_page.route(\"/Dirichlet/\")\ndef render_DirichletNavigation():\n try:\n if 'modbrowse' in request.args:\n arg = request.args['modbrowse']\n arg = arg.split('-')\n modulus_start = int(arg[0])\n modulus_end = int(arg[1])\n info = {'args': request.args}\n info['title'] = 'Dirichlet characters of modulus ' + str(modulus_start) + '-' + str(modulus_end)\n info['bread'] = bread('Modulus')\n info['learnmore'] = learn()\n info['credit'] = credit()\n h, c, rows, cols = get_character_modulus(modulus_start, modulus_end)\n info['contents'] = c\n info['headers'] = h\n info['rows'] = rows\n info['cols'] = cols\n return render_template(\"ModulusList.html\", **info)\n\n elif 'condbrowse' in request.args:\n arg = request.args['condbrowse']\n arg = arg.split('-')\n conductor_start = int(arg[0])\n conductor_end = int(arg[1])\n info = {'args': request.args}\n info['bread'] = bread('Conductor')\n info['learnmore'] = learn()\n info['credit'] = credit()\n info['conductor_start'] = conductor_start\n info['conductor_end'] = conductor_end\n info['title'] = 'Dirichlet characters of conductor ' + str(conductor_start) + '-' + str(conductor_end)\n info['contents'] = get_character_conductor(conductor_start, conductor_end + 1)\n return render_template(\"ConductorList.html\", **info)\n\n elif 'ordbrowse' in request.args:\n arg = request.args['ordbrowse']\n arg = arg.split('-')\n order_start = int(arg[0])\n order_end = int(arg[1])\n info = {'args': request.args}\n info['bread'] = bread('Order')\n info['learnmore'] = learn()\n info['credit'] = credit()\n info['order_start'] = order_start\n info['order_end'] = order_end\n info['title'] = 'Dirichlet characters of orders ' + str(order_start) + '-' + str(order_end)\n info['contents'] = get_character_order(order_start, order_end + 1)\n return render_template(\"OrderList.html\", **info)\n except ValueError as err:\n flash_error(\"Error raised in parsing: %s\", err)\n return render_template('CharacterNavigate.html', title='Dirichlet characters', bread=bread(), learnmore=learn(), credit=credit())\n\n if request.args:\n # hidden_search_type for prev/next buttons\n info = to_dict(request.args, search_array=DirichSearchArray())\n info[\"search_type\"] = search_type = info.get(\"search_type\", info.get(\"hst\", \"List\"))\n if search_type in ['List', 'Random']:\n return dirichlet_character_search(info)\n assert False\n info = to_dict(request.args, search_array=DirichSearchArray(), stats=DirichStats())\n info['bread'] = bread()\n info['learnmore'] = learn()\n info['credit'] = credit()\n info['title'] = 'Dirichlet characters'\n return render_template('CharacterNavigate.html', info=info,**info)\n\n\n@characters_page.route(\"/Dirichlet/Labels\")\ndef labels_page():\n info = {}\n info['title'] = 'Dirichlet character labels'\n 
info['bread'] = bread('Labels')\n info['learnmore'] = learn('labels')\n info['credit'] = credit()\n return render_template(\"single.html\", kid='character.dirichlet.conrey', **info)\n\n@characters_page.route(\"/Dirichlet/Source\")\ndef how_computed_page():\n info = {}\n info['title'] = 'Source of Dirichlet character data'\n info['bread'] = bread('Source')\n info['learnmore'] = learn('source')\n info['credit'] = credit()\n return render_template(\"single.html\", kid='rcs.source.character.dirichlet', **info)\n\n@characters_page.route(\"/Dirichlet/Reliability\")\ndef reliability():\n info = {}\n info['title'] = 'Reliability of Dirichlet character data'\n info['bread'] = bread('Reliability')\n info['learnmore'] = learn('reliability')\n info['credit'] = credit()\n return render_template(\"single.html\", kid='rcs.rigor.character.dirichlet', **info)\n\n@characters_page.route(\"/Dirichlet/Completeness\")\ndef extent_page():\n info = {}\n info['title'] = 'Completeness of Dirichlet character data'\n info['bread'] = bread('Extent')\n info['learnmore'] = learn('extent')\n info['credit'] = credit()\n return render_template(\"single.html\", kid='dq.character.dirichlet.extent',\n **info)\n\ndef make_webchar(args):\n modulus = int(args['modulus'])\n if modulus < 10000:\n return WebDBDirichletCharacter(**args)\n elif modulus < 100000:\n return WebDirichletCharacter(**args)\n else:\n return WebSmallDirichletCharacter(**args)\n\n@characters_page.route(\"/Dirichlet/<modulus>\")\n@characters_page.route(\"/Dirichlet/<modulus>/\")\n@characters_page.route(\"/Dirichlet/<modulus>/<number>\")\ndef render_Dirichletwebpage(modulus=None, number=None):\n\n modulus = modulus.replace(' ','')\n if number is None and re.match(r'^[1-9][0-9]*\\.([1-9][0-9]*|[a-z]+)$', modulus):\n modulus, number = modulus.split('.')\n return redirect(url_for(\".render_Dirichletwebpage\", modulus=modulus, number=number), 301)\n\n args={}\n args['type'] = 'Dirichlet'\n args['modulus'] = modulus\n args['number'] = number\n try:\n modulus = int(modulus)\n except ValueError:\n modulus = 0\n if modulus <= 0:\n flash_error(\"%s is not a valid modulus for a Dirichlet character. It should be a positive integer.\", args['modulus'])\n return redirect(url_for(\".render_DirichletNavigation\"))\n if modulus > 10**20:\n flash_error(\"specified modulus %s is too large, it should be less than $10^{20}$.\", modulus)\n return redirect(url_for(\".render_DirichletNavigation\"))\n\n\n\n if number is None:\n if modulus < 10000:\n info = WebDBDirichletGroup(**args).to_dict()\n info['show_orbit_label'] = True\n elif modulus < 100000:\n info = WebDirichletGroup(**args).to_dict()\n else:\n info = WebSmallDirichletGroup(**args).to_dict()\n info['title'] = 'Group of Dirichlet characters of modulus ' + str(modulus)\n info['bread'] = bread([('%d'%modulus, url_for(\".render_Dirichletwebpage\", modulus=modulus))])\n info['learnmore'] = learn()\n info['credit'] = credit()\n info['code'] = dict([(k[4:],info[k]) for k in info if k[0:4] == \"code\"])\n info['code']['show'] = { lang:'' for lang in info['codelangs'] } # use default show names\n if 'gens' in info:\n info['generators'] = ', '.join([r'<a href=\"%s\">$\\chi_{%s}(%s,\\cdot)$'%(url_for(\".render_Dirichletwebpage\",modulus=modulus,number=g),modulus,g) for g in info['gens']])\n return render_template('CharGroup.html', **info)\n\n number = label_to_number(modulus, number)\n if number == 0:\n flash_error(\n \"the value %s is invalid. 
It should either be a positive integer \"\n \"coprime to and no greater than the modulus %s, or a letter that \"\n \"corresponds to a valid orbit index.\", args['number'], args['modulus']\n )\n return redirect(url_for(\".render_DirichletNavigation\"))\n args['number'] = number\n webchar = make_webchar(args)\n info = webchar.to_dict()\n info['bread'] = bread(\n [('%s'%modulus, url_for(\".render_Dirichletwebpage\", modulus=modulus)),\n ('%s'%number, url_for(\".render_Dirichletwebpage\", modulus=modulus, number=number)) ])\n info['learnmore'] = learn()\n info['credit'] = credit()\n info['code'] = dict([(k[4:],info[k]) for k in info if k[0:4] == \"code\"])\n info['code']['show'] = { lang:'' for lang in info['codelangs'] } # use default show names\n info['KNOWL_ID'] = 'character.dirichlet.%s.%s' % (modulus, number)\n return render_template('Character.html', **info)\n\ndef _dir_knowl_data(label, orbit=False):\n modulus, number = label.split('.')\n modulus = int(modulus)\n numbers = label_to_number(modulus, number, all=True)\n if numbers == 0:\n return \"Invalid label for Dirichlet character: %s\" % label\n if isinstance(numbers, list):\n number = numbers[0]\n def conrey_link(i):\n return \"<a href='%s'> %s.%s</a>\" % (url_for(\"characters.render_Dirichletwebpage\", modulus=modulus, number=i), modulus, i)\n if len(numbers) <= 2:\n numbers = [conrey_link(k) for k in numbers]\n else:\n numbers = [conrey_link(numbers[0]), '…', conrey_link(numbers[-1])]\n else:\n number = numbers\n numbers = None\n args={'type': 'Dirichlet', 'modulus': modulus, 'number': number}\n webchar = make_webchar(args)\n if orbit and modulus <= 10000:\n inf = \"Dirichlet character orbit %d.%s\\n\" % (modulus, webchar.orbit_label)\n else:\n inf = r\"Dirichlet character \\(\\chi_{%d}(%d, \\cdot)\\)\" % (modulus, number) + \"\\n\"\n inf += \"<div><table class='chardata'>\\n\"\n def row_wrap(header, val):\n return \"<tr><td>%s: </td><td>%s</td></tr>\\n\" % (header, val)\n inf += row_wrap('Conductor', webchar.conductor)\n inf += row_wrap('Order', webchar.order)\n inf += row_wrap('Degree', euler_phi(webchar.order))\n inf += row_wrap('Minimal', webchar.isminimal)\n inf += row_wrap('Parity', webchar.parity)\n if numbers:\n inf += row_wrap('Characters', \", \".join(numbers))\n if modulus <= 10000:\n if not orbit:\n inf += row_wrap('Orbit label', '%d.%s' % (modulus, webchar.orbit_label))\n inf += row_wrap('Orbit Index', webchar.orbit_index)\n inf += '</table></div>\\n'\n if numbers is None:\n inf += '<div align=\"right\">\\n'\n inf += '<a href=\"%s\">%s home page</a>\\n' % (str(url_for(\"characters.render_Dirichletwebpage\", modulus=modulus, number=number)), label)\n inf += '</div>\\n'\n return inf\n\ndef dirichlet_character_data(label):\n return _dir_knowl_data(label, orbit=False)\n\ndef dirichlet_orbit_data(label):\n return _dir_knowl_data(label, orbit=True)\n\[email protected]_processor\ndef ctx_dirchar():\n return {'dirichlet_character_data': dirichlet_character_data,\n 'dirichlet_orbit_data': dirichlet_orbit_data}\n\n@characters_page.route('/Dirichlet/random')\ndef random_Dirichletwebpage():\n return redirect(url_for('.render_DirichletNavigation', search_type=\"Random\"))\n\n@characters_page.route('/Dirichlet/interesting')\ndef interesting():\n return interesting_knowls(\n \"character.dirichlet\",\n db.char_dir_values,\n url_for_label=url_for_label,\n title=\"Some interesting Dirichlet characters\",\n bread=bread(\"Interesting\"),\n credit=credit(),\n learnmore=learn())\n\n@characters_page.route('/Dirichlet/stats')\ndef 
statistics():\n title = \"Dirichlet characters: statistics\"\n return render_template(\"display_stats.html\", info=DirichStats(), credit=credit(), title=title, bread=bread(\"Statistics\"), learnmore=learn())\n\n@characters_page.route(\"/calc-<calc>/Dirichlet/<int:modulus>/<int:number>\")\ndef dc_calc(calc, modulus, number):\n val = request.args.get(\"val\", [])\n args = {'type': 'Dirichlet', 'modulus': modulus, 'number': number}\n if not val:\n return abort(404)\n try:\n if calc == 'value':\n return WebDirichletCharacter(**args).value(val)\n if calc == 'gauss':\n return WebDirichletCharacter(**args).gauss_sum(val)\n elif calc == 'jacobi':\n return WebDirichletCharacter(**args).jacobi_sum(val)\n elif calc == 'kloosterman':\n return WebDirichletCharacter(**args).kloosterman_sum(val)\n else:\n return abort(404)\n except Warning as e:\n return \"<span style='color:gray;'>%s</span>\" % e\n except Exception:\n return \"<span style='color:red;'>Error: bad input</span>\"\n\n###############################################################################\n## TODO: refactor the following\n###############################################################################\n\n@characters_page.route(\"/Dirichlet/table\")\ndef dirichlet_table():\n args = to_dict(request.args)\n mod = args.get('modulus',1)\n return redirect(url_for('characters.render_Dirichletwebpage',modulus=mod))\n\n# FIXME: these group table pages are used by number fields pages.\n# should refactor this into WebDirichlet.py\n@characters_page.route(\"/Dirichlet/grouptable\")\ndef dirichlet_group_table(**args):\n modulus = request.args.get(\"modulus\", 1, type=int)\n info = to_dict(args)\n if \"modulus\" not in info:\n info[\"modulus\"] = modulus\n info['bread'] = bread('Group')\n info['credit'] = credit()\n char_number_list = request.args.get(\"char_number_list\",None)\n if char_number_list is not None:\n info['char_number_list'] = char_number_list\n char_number_list = [int(a) for a in char_number_list.split(',')]\n info['poly'] = request.args.get(\"poly\", '???')\n else:\n return abort(404, 'grouptable needs char_number_list argument')\n h, c = get_group_table(modulus, char_number_list)\n info['headers'] = h\n info['contents'] = c\n info['title'] = 'Group of Dirichlet characters'\n return render_template(\"CharacterGroupTable.html\", **info)\n\n\ndef get_group_table(modulus, char_list):\n # Move 1 to the front of the list\n char_list.insert(0, char_list.pop(next(j for j in range(len(char_list)) if char_list[j]==1)))\n headers = [j for j in char_list] # Just a copy\n if modulus == 1:\n rows = [[1]]\n else:\n rows = [[(j * k) % modulus for k in char_list] for j in char_list]\n return headers, rows\n\ndef yesno(x):\n return \"yes\" if x in [\"yes\", True] else \"no\"\nclass DirichStats(StatsDisplay):\n table = db.char_dir_orbits\n baseurl_func = \".render_DirichletNavigation\"\n stat_list = [\n {\"cols\": [\"conductor\"]},\n {\"cols\": [\"order\", \"modulus\"],\n \"title_joiner\": \" by \",\n \"totaler\": totaler(),\n \"proportioner\": proportioners.per_col_total},\n {\"cols\": [\"is_primitive\", \"modulus\"],\n \"title_joiner\": \" by \",\n \"totaler\": totaler(),\n \"proportioner\": proportioners.per_col_total},\n {\"cols\": [\"is_real\", \"modulus\"],\n \"title_joiner\": \" by \",\n \"totaler\": totaler(),\n \"proportioner\": proportioners.per_col_total},\n {\"cols\": [\"is_minimal\", \"modulus\"],\n \"title_joiner\": \" by \",\n \"totaler\": totaler(),\n \"proportioner\": proportioners.per_col_total},\n ]\n buckets = {\"conductor\": 
[\"1-10\", \"11-100\", \"101-1000\", \"1001-10000\"],\n \"modulus\": [\"1-10\", \"11-100\", \"101-1000\", \"1001-10000\"],\n \"order\": [\"1-10\", \"11-100\", \"101-1000\", \"1001-10000\"]}\n knowls = {\"conductor\": \"character.dirichlet.conductor\",\n \"modulus\": \"character.dirichlet.modulus\",\n \"order\": \"character.dirichlet.order\",\n \"is_minimal\": \"character.dirichlet.minimal\",\n \"is_primitive\": \"character.dirichlet.primitive\",\n \"is_real\": \"character.dirichlet.real\"}\n short_display = {\"is_minimal\": \"minimal\",\n \"is_primitive\": \"primitive\",\n \"is_real\": \"real\"}\n top_titles = {\"order\": \"order\",\n \"is_minimal\": \"minimality\",\n \"is_primitive\": \"primitivity\",\n \"is_real\": \"real characters\"}\n formatters = {\"is_minimal\": yesno,\n \"is_primitive\": yesno,\n \"is_real\": yesno}\n\n def __init__(self):\n self.nchars = db.char_dir_values.count()\n self.norbits = db.char_dir_orbits.count()\n self.maxmod = db.char_dir_orbits.max(\"modulus\")\n\n @property\n def short_summary(self):\n return 'The database currently contains %s %s of %s up to %s, lying in %s %s. Among these, L-functions are available for characters of modulus up to 2,800 (and some of higher modulus). In addition, %s, Galois orbits and %s are available up to modulus $10^{20}$. Here are some <a href=\"%s\">futher statistics</a>.' % (\n comma(self.nchars),\n display_knowl(\"character.dirichlet\", \"Dirichlet characters\"),\n display_knowl(\"character.dirichlet.modulus\", \"modulus\"),\n comma(self.maxmod),\n comma(self.norbits),\n display_knowl(\"character.dirichlet.galois_orbit\", \"Galois orbits\"),\n display_knowl(\"character.dirichlet.basic_properties\", \"basic properties\"),\n display_knowl(\"character.dirichlet.value_field\", \"field of values\"),\n url_for(\".statistics\"))\n\n @property\n def summary(self):\n return \"The database currently contains %s %s of %s up to %s, lying in %s %s. The tables below show counts of Galois orbits.\" % (\n comma(self.nchars),\n display_knowl(\"character.dirichlet\", \"Dirichlet characters\"),\n display_knowl(\"character.dirichlet.modulus\", \"modulus\"),\n comma(self.maxmod),\n comma(self.norbits),\n display_knowl(\"character.dirichlet.galois_orbit\", \"Galois orbits\"))\n",
"path": "lmfdb/characters/main.py"
}
] | [
{
"content": "# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom lmfdb.app import app\nimport re\nfrom flask import render_template, url_for, request, redirect, abort\nfrom sage.all import gcd, euler_phi\nfrom lmfdb.utils import (\n to_dict, flash_error, SearchArray, YesNoBox, display_knowl, ParityBox,\n TextBox, CountBox, parse_bool, parse_ints, search_wrap,\n StatsDisplay, totaler, proportioners, comma)\nfrom lmfdb.utils.interesting import interesting_knowls\nfrom lmfdb.characters.utils import url_character\nfrom lmfdb.characters.web_character import (\n WebDirichletGroup,\n WebSmallDirichletGroup,\n WebDirichletCharacter,\n WebSmallDirichletCharacter,\n WebDBDirichletCharacter,\n WebDBDirichletGroup,\n)\nfrom lmfdb.characters.ListCharacters import get_character_modulus, get_character_conductor, get_character_order\nfrom lmfdb.characters import characters_page\nfrom sage.databases.cremona import class_to_int\nfrom lmfdb import db\n\n#### make url_character available from templates\[email protected]_processor\ndef ctx_characters():\n chardata = {}\n chardata['url_character'] = url_character\n return chardata\n\ndef bread(tail=[]):\n base = [('Characters',url_for(\".render_characterNavigation\")),\n ('Dirichlet', url_for(\".render_DirichletNavigation\"))]\n if not isinstance(tail, list):\n tail = [(tail, \" \")]\n return base + tail\n\ndef learn(current = None):\n r = []\n if current != 'extent':\n r.append( ('Completeness of the data', url_for(\".extent_page\")) )\n if current != 'source':\n r.append( ('Source of the data', url_for(\".how_computed_page\")) )\n if current != 'reliability':\n r.append( ('Reliability of the data', url_for(\".reliability\")) )\n if current != 'labels':\n r.append( ('Dirichlet character labels', url_for(\".labels_page\")) )\n return r\n\ndef credit():\n return \"Alex Best, Jonathan Boboer, David Lowry-Duda, and Andrew Sutherland\"\n\n###############################################################################\n# Route functions\n# Do not use url_for on these, use url_character defined in lmfdb.utils\n###############################################################################\n\n@characters_page.route(\"/\")\ndef render_characterNavigation():\n \"\"\"\n FIXME: replace query by ?browse=<key>&start=<int>&end=<int>\n \"\"\"\n return redirect(url_for(\".render_DirichletNavigation\"), 301)\n\nclass DirichSearchArray(SearchArray):\n noun = \"character\"\n plural_noun = \"characters\"\n jump_example = \"13.2\"\n jump_egspan = \"e.g. 
13.2 for the Dirichlet character \\(\\displaystyle\\chi_{13}(2,·)\\),or 13.f for its Galois orbit.\"\n jump_knowl=\"character.dirichlet.search_input\"\n jump_prompt=\"Label\"\n def __init__(self):\n modulus = TextBox(\n \"modulus\",\n knowl=\"character.dirichlet.modulus\",\n label=\"Modulus\",\n example=\"13\",\n example_span=\"13\",\n )\n conductor = TextBox(\n \"conductor\",\n knowl = \"character.dirichlet.conductor\",\n label = \"Conductor\",\n example = \"5\",\n example_span = \"5 or 10,20\",\n )\n order = TextBox(\n \"order\",\n label=\"Order\",\n knowl=\"character.dirichlet.order\",\n example=\"2\",\n example_span=\"2 or 3-5\"\n )\n parity = ParityBox(\n \"parity\",\n knowl=\"character.dirichlet.parity\",\n label=\"Parity\",\n example=\"odd\"\n )\n is_primitive = YesNoBox(\n \"is_primitive\",\n label=\"Primitive\",\n knowl=\"character.dirichlet.primitive\",\n example=\"yes\"\n )\n is_real = YesNoBox(\n \"is_real\",\n label=\"Real\",\n knowl=\"character.dirichlet.real\",\n example=\"yes\"\n )\n is_minimal = YesNoBox(\n \"is_minimal\",\n label=\"Minimal\",\n knowl=\"character.dirichlet.minimal\",\n example=\"yes\"\n )\n count = CountBox()\n\n self.refine_array = [\n [modulus, conductor, order, is_real], [parity, is_primitive, is_minimal, count],\n ]\n self.browse_array = [\n [modulus],\n [conductor],\n [order],\n [parity],\n [is_primitive],\n [is_real],\n [is_minimal],\n [count],\n ]\n\n def search_types(self, info):\n return self._search_again(info, [\n ('List', 'List of characters'),\n ('Random', 'Random character')])\n\ndef common_parse(info, query):\n parse_ints(info, query, \"modulus\", name=\"modulus\")\n parse_ints(info, query, \"conductor\", name=\"conductor\")\n parse_ints(info, query, \"order\", name=\"order\")\n if 'parity' in info:\n parity=info['parity']\n if parity == 'even':\n query['parity'] = 1\n elif parity == 'odd':\n query['parity'] = -1\n parse_bool(info, query, \"is_primitive\", name=\"is_primitive\")\n parse_bool(info, query, \"is_real\", name=\"is_real\")\n parse_bool(info, query, \"is_minimal\", name=\"is_minimal\")\n\ndef validate_label(label):\n modulus, number = label.split('.')\n modulus = int(modulus)\n numbers = label_to_number(modulus, number, all=True)\n if numbers == 0:\n raise ValueError(\"it must be of the form modulus.number, with modulus and number natural numbers\")\n return True\n\ndef jump(info):\n jump_box = info[\"jump\"].strip() # only called when this present\n try:\n validate_label(jump_box)\n except ValueError as err:\n flash_error(\"%s is not a valid label: %s.\", jump_box, str(err))\n return redirect(url_for_label(jump_box))\n\ndef url_for_label(label):\n label = label.replace(\" \", \"\")\n try:\n validate_label(label)\n except ValueError as err:\n flash_error(\"%s is not a valid label: %s.\", label, str(err))\n return redirect(url_for(\".render_DirichletNavigation\"))\n modulus, number = label.split(\".\")\n modulus = int(modulus)\n number = label_to_number(modulus, number)\n return url_for(\".render_Dirichletwebpage\", modulus=modulus, number=number)\n\n@search_wrap(\n template=\"character_search_results.html\",\n table=db.char_dir_orbits,\n title=\"Dirichlet character search results\",\n err_title=\"Dirichlet character search input error\",\n shortcuts={ \"jump\": jump },\n url_for_label=url_for_label,\n learnmore=learn,\n random_projection=\"label\",\n bread=lambda: bread(\"Search results\"),\n credit=credit,\n)\ndef dirichlet_character_search(info, query):\n common_parse(info, query)\n\ndef label_to_number(modulus, number, 
all=False):\n \"\"\"\n Takes the second part of a character label and converts it to the second\n part of a Conrey label. This could be trivial (just casting to an int)\n or could require converting from an orbit label to a number.\n\n If the label is invalid, returns 0.\n \"\"\"\n try:\n number = int(number)\n except ValueError:\n # encoding Galois orbit\n if modulus < 10000:\n try:\n orbit_label = '{0}.{1}'.format(modulus, 1 + class_to_int(number))\n except ValueError:\n return 0\n else:\n number = db.char_dir_orbits.lucky({'orbit_label':orbit_label}, 'galois_orbit')\n if number is None:\n return 0\n if not all:\n number = number[0]\n else:\n return 0\n else:\n if number <= 0 or gcd(modulus, number) != 1 or number > modulus:\n return 0\n return number\n\n@characters_page.route(\"/Dirichlet\")\n@characters_page.route(\"/Dirichlet/\")\ndef render_DirichletNavigation():\n try:\n if 'modbrowse' in request.args:\n arg = request.args['modbrowse']\n arg = arg.split('-')\n modulus_start = int(arg[0])\n modulus_end = int(arg[1])\n info = {'args': request.args}\n info['title'] = 'Dirichlet characters of modulus ' + str(modulus_start) + '-' + str(modulus_end)\n info['bread'] = bread('Modulus')\n info['learnmore'] = learn()\n info['credit'] = credit()\n h, c, rows, cols = get_character_modulus(modulus_start, modulus_end)\n info['contents'] = c\n info['headers'] = h\n info['rows'] = rows\n info['cols'] = cols\n return render_template(\"ModulusList.html\", **info)\n\n elif 'condbrowse' in request.args:\n arg = request.args['condbrowse']\n arg = arg.split('-')\n conductor_start = int(arg[0])\n conductor_end = int(arg[1])\n info = {'args': request.args}\n info['bread'] = bread('Conductor')\n info['learnmore'] = learn()\n info['credit'] = credit()\n info['conductor_start'] = conductor_start\n info['conductor_end'] = conductor_end\n info['title'] = 'Dirichlet characters of conductor ' + str(conductor_start) + '-' + str(conductor_end)\n info['contents'] = get_character_conductor(conductor_start, conductor_end + 1)\n return render_template(\"ConductorList.html\", **info)\n\n elif 'ordbrowse' in request.args:\n arg = request.args['ordbrowse']\n arg = arg.split('-')\n order_start = int(arg[0])\n order_end = int(arg[1])\n info = {'args': request.args}\n info['bread'] = bread('Order')\n info['learnmore'] = learn()\n info['credit'] = credit()\n info['order_start'] = order_start\n info['order_end'] = order_end\n info['title'] = 'Dirichlet characters of orders ' + str(order_start) + '-' + str(order_end)\n info['contents'] = get_character_order(order_start, order_end + 1)\n return render_template(\"OrderList.html\", **info)\n except ValueError as err:\n flash_error(\"Error raised in parsing: %s\", err)\n return render_template('CharacterNavigate.html', title='Dirichlet characters', bread=bread(), learnmore=learn(), credit=credit())\n\n if request.args:\n # hidden_search_type for prev/next buttons\n info = to_dict(request.args, search_array=DirichSearchArray())\n info[\"search_type\"] = search_type = info.get(\"search_type\", info.get(\"hst\", \"List\"))\n if search_type in ['List', 'Random']:\n return dirichlet_character_search(info)\n assert False\n info = to_dict(request.args, search_array=DirichSearchArray(), stats=DirichStats())\n info['bread'] = bread()\n info['learnmore'] = learn()\n info['credit'] = credit()\n info['title'] = 'Dirichlet characters'\n return render_template('CharacterNavigate.html', info=info,**info)\n\n\n@characters_page.route(\"/Dirichlet/Labels\")\ndef labels_page():\n info = {}\n 
info['title'] = 'Dirichlet character labels'\n info['bread'] = bread('Labels')\n info['learnmore'] = learn('labels')\n info['credit'] = credit()\n return render_template(\"single.html\", kid='character.dirichlet.conrey', **info)\n\n@characters_page.route(\"/Dirichlet/Source\")\ndef how_computed_page():\n info = {}\n info['title'] = 'Source of Dirichlet character data'\n info['bread'] = bread('Source')\n info['learnmore'] = learn('source')\n info['credit'] = credit()\n return render_template(\"single.html\", kid='rcs.source.character.dirichlet', **info)\n\n@characters_page.route(\"/Dirichlet/Reliability\")\ndef reliability():\n info = {}\n info['title'] = 'Reliability of Dirichlet character data'\n info['bread'] = bread('Reliability')\n info['learnmore'] = learn('reliability')\n info['credit'] = credit()\n return render_template(\"single.html\", kid='rcs.rigor.character.dirichlet', **info)\n\n@characters_page.route(\"/Dirichlet/Completeness\")\ndef extent_page():\n info = {}\n info['title'] = 'Completeness of Dirichlet character data'\n info['bread'] = bread('Extent')\n info['learnmore'] = learn('extent')\n info['credit'] = credit()\n return render_template(\"single.html\", kid='dq.character.dirichlet.extent',\n **info)\n\ndef make_webchar(args):\n modulus = int(args['modulus'])\n if modulus < 10000:\n return WebDBDirichletCharacter(**args)\n elif modulus < 100000:\n return WebDirichletCharacter(**args)\n else:\n return WebSmallDirichletCharacter(**args)\n\n@characters_page.route(\"/Dirichlet/<modulus>\")\n@characters_page.route(\"/Dirichlet/<modulus>/\")\n@characters_page.route(\"/Dirichlet/<modulus>/<number>\")\ndef render_Dirichletwebpage(modulus=None, number=None):\n\n modulus = modulus.replace(' ','')\n if number is None and re.match(r'^[1-9][0-9]*\\.([1-9][0-9]*|[a-z]+)$', modulus):\n modulus, number = modulus.split('.')\n return redirect(url_for(\".render_Dirichletwebpage\", modulus=modulus, number=number), 301)\n\n args={}\n args['type'] = 'Dirichlet'\n args['modulus'] = modulus\n args['number'] = number\n try:\n modulus = int(modulus)\n except ValueError:\n modulus = 0\n if modulus <= 0:\n flash_error(\"%s is not a valid modulus for a Dirichlet character. It should be a positive integer.\", args['modulus'])\n return redirect(url_for(\".render_DirichletNavigation\"))\n if modulus > 10**20:\n flash_error(\"specified modulus %s is too large, it should be less than $10^{20}$.\", modulus)\n return redirect(url_for(\".render_DirichletNavigation\"))\n\n\n\n if number is None:\n if modulus < 10000:\n info = WebDBDirichletGroup(**args).to_dict()\n info['show_orbit_label'] = True\n elif modulus < 100000:\n info = WebDirichletGroup(**args).to_dict()\n else:\n info = WebSmallDirichletGroup(**args).to_dict()\n info['title'] = 'Group of Dirichlet characters of modulus ' + str(modulus)\n info['bread'] = bread([('%d'%modulus, url_for(\".render_Dirichletwebpage\", modulus=modulus))])\n info['learnmore'] = learn()\n info['credit'] = credit()\n info['code'] = dict([(k[4:],info[k]) for k in info if k[0:4] == \"code\"])\n info['code']['show'] = { lang:'' for lang in info['codelangs'] } # use default show names\n if 'gens' in info:\n info['generators'] = ', '.join([r'<a href=\"%s\">$\\chi_{%s}(%s,\\cdot)$'%(url_for(\".render_Dirichletwebpage\",modulus=modulus,number=g),modulus,g) for g in info['gens']])\n return render_template('CharGroup.html', **info)\n\n number = label_to_number(modulus, number)\n if number == 0:\n flash_error(\n \"the value %s is invalid. 
It should either be a positive integer \"\n \"coprime to and no greater than the modulus %s, or a letter that \"\n \"corresponds to a valid orbit index.\", args['number'], args['modulus']\n )\n return redirect(url_for(\".render_DirichletNavigation\"))\n args['number'] = number\n webchar = make_webchar(args)\n info = webchar.to_dict()\n info['bread'] = bread(\n [('%s'%modulus, url_for(\".render_Dirichletwebpage\", modulus=modulus)),\n ('%s'%number, url_for(\".render_Dirichletwebpage\", modulus=modulus, number=number)) ])\n info['learnmore'] = learn()\n info['credit'] = credit()\n info['code'] = dict([(k[4:],info[k]) for k in info if k[0:4] == \"code\"])\n info['code']['show'] = { lang:'' for lang in info['codelangs'] } # use default show names\n info['KNOWL_ID'] = 'character.dirichlet.%s.%s' % (modulus, number)\n return render_template('Character.html', **info)\n\ndef _dir_knowl_data(label, orbit=False):\n modulus, number = label.split('.')\n modulus = int(modulus)\n numbers = label_to_number(modulus, number, all=True)\n if numbers == 0:\n return \"Invalid label for Dirichlet character: %s\" % label\n if isinstance(numbers, list):\n number = numbers[0]\n def conrey_link(i):\n return \"<a href='%s'> %s.%s</a>\" % (url_for(\"characters.render_Dirichletwebpage\", modulus=modulus, number=i), modulus, i)\n if len(numbers) <= 2:\n numbers = [conrey_link(k) for k in numbers]\n else:\n numbers = [conrey_link(numbers[0]), '…', conrey_link(numbers[-1])]\n else:\n number = numbers\n numbers = None\n args={'type': 'Dirichlet', 'modulus': modulus, 'number': number}\n webchar = make_webchar(args)\n if orbit and modulus <= 10000:\n inf = \"Dirichlet character orbit %d.%s\\n\" % (modulus, webchar.orbit_label)\n else:\n inf = r\"Dirichlet character \\(\\chi_{%d}(%d, \\cdot)\\)\" % (modulus, number) + \"\\n\"\n inf += \"<div><table class='chardata'>\\n\"\n def row_wrap(header, val):\n return \"<tr><td>%s: </td><td>%s</td></tr>\\n\" % (header, val)\n inf += row_wrap('Conductor', webchar.conductor)\n inf += row_wrap('Order', webchar.order)\n inf += row_wrap('Degree', euler_phi(webchar.order))\n inf += row_wrap('Minimal', webchar.isminimal)\n inf += row_wrap('Parity', webchar.parity)\n if numbers:\n inf += row_wrap('Characters', \", \".join(numbers))\n if modulus <= 10000:\n if not orbit:\n inf += row_wrap('Orbit label', '%d.%s' % (modulus, webchar.orbit_label))\n inf += row_wrap('Orbit Index', webchar.orbit_index)\n inf += '</table></div>\\n'\n if numbers is None:\n inf += '<div align=\"right\">\\n'\n inf += '<a href=\"%s\">%s home page</a>\\n' % (str(url_for(\"characters.render_Dirichletwebpage\", modulus=modulus, number=number)), label)\n inf += '</div>\\n'\n return inf\n\ndef dirichlet_character_data(label):\n return _dir_knowl_data(label, orbit=False)\n\ndef dirichlet_orbit_data(label):\n return _dir_knowl_data(label, orbit=True)\n\[email protected]_processor\ndef ctx_dirchar():\n return {'dirichlet_character_data': dirichlet_character_data,\n 'dirichlet_orbit_data': dirichlet_orbit_data}\n\n@characters_page.route('/Dirichlet/random')\ndef random_Dirichletwebpage():\n return redirect(url_for('.render_DirichletNavigation', search_type=\"Random\"))\n\n@characters_page.route('/Dirichlet/interesting')\ndef interesting():\n return interesting_knowls(\n \"character.dirichlet\",\n db.char_dir_values,\n url_for_label=url_for_label,\n title=\"Some interesting Dirichlet characters\",\n bread=bread(\"Interesting\"),\n credit=credit(),\n learnmore=learn())\n\n@characters_page.route('/Dirichlet/stats')\ndef 
statistics():\n title = \"Dirichlet characters: statistics\"\n return render_template(\"display_stats.html\", info=DirichStats(), credit=credit(), title=title, bread=bread(\"Statistics\"), learnmore=learn())\n\n@characters_page.route(\"/calc-<calc>/Dirichlet/<int:modulus>/<int:number>\")\ndef dc_calc(calc, modulus, number):\n val = request.args.get(\"val\", [])\n args = {'type': 'Dirichlet', 'modulus': modulus, 'number': number}\n if not val:\n return abort(404)\n try:\n if calc == 'value':\n return WebDirichletCharacter(**args).value(val)\n if calc == 'gauss':\n return WebDirichletCharacter(**args).gauss_sum(val)\n elif calc == 'jacobi':\n return WebDirichletCharacter(**args).jacobi_sum(val)\n elif calc == 'kloosterman':\n return WebDirichletCharacter(**args).kloosterman_sum(val)\n else:\n return abort(404)\n except Warning as e:\n return \"<span style='color:gray;'>%s</span>\" % e\n except Exception:\n return \"<span style='color:red;'>Error: bad input</span>\"\n\n###############################################################################\n## TODO: refactor the following\n###############################################################################\n\n@characters_page.route(\"/Dirichlet/table\")\ndef dirichlet_table():\n args = to_dict(request.args)\n mod = args.get('modulus',1)\n return redirect(url_for('characters.render_Dirichletwebpage',modulus=mod))\n\n# FIXME: these group table pages are used by number fields pages.\n# should refactor this into WebDirichlet.py\n@characters_page.route(\"/Dirichlet/grouptable\")\ndef dirichlet_group_table(**args):\n modulus = request.args.get(\"modulus\", 1, type=int)\n info = to_dict(args)\n if \"modulus\" not in info:\n info[\"modulus\"] = modulus\n info['bread'] = bread('Group')\n info['credit'] = credit()\n char_number_list = request.args.get(\"char_number_list\",None)\n if char_number_list is not None:\n info['char_number_list'] = char_number_list\n char_number_list = [int(a) for a in char_number_list.split(',')]\n info['poly'] = request.args.get(\"poly\", '???')\n else:\n return abort(404, 'grouptable needs char_number_list argument')\n h, c = get_group_table(modulus, char_number_list)\n info['headers'] = h\n info['contents'] = c\n info['title'] = 'Group of Dirichlet characters'\n return render_template(\"CharacterGroupTable.html\", **info)\n\n\ndef get_group_table(modulus, char_list):\n # Move 1 to the front of the list\n char_list.insert(0, char_list.pop(next(j for j in range(len(char_list)) if char_list[j]==1)))\n headers = [j for j in char_list] # Just a copy\n if modulus == 1:\n rows = [[1]]\n else:\n rows = [[(j * k) % modulus for k in char_list] for j in char_list]\n return headers, rows\n\ndef yesno(x):\n return \"yes\" if x in [\"yes\", True] else \"no\"\nclass DirichStats(StatsDisplay):\n table = db.char_dir_orbits\n baseurl_func = \".render_DirichletNavigation\"\n stat_list = [\n {\"cols\": [\"conductor\"]},\n {\"cols\": [\"order\", \"modulus\"],\n \"title_joiner\": \" by \",\n \"totaler\": totaler(),\n \"proportioner\": proportioners.per_col_total},\n {\"cols\": [\"is_primitive\", \"modulus\"],\n \"title_joiner\": \" by \",\n \"totaler\": totaler(),\n \"proportioner\": proportioners.per_col_total},\n {\"cols\": [\"is_real\", \"modulus\"],\n \"title_joiner\": \" by \",\n \"totaler\": totaler(),\n \"proportioner\": proportioners.per_col_total},\n {\"cols\": [\"is_minimal\", \"modulus\"],\n \"title_joiner\": \" by \",\n \"totaler\": totaler(),\n \"proportioner\": proportioners.per_col_total},\n ]\n buckets = {\"conductor\": 
[\"1-10\", \"11-100\", \"101-1000\", \"1001-10000\"],\n \"modulus\": [\"1-10\", \"11-100\", \"101-1000\", \"1001-10000\"],\n \"order\": [\"1-10\", \"11-100\", \"101-1000\", \"1001-10000\"]}\n knowls = {\"conductor\": \"character.dirichlet.conductor\",\n \"modulus\": \"character.dirichlet.modulus\",\n \"order\": \"character.dirichlet.order\",\n \"is_minimal\": \"character.dirichlet.minimal\",\n \"is_primitive\": \"character.dirichlet.primitive\",\n \"is_real\": \"character.dirichlet.real\"}\n short_display = {\"is_minimal\": \"minimal\",\n \"is_primitive\": \"primitive\",\n \"is_real\": \"real\"}\n top_titles = {\"order\": \"order\",\n \"is_minimal\": \"minimality\",\n \"is_primitive\": \"primitivity\",\n \"is_real\": \"real characters\"}\n formatters = {\"is_minimal\": yesno,\n \"is_primitive\": yesno,\n \"is_real\": yesno}\n\n def __init__(self):\n self.nchars = db.char_dir_values.count()\n self.norbits = db.char_dir_orbits.count()\n self.maxmod = db.char_dir_orbits.max(\"modulus\")\n\n @property\n def short_summary(self):\n return 'The database currently contains %s %s of %s up to %s, lying in %s %s. Among these, L-functions are available for characters of modulus up to 2,800 (and some of higher modulus). In addition, %s, Galois orbits and %s are available up to modulus $10^{20}$. Here are some <a href=\"%s\">futher statistics</a>.' % (\n comma(self.nchars),\n display_knowl(\"character.dirichlet\", \"Dirichlet characters\"),\n display_knowl(\"character.dirichlet.modulus\", \"modulus\"),\n comma(self.maxmod),\n comma(self.norbits),\n display_knowl(\"character.dirichlet.galois_orbit\", \"Galois orbits\"),\n display_knowl(\"character.dirichlet.basic_properties\", \"basic properties\"),\n display_knowl(\"character.dirichlet.value_field\", \"field of values\"),\n url_for(\".statistics\"))\n\n @property\n def summary(self):\n return \"The database currently contains %s %s of %s up to %s, lying in %s %s. The tables below show counts of Galois orbits.\" % (\n comma(self.nchars),\n display_knowl(\"character.dirichlet\", \"Dirichlet characters\"),\n display_knowl(\"character.dirichlet.modulus\", \"modulus\"),\n comma(self.maxmod),\n comma(self.norbits),\n display_knowl(\"character.dirichlet.galois_orbit\", \"Galois orbits\"))\n",
"path": "lmfdb/characters/main.py"
}
] | diff --git a/lmfdb/characters/main.py b/lmfdb/characters/main.py
index da37849a43..0c4f779909 100644
--- a/lmfdb/characters/main.py
+++ b/lmfdb/characters/main.py
@@ -189,6 +189,7 @@ def url_for_label(label):
shortcuts={ "jump": jump },
url_for_label=url_for_label,
learnmore=learn,
+ random_projection="label",
bread=lambda: bread("Search results"),
credit=credit,
)
|
scverse__scanpy-997 | `datasets.pbmc68k_reduced` isn't contained in the pypi package anymore
This still works in `1.4.4.post1`; the breakage is very likely caused by changes to `setup.py`. I experienced similar problems before and fixed them via `package_data`, but that setting got removed. It's probably only a problem for source-based installs.
https://github.com/theislab/scanpy/commit/881f0bef31cdfe0df7333641dc847a60894b5c41#diff-2eeaed663bd0d25b7e608891384b7298
```
>>> import scanpy
>>> scanpy.__version__
<Version('1.4.5.post2')>
>>> scanpy.datasets.pbmc68k_reduced()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/alexwolf/miniconda3/lib/python3.6/site-packages/scanpy/datasets/__init__.py", line 239, in pbmc68k_reduced
return read(filename)
File "/Users/alexwolf/miniconda3/lib/python3.6/site-packages/scanpy/readwrite.py", line 114, in read
**kwargs,
File "/Users/alexwolf/miniconda3/lib/python3.6/site-packages/scanpy/readwrite.py", line 524, in _read
return read_h5ad(filename, backed=backed)
File "/Users/alexwolf/miniconda3/lib/python3.6/site-packages/anndata/readwrite/read.py", line 447, in read_h5ad
constructor_args = _read_args_from_h5ad(filename=filename, chunk_size=chunk_size)
File "/Users/alexwolf/miniconda3/lib/python3.6/site-packages/anndata/readwrite/read.py", line 481, in _read_args_from_h5ad
f = h5py.File(filename, 'r')
File "/Users/alexwolf/miniconda3/lib/python3.6/site-packages/anndata/h5py/h5sparse.py", line 162, in __init__
**kwds,
File "/Users/alexwolf/miniconda3/lib/python3.6/site-packages/h5py/_hl/files.py", line 312, in __init__
fid = make_fid(name, mode, userblock_size, fapl, swmr=swmr)
File "/Users/alexwolf/miniconda3/lib/python3.6/site-packages/h5py/_hl/files.py", line 142, in make_fid
fid = h5f.open(name, flags, fapl=fapl)
File "h5py/_objects.pyx", line 54, in h5py._objects.with_phil.wrapper
File "h5py/_objects.pyx", line 55, in h5py._objects.with_phil.wrapper
File "h5py/h5f.pyx", line 78, in h5py.h5f.open
```
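For context, here is a sketch of the two usual setuptools ways to ship such data files, `package_data` and `include_package_data`. It is illustrative only, not scanpy's actual `setup.py`, and the `*.h5ad` glob is an assumption about where the bundled file lives:
```python
# Illustrative setup.py fragment; names and globs are assumptions, not scanpy's real config.
from setuptools import setup, find_packages

setup(
    name="scanpy",
    packages=find_packages(),
    # Option 1: list the data files per package explicitly.
    package_data={"scanpy.datasets": ["*.h5ad"]},
    # Option 2: pull in data files covered by MANIFEST.in (or tracked via setuptools_scm).
    include_package_data=True,
)
```
Either mechanism alone is enough; without one of them, an install from the source distribution generally copies only the Python modules, which would explain the missing `.h5ad` above.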
| [
{
"content": "import sys\n\nif sys.version_info < (3, 6):\n sys.exit('scanpy requires Python >= 3.6')\nfrom pathlib import Path\n\nfrom setuptools import setup, find_packages\n\n\ntry:\n from scanpy import __author__, __email__\nexcept ImportError: # Deps not yet installed\n __author__ = __email__ = ''\n\nsetup(\n name='scanpy',\n use_scm_version=True,\n setup_requires=['setuptools_scm'],\n description='Single-Cell Analysis in Python.',\n long_description=Path('README.rst').read_text('utf-8'),\n url='http://github.com/theislab/scanpy',\n author=__author__,\n author_email=__email__,\n license='BSD',\n python_requires='>=3.6',\n install_requires=[\n l.strip() for l in Path('requirements.txt').read_text('utf-8').splitlines()\n ],\n extras_require=dict(\n louvain=['python-igraph', 'louvain>=0.6'],\n leiden=['python-igraph', 'leidenalg'],\n bbknn=['bbknn'],\n rapids=['cudf', 'cuml', 'cugraph'],\n magic=['magic-impute>=2.0'],\n doc=[\n 'sphinx',\n 'sphinx_rtd_theme',\n 'sphinx_autodoc_typehints',\n 'scanpydoc>=0.4.3',\n 'typing_extensions; python_version < \"3.8\"', # for `Literal`\n ],\n test=[\n 'pytest>=4.4',\n 'dask[array]',\n 'fsspec',\n 'zappy',\n 'zarr',\n 'black',\n 'profimp',\n ],\n ),\n packages=find_packages(),\n entry_points=dict(console_scripts=['scanpy=scanpy.cli:console_main']),\n zip_safe=False,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Framework :: Jupyter',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'Natural Language :: English',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Scientific/Engineering :: Bio-Informatics',\n 'Topic :: Scientific/Engineering :: Visualization',\n ],\n)\n",
"path": "setup.py"
}
] | [
{
"content": "import sys\n\nif sys.version_info < (3, 6):\n sys.exit('scanpy requires Python >= 3.6')\nfrom pathlib import Path\n\nfrom setuptools import setup, find_packages\n\n\ntry:\n from scanpy import __author__, __email__\nexcept ImportError: # Deps not yet installed\n __author__ = __email__ = ''\n\nsetup(\n name='scanpy',\n use_scm_version=True,\n setup_requires=['setuptools_scm'],\n description='Single-Cell Analysis in Python.',\n long_description=Path('README.rst').read_text('utf-8'),\n url='http://github.com/theislab/scanpy',\n author=__author__,\n author_email=__email__,\n license='BSD',\n python_requires='>=3.6',\n install_requires=[\n l.strip() for l in Path('requirements.txt').read_text('utf-8').splitlines()\n ],\n extras_require=dict(\n louvain=['python-igraph', 'louvain>=0.6'],\n leiden=['python-igraph', 'leidenalg'],\n bbknn=['bbknn'],\n rapids=['cudf', 'cuml', 'cugraph'],\n magic=['magic-impute>=2.0'],\n doc=[\n 'sphinx',\n 'sphinx_rtd_theme',\n 'sphinx_autodoc_typehints',\n 'scanpydoc>=0.4.3',\n 'typing_extensions; python_version < \"3.8\"', # for `Literal`\n ],\n test=[\n 'pytest>=4.4',\n 'dask[array]',\n 'fsspec',\n 'zappy',\n 'zarr',\n 'black',\n 'profimp',\n ],\n ),\n packages=find_packages(),\n include_package_data=True,\n entry_points=dict(console_scripts=['scanpy=scanpy.cli:console_main']),\n zip_safe=False,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Framework :: Jupyter',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'Natural Language :: English',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Scientific/Engineering :: Bio-Informatics',\n 'Topic :: Scientific/Engineering :: Visualization',\n ],\n)\n",
"path": "setup.py"
}
] | diff --git a/setup.py b/setup.py
index 9e6cdfa2fb..2dcff3cdec 100644
--- a/setup.py
+++ b/setup.py
@@ -50,6 +50,7 @@
],
),
packages=find_packages(),
+ include_package_data=True,
entry_points=dict(console_scripts=['scanpy=scanpy.cli:console_main']),
zip_safe=False,
classifiers=[
|
cognitedata__cognite-sdk-python-291 | client.time_series.get_time_series does not return metadata
**Describe the bug**
When executing `client.time_series.get_time_series()` with `include_metadata = True` no metadata is returned.
**To Reproduce**
Runnable code reproducing the error.
```
import cognite
import requests
import os
import numpy as np
import pandas as pd
from datetime import datetime, timedelta
from cognite.client.stable.time_series import TimeSeries
sm_api = os.environ['SM_API_KEY']
client = cognite.CogniteClient(api_key = sm_api)
ts_name = 'Test_tssssss'
my_time_series = [TimeSeries(name=ts_name,
                             description = 'test_description',
                             metadata = { 'ASSETSCOPENAME' : 'meta_test_1' })]
client.time_series.post_time_series(my_time_series)
# create dummy data
np.random.seed(1338)
start_time = int((datetime.now()-timedelta(1)).strftime("%s"))
timestamps = [(start_time + i * 10)*1000 for i in np.arange(11)]
df = pd.DataFrame({'timestamp' : timestamps})
df[ts_name] = np.random.random(df.shape[0])
client.datapoints.post_datapoints_frame(df)
# get time_series
ts1 = client.time_series.get_time_series(name = ts_name,
                                          include_metadata = True).to_pandas()
ts1_id = ts1['id'].loc[0]
print(ts1.loc[0])
# no meta data
# requests:
# first with no metadata
r1 = requests.get(url = 'https://api.cognitedata.com/api/0.5/projects/smart-maintenance-sandbox/timeseries/' + str(ts1_id) ,
                  headers= { 'Api-Key' : sm_api} , params = {"includeMetadata" : False})
print(r1.text.split('\n'))
# then with metadata
r1 = requests.get(url = 'https://api.cognitedata.com/api/0.5/projects/smart-maintenance-sandbox/timeseries/' + str(ts1_id) ,
                  headers= { 'Api-Key' : sm_api} , params = {"includeMetadata" : True})
print(r1.text.split('\n'))
```
**Expected behavior**
The `client.time_series.get_time_series(name = ts_name, include_metadata = True)` call should return the metadata.
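As a possible workaround sketch, the metadata does end up in the dataframe when `to_pandas()` is also asked for it explicitly. This assumes the response object's `to_pandas()` accepts an `include_metadata` keyword (it defaults to `False` in the SDK source below) and reuses `client` and `ts_name` from the snippet above:
```python
# Workaround sketch: request metadata from both the API call and the dataframe conversion.
ts1 = client.time_series.get_time_series(
    name=ts_name, include_metadata=True
).to_pandas(include_metadata=True)
print(ts1.loc[0])  # the metadata key (here ASSETSCOPENAME) should now appear as a column
```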
| [
{
"content": "# -*- coding: utf-8 -*-\nfrom copy import deepcopy\nfrom typing import List\nfrom urllib.parse import quote\n\nimport pandas as pd\n\nfrom cognite.client._api_client import APIClient, CogniteCollectionResponse, CogniteResource, CogniteResponse\n\n\nclass TimeSeriesResponse(CogniteResponse):\n \"\"\"Time series Response Object\"\"\"\n\n def __init__(self, internal_representation):\n super().__init__(internal_representation)\n item = self.to_json()\n self.id = item.get(\"id\")\n self.name = item.get(\"name\")\n self.unit = item.get(\"unit\")\n self.is_step = item.get(\"isStep\")\n self.is_string = item.get(\"isString\")\n self.created_time = item.get(\"createdTime\")\n self.last_updated_time = item.get(\"lastUpdatedTime\")\n self.metadata = item.get(\"metadata\")\n self.asset_id = item.get(\"assetId\")\n self.description = item.get(\"description\")\n\n def to_pandas(self):\n \"\"\"Returns data as a pandas dataframe\"\"\"\n if len(self.to_json()) > 0:\n ts = self.to_json().copy()\n if \"metadata\" in ts:\n # Hack to avoid path ending up as first element in dict as from_dict will fail\n metadata = ts.pop(\"metadata\")\n df = pd.DataFrame.from_dict(ts, orient=\"index\")\n df.loc[\"metadata\"] = [metadata]\n else:\n df = pd.DataFrame.from_dict(ts, orient=\"index\")\n return df\n return pd.DataFrame()\n\n\nclass TimeSeriesListResponse(CogniteCollectionResponse):\n \"\"\"Time series Response Object\"\"\"\n\n _RESPONSE_CLASS = TimeSeriesResponse\n\n def to_pandas(self, include_metadata: bool = False):\n \"\"\"Returns data as a pandas dataframe\n\n Args:\n include_metadata (bool): Whether or not to include metadata fields in the resulting dataframe\n \"\"\"\n items = deepcopy(self.internal_representation[\"data\"][\"items\"])\n if items and items[0].get(\"metadata\") is None:\n return pd.DataFrame(items)\n for d in items:\n if d.get(\"metadata\"):\n metadata = d.pop(\"metadata\")\n if include_metadata:\n d.update(metadata)\n return pd.DataFrame(items)\n\n\nclass TimeSeries(CogniteResource):\n \"\"\"Data Transfer Object for a time series.\n\n Args:\n name (str): Unique name of time series.\n is_string (bool): Whether the time series is string valued or not.\n metadata (dict): Metadata.\n unit (str): Physical unit of the time series.\n asset_id (int): Asset that this time series belongs to.\n description (str): Description of the time series.\n security_categories (list(int)): Security categories required in order to access this time series.\n is_step (bool): Whether or not the time series is a step series.\n\n \"\"\"\n\n def __init__(\n self,\n name,\n is_string=False,\n metadata=None,\n unit=None,\n asset_id=None,\n description=None,\n security_categories=None,\n is_step=None,\n ):\n self.name = name\n self.is_string = is_string\n self.metadata = metadata\n self.unit = unit\n self.asset_id = asset_id\n self.description = description\n self.security_categories = security_categories\n self.is_step = is_step\n\n\nclass TimeSeriesClient(APIClient):\n def __init__(self, **kwargs):\n super().__init__(version=\"0.5\", **kwargs)\n\n def get_time_series(\n self, prefix=None, description=None, include_metadata=False, asset_id=None, path=None, **kwargs\n ) -> TimeSeriesListResponse:\n \"\"\"Returns an object containing the requested timeseries.\n\n Args:\n prefix (str): List timeseries with this prefix in the name.\n\n description (str): Filter timeseries taht contains this string in its description.\n\n include_metadata (bool): Decide if the metadata field should be returned or not. 
Defaults to False.\n\n asset_id (int): Get timeseries related to this asset.\n\n path (List[int]): Get timeseries under this asset path branch.\n\n Keyword Arguments:\n limit (int): Number of results to return.\n\n autopaging (bool): Whether or not to automatically page through results. If set to true, limit will be\n disregarded. Defaults to False.\n\n Returns:\n stable.time_series.TimeSeriesListResponse: A data object containing the requested timeseries with several getter methods with different\n output formats.\n\n Examples:\n Get all time series for a given asset::\n\n client = CogniteClient()\n res = client.time_series.get_time_series(asset_id=123, autopaging=True)\n print(res.to_pandas())\n \"\"\"\n autopaging = kwargs.get(\"autopaging\", False)\n url = \"/timeseries\"\n params = {\n \"q\": prefix,\n \"description\": description,\n \"includeMetadata\": include_metadata,\n \"assetId\": asset_id,\n \"path\": str(path) if path else None,\n \"limit\": kwargs.get(\"limit\", self._LIMIT) if not autopaging else self._LIMIT,\n }\n\n res = self._get(url=url, params=params, autopaging=autopaging)\n return TimeSeriesListResponse(res.json())\n\n def post_time_series(self, time_series: List[TimeSeries]) -> None:\n \"\"\"Create a new time series.\n\n Args:\n time_series (list[stable.time_series.TimeSeries]): List of time series data transfer objects to create.\n\n Returns:\n None\n\n Examples:\n Create a new time series::\n\n from cognite.client.stable.time_series import TimeSeries\n client = CogniteClient()\n\n my_time_series = [TimeSeries(name=\"my_ts_1\")]\n\n client.time_series.post_time_series(my_time_series)\n \"\"\"\n url = \"/timeseries\"\n items = [ts.camel_case_dict() for ts in time_series]\n body = {\"items\": items}\n self._post(url, body=body)\n\n def update_time_series(self, time_series: List[TimeSeries]) -> None:\n \"\"\"Update an existing time series.\n\n For each field that can be updated, a null value indicates that nothing should be done.\n\n Args:\n time_series (list[stable.time_series.TimeSeries]): List of time series data transfer objects to update.\n\n Returns:\n None\n\n Examples:\n Update the unit of a time series::\n\n from cognite.client.stable.time_series import TimeSeries\n client = CogniteClient()\n\n my_time_series = [TimeSeries(name=\"my_ts_1\", unit=\"celsius\")]\n\n client.time_series.update_time_series(my_time_series)\n \"\"\"\n url = \"/timeseries\"\n items = [ts.camel_case_dict() for ts in time_series]\n body = {\"items\": items}\n self._put(url, body=body)\n\n def delete_time_series(self, name) -> None:\n \"\"\"Delete a timeseries.\n\n Args:\n name (str): Name of timeseries to delete.\n\n Returns:\n None\n\n Examples:\n Delete a time series by name::\n\n client = CogniteClient()\n\n client.time_series.delete_time_series(name=\"my_ts_1\")\n \"\"\"\n url = \"/timeseries/{}\".format(quote(name, safe=\"\"))\n self._delete(url)\n",
"path": "cognite/client/stable/time_series.py"
}
] | [
{
"content": "# -*- coding: utf-8 -*-\nfrom copy import deepcopy\nfrom typing import List\nfrom urllib.parse import quote\n\nimport pandas as pd\n\nfrom cognite.client._api_client import APIClient, CogniteCollectionResponse, CogniteResource, CogniteResponse\n\n\nclass TimeSeriesResponse(CogniteResponse):\n \"\"\"Time series Response Object\"\"\"\n\n def __init__(self, internal_representation):\n super().__init__(internal_representation)\n item = self.to_json()\n self.id = item.get(\"id\")\n self.name = item.get(\"name\")\n self.unit = item.get(\"unit\")\n self.is_step = item.get(\"isStep\")\n self.is_string = item.get(\"isString\")\n self.created_time = item.get(\"createdTime\")\n self.last_updated_time = item.get(\"lastUpdatedTime\")\n self.metadata = item.get(\"metadata\")\n self.asset_id = item.get(\"assetId\")\n self.description = item.get(\"description\")\n\n def to_pandas(self):\n \"\"\"Returns data as a pandas dataframe\"\"\"\n if len(self.to_json()) > 0:\n ts = self.to_json().copy()\n if \"metadata\" in ts:\n # Hack to avoid path ending up as first element in dict as from_dict will fail\n metadata = ts.pop(\"metadata\")\n df = pd.DataFrame.from_dict(ts, orient=\"index\")\n df.loc[\"metadata\"] = [metadata]\n else:\n df = pd.DataFrame.from_dict(ts, orient=\"index\")\n return df\n return pd.DataFrame()\n\n\nclass TimeSeriesListResponse(CogniteCollectionResponse):\n \"\"\"Time series Response Object\"\"\"\n\n _RESPONSE_CLASS = TimeSeriesResponse\n\n def to_pandas(self, include_metadata: bool = True):\n \"\"\"Returns data as a pandas dataframe\n\n Args:\n include_metadata (bool): Whether or not to include metadata fields in the resulting dataframe\n \"\"\"\n items = deepcopy(self.internal_representation[\"data\"][\"items\"])\n if items and items[0].get(\"metadata\") is None:\n return pd.DataFrame(items)\n for d in items:\n if d.get(\"metadata\"):\n metadata = d.pop(\"metadata\")\n if include_metadata:\n d.update(metadata)\n return pd.DataFrame(items)\n\n\nclass TimeSeries(CogniteResource):\n \"\"\"Data Transfer Object for a time series.\n\n Args:\n name (str): Unique name of time series.\n is_string (bool): Whether the time series is string valued or not.\n metadata (dict): Metadata.\n unit (str): Physical unit of the time series.\n asset_id (int): Asset that this time series belongs to.\n description (str): Description of the time series.\n security_categories (list(int)): Security categories required in order to access this time series.\n is_step (bool): Whether or not the time series is a step series.\n\n \"\"\"\n\n def __init__(\n self,\n name,\n is_string=False,\n metadata=None,\n unit=None,\n asset_id=None,\n description=None,\n security_categories=None,\n is_step=None,\n ):\n self.name = name\n self.is_string = is_string\n self.metadata = metadata\n self.unit = unit\n self.asset_id = asset_id\n self.description = description\n self.security_categories = security_categories\n self.is_step = is_step\n\n\nclass TimeSeriesClient(APIClient):\n def __init__(self, **kwargs):\n super().__init__(version=\"0.5\", **kwargs)\n\n def get_time_series(\n self, prefix=None, description=None, include_metadata=False, asset_id=None, path=None, **kwargs\n ) -> TimeSeriesListResponse:\n \"\"\"Returns an object containing the requested timeseries.\n\n Args:\n prefix (str): List timeseries with this prefix in the name.\n\n description (str): Filter timeseries taht contains this string in its description.\n\n include_metadata (bool): Decide if the metadata field should be returned or not. 
Defaults to False.\n\n asset_id (int): Get timeseries related to this asset.\n\n path (List[int]): Get timeseries under this asset path branch.\n\n Keyword Arguments:\n limit (int): Number of results to return.\n\n autopaging (bool): Whether or not to automatically page through results. If set to true, limit will be\n disregarded. Defaults to False.\n\n Returns:\n stable.time_series.TimeSeriesListResponse: A data object containing the requested timeseries with several getter methods with different\n output formats.\n\n Examples:\n Get all time series for a given asset::\n\n client = CogniteClient()\n res = client.time_series.get_time_series(asset_id=123, autopaging=True)\n print(res.to_pandas())\n \"\"\"\n autopaging = kwargs.get(\"autopaging\", False)\n url = \"/timeseries\"\n params = {\n \"q\": prefix,\n \"description\": description,\n \"includeMetadata\": include_metadata,\n \"assetId\": asset_id,\n \"path\": str(path) if path else None,\n \"limit\": kwargs.get(\"limit\", self._LIMIT) if not autopaging else self._LIMIT,\n }\n\n res = self._get(url=url, params=params, autopaging=autopaging)\n return TimeSeriesListResponse(res.json())\n\n def post_time_series(self, time_series: List[TimeSeries]) -> None:\n \"\"\"Create a new time series.\n\n Args:\n time_series (list[stable.time_series.TimeSeries]): List of time series data transfer objects to create.\n\n Returns:\n None\n\n Examples:\n Create a new time series::\n\n from cognite.client.stable.time_series import TimeSeries\n client = CogniteClient()\n\n my_time_series = [TimeSeries(name=\"my_ts_1\")]\n\n client.time_series.post_time_series(my_time_series)\n \"\"\"\n url = \"/timeseries\"\n items = [ts.camel_case_dict() for ts in time_series]\n body = {\"items\": items}\n self._post(url, body=body)\n\n def update_time_series(self, time_series: List[TimeSeries]) -> None:\n \"\"\"Update an existing time series.\n\n For each field that can be updated, a null value indicates that nothing should be done.\n\n Args:\n time_series (list[stable.time_series.TimeSeries]): List of time series data transfer objects to update.\n\n Returns:\n None\n\n Examples:\n Update the unit of a time series::\n\n from cognite.client.stable.time_series import TimeSeries\n client = CogniteClient()\n\n my_time_series = [TimeSeries(name=\"my_ts_1\", unit=\"celsius\")]\n\n client.time_series.update_time_series(my_time_series)\n \"\"\"\n url = \"/timeseries\"\n items = [ts.camel_case_dict() for ts in time_series]\n body = {\"items\": items}\n self._put(url, body=body)\n\n def delete_time_series(self, name) -> None:\n \"\"\"Delete a timeseries.\n\n Args:\n name (str): Name of timeseries to delete.\n\n Returns:\n None\n\n Examples:\n Delete a time series by name::\n\n client = CogniteClient()\n\n client.time_series.delete_time_series(name=\"my_ts_1\")\n \"\"\"\n url = \"/timeseries/{}\".format(quote(name, safe=\"\"))\n self._delete(url)\n",
"path": "cognite/client/stable/time_series.py"
}
] | diff --git a/cognite/client/stable/time_series.py b/cognite/client/stable/time_series.py
index 26708c4fcb..a002097efc 100644
--- a/cognite/client/stable/time_series.py
+++ b/cognite/client/stable/time_series.py
@@ -45,7 +45,7 @@ class TimeSeriesListResponse(CogniteCollectionResponse):
_RESPONSE_CLASS = TimeSeriesResponse
- def to_pandas(self, include_metadata: bool = False):
+ def to_pandas(self, include_metadata: bool = True):
"""Returns data as a pandas dataframe
Args:
|
pymodbus-dev__pymodbus-1197 | client.ModbusClientMixin does not have __init__, but ModbusBaseClient tries to call it
During its initialization, the ModbusBaseClient class tries to call super().\_\_init\_\_(), even though ModbusClientMixin does not define \_\_init\_\_().
Usually it is not a problem.
However, if one later inherits both from, for example, ModbusTcpClient and from another class that defines \_\_init\_\_(), that class's \_\_init\_\_() ends up being called twice, with unexpected consequences:
```python
from pymodbus.client.tcp import *
class SyncClientMixin:
    def __init__(self, **kwargs):
        print("This is gonna be called twice")

class TcpClientWrapper(ModbusTcpClient, SyncClientMixin):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        SyncClientMixin.__init__(self, **kwargs)

wrap = TcpClientWrapper(host = 'localhost')
```
The resolution is to have an empty \_\_init\_\_ in ModbusClientMixin
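A self-contained sketch of why an empty \_\_init\_\_ helps (stand-in class names, not the real pymodbus code): with a no-op initializer on the mixin, the super().\_\_init\_\_() chain started inside the library stops at the mixin instead of walking on to the user's sibling class, so only the explicit call reaches it:
```python
# Stand-in classes illustrating the MRO effect of the proposed empty __init__.
class Mixin:                       # plays the role of ModbusClientMixin
    def __init__(self):
        pass                       # no-op: ends the library-internal super() chain here

class BaseClient(Mixin):           # plays the role of ModbusBaseClient
    def __init__(self, **kwargs):
        super().__init__()

class TcpClient(BaseClient):       # plays the role of ModbusTcpClient
    pass

class SyncClientMixin:
    def __init__(self, **kwargs):
        print("called once")

class Wrapper(TcpClient, SyncClientMixin):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)                # runs BaseClient, then Mixin, then stops
        SyncClientMixin.__init__(self, **kwargs)  # the only call that reaches the user mixin

Wrapper(host="localhost")          # prints "called once" exactly once
```
Removing Mixin.\_\_init\_\_ again reproduces the double call described above.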
| [
{
"content": "\"\"\"Modbus Client Common.\"\"\"\nimport logging\nfrom typing import List, Union\n\nimport pymodbus.bit_read_message as pdu_bit_read\nimport pymodbus.bit_write_message as pdu_bit_write\nimport pymodbus.diag_message as pdu_diag\nimport pymodbus.other_message as pdu_other_msg\nimport pymodbus.register_read_message as pdu_reg_read\nimport pymodbus.register_write_message as pdu_req_write\nfrom pymodbus.constants import Defaults\nfrom pymodbus.pdu import ModbusRequest, ModbusResponse\nfrom pymodbus.utilities import ModbusTransactionState\n\n\n_logger = logging.getLogger(__name__)\n\n\nclass ModbusClientMixin: # pylint: disable=too-many-public-methods\n \"\"\"**ModbusClientMixin**.\n\n Simple modbus message call::\n\n response = client.read_coils(1, 10)\n # or\n response = await client.read_coils(1, 10)\n\n Advanced modbus message call::\n\n request = ReadCoilsRequest(1,10)\n response = client.execute(request)\n # or\n request = ReadCoilsRequest(1,10)\n response = await client.execute(request)\n\n .. tip::\n All methods can be used directly (synchronous) or with await <method>\n depending on the instantiated client.\n \"\"\"\n\n state = ModbusTransactionState.IDLE\n last_frame_end = 0\n silent_interval = 0\n\n def execute(self, request: ModbusRequest) -> ModbusResponse:\n \"\"\"Execute request.\n\n :param request: Request to send\n :raises ModbusException:\n \"\"\"\n return request\n\n def read_coils(\n self,\n address: int,\n count: int = Defaults.Count,\n slave: int = Defaults.Slave,\n **kwargs: any\n ) -> pdu_bit_read.ReadCoilsResponse:\n \"\"\"Read coils (function code 0x01).\n\n :param address: Start address to read from\n :param count: (optional) Number of coils to read\n :param slave: (optional) Modbus slave unit ID\n :param kwargs: (optional) Experimental parameters.\n :raises ModbusException:\n \"\"\"\n if \"unit\" in kwargs:\n _logger.error(\"Please do not use unit=, convert to slave=.\")\n slave = kwargs.pop(\"unit\", slave)\n request = pdu_bit_read.ReadCoilsRequest(address, count, slave, **kwargs)\n return self.execute(request)\n\n def read_discrete_inputs(\n self,\n address: int,\n count: int = Defaults.Count,\n slave: int = Defaults.Slave,\n **kwargs: any\n ) -> pdu_bit_read.ReadDiscreteInputsResponse:\n \"\"\"Read discrete inputs (function code 0x02).\n\n :param address: Start address to read from\n :param count: (optional) Number of coils to read\n :param slave: (optional) Modbus slave unit ID\n :param kwargs: (optional) Experimental parameters.\n :raises ModbusException:\n \"\"\"\n if \"unit\" in kwargs:\n _logger.error(\"Please do not use unit=, convert to slave=.\")\n slave = kwargs.pop(\"unit\", slave)\n request = pdu_bit_read.ReadDiscreteInputsRequest(\n address, count, slave, **kwargs\n )\n return self.execute(request)\n\n def read_holding_registers(\n self,\n address: int,\n count: int = Defaults.Count,\n slave: int = Defaults.Slave,\n **kwargs: any\n ) -> pdu_reg_read.ReadHoldingRegistersResponse:\n \"\"\"Read holding registers (function code 0x03).\n\n :param address: Start address to read from\n :param count: (optional) Number of coils to read\n :param slave: (optional) Modbus slave unit ID\n :param kwargs: (optional) Experimental parameters.\n :raises ModbusException:\n \"\"\"\n if \"unit\" in kwargs:\n _logger.error(\"Please do not use unit=, convert to slave=.\")\n slave = kwargs.pop(\"unit\", slave)\n request = pdu_reg_read.ReadHoldingRegistersRequest(\n address, count, slave, **kwargs\n )\n return self.execute(request)\n\n def read_input_registers(\n 
self,\n address: int,\n count: int = Defaults.Count,\n slave: int = Defaults.Slave,\n **kwargs: any\n ) -> pdu_reg_read.ReadInputRegistersResponse:\n \"\"\"Read input registers (function code 0x04).\n\n :param address: Start address to read from\n :param count: (optional) Number of coils to read\n :param slave: (optional) Modbus slave unit ID\n :param kwargs: (optional) Experimental parameters.\n :raises ModbusException:\n \"\"\"\n if \"unit\" in kwargs:\n _logger.error(\"Please do not use unit=, convert to slave=.\")\n slave = kwargs.pop(\"unit\", slave)\n request = pdu_reg_read.ReadInputRegistersRequest(\n address, count, slave, **kwargs\n )\n return self.execute(request)\n\n def write_coil(\n self, address: int, value: bool, slave: int = Defaults.Slave, **kwargs: any\n ) -> pdu_bit_write.WriteSingleCoilResponse:\n \"\"\"Write single coil (function code 0x05).\n\n :param address: Start address to read from\n :param value: Boolean to write\n :param slave: (optional) Modbus slave unit ID\n :param kwargs: (optional) Experimental parameters.\n :raises ModbusException:\n \"\"\"\n if \"unit\" in kwargs:\n _logger.error(\"Please do not use unit=, convert to slave=.\")\n slave = kwargs.pop(\"unit\", slave)\n request = pdu_bit_write.WriteSingleCoilRequest(address, value, slave, **kwargs)\n return self.execute(request)\n\n def write_register(\n self,\n address: int,\n value: Union[int, float, str],\n slave: int = Defaults.Slave,\n **kwargs: any\n ) -> pdu_req_write.WriteSingleRegisterResponse:\n \"\"\"Write register (function code 0x06).\n\n :param address: Start address to read from\n :param value: Value to write\n :param slave: (optional) Modbus slave unit ID\n :param kwargs: (optional) Experimental parameters.\n :raises ModbusException:\n \"\"\"\n if \"unit\" in kwargs:\n _logger.error(\"Please do not use unit=, convert to slave=.\")\n slave = kwargs.pop(\"unit\", slave)\n request = pdu_req_write.WriteSingleRegisterRequest(\n address, value, slave, **kwargs\n )\n return self.execute(request)\n\n def read_exception_status(\n self, slave: int = Defaults.Slave, **kwargs: any\n ) -> pdu_other_msg.ReadExceptionStatusResponse:\n \"\"\"Read Exception Status (function code 0x07).\n\n :param slave: (optional) Modbus slave unit ID\n :param kwargs: (optional) Experimental parameters.\n :raises ModbusException:\n \"\"\"\n if \"unit\" in kwargs:\n _logger.error(\"Please do not use unit=, convert to slave=.\")\n slave = kwargs.pop(\"unit\", slave)\n request = pdu_other_msg.ReadExceptionStatusRequest(slave, **kwargs)\n return self.execute(request)\n\n def diag_query_data(\n self, msg: bytearray, slave: int = Defaults.Slave, **kwargs: any\n ) -> pdu_diag.ReturnQueryDataResponse:\n \"\"\"Diagnose query data (function code 0x08 - 0x00).\n\n :param msg: Message to be returned\n :param slave: (optional) Modbus slave unit ID\n :param kwargs: (optional) Experimental parameters.\n :raises ModbusException:\n \"\"\"\n if \"unit\" in kwargs:\n _logger.error(\"Please do not use unit=, convert to slave=.\")\n slave = kwargs.pop(\"unit\", slave)\n request = pdu_diag.ReturnQueryDataRequest(msg, slave, **kwargs)\n return self.execute(request)\n\n def diag_restart_communication(\n self, toggle: bool, slave: int = Defaults.Slave, **kwargs: any\n ) -> pdu_diag.RestartCommunicationsOptionResponse:\n \"\"\"Diagnose restart communication (function code 0x08 - 0x01).\n\n :param toggle: True if toogled.\n :param slave: (optional) Modbus slave unit ID\n :param kwargs: (optional) Experimental parameters.\n :raises ModbusException:\n 
\"\"\"\n if \"unit\" in kwargs:\n _logger.error(\"Please do not use unit=, convert to slave=.\")\n slave = kwargs.pop(\"unit\", slave)\n request = pdu_diag.RestartCommunicationsOptionRequest(toggle, slave, **kwargs)\n return self.execute(request)\n\n def diag_read_diagnostic_register(\n self, slave: int = Defaults.Slave, **kwargs: any\n ) -> pdu_diag.ReturnDiagnosticRegisterResponse:\n \"\"\"Diagnose read diagnostic register (function code 0x08 - 0x02).\n\n :param slave: (optional) Modbus slave unit ID\n :param kwargs: (optional) Experimental parameters.\n :raises ModbusException:\n \"\"\"\n if \"unit\" in kwargs:\n _logger.error(\"Please do not use unit=, convert to slave=.\")\n slave = kwargs.pop(\"unit\", slave)\n request = pdu_diag.ReturnDiagnosticRegisterRequest(slave, **kwargs)\n return self.execute(request)\n\n def diag_change_ascii_input_delimeter(\n self, slave: int = Defaults.Slave, **kwargs: any\n ) -> pdu_diag.ChangeAsciiInputDelimiterResponse:\n \"\"\"Diagnose change ASCII input delimiter (function code 0x08 - 0x03).\n\n :param slave: (optional) Modbus slave unit ID\n :param kwargs: (optional) Experimental parameters.\n :raises ModbusException:\n \"\"\"\n if \"unit\" in kwargs:\n _logger.error(\"Please do not use unit=, convert to slave=.\")\n slave = kwargs.pop(\"unit\", slave)\n request = pdu_diag.ChangeAsciiInputDelimiterRequest(slave, **kwargs)\n return self.execute(request)\n\n def diag_force_listen_only(\n self, slave: int = Defaults.Slave, **kwargs: any\n ) -> pdu_diag.ForceListenOnlyModeResponse:\n \"\"\"Diagnose force listen only (function code 0x08 - 0x04).\n\n :param slave: (optional) Modbus slave unit ID\n :param kwargs: (optional) Experimental parameters.\n :raises ModbusException:\n \"\"\"\n if \"unit\" in kwargs:\n _logger.error(\"Please do not use unit=, convert to slave=.\")\n slave = kwargs.pop(\"unit\", slave)\n request = pdu_diag.ForceListenOnlyModeRequest(slave, **kwargs)\n return self.execute(request)\n\n def diag_clear_counters(\n self, slave: int = Defaults.Slave, **kwargs: any\n ) -> pdu_diag.ClearCountersResponse:\n \"\"\"Diagnose clear counters (function code 0x08 - 0x0A).\n\n :param slave: (optional) Modbus slave unit ID\n :param kwargs: (optional) Experimental parameters.\n :raises ModbusException:\n \"\"\"\n if \"unit\" in kwargs:\n _logger.error(\"Please do not use unit=, convert to slave=.\")\n slave = kwargs.pop(\"unit\", slave)\n request = pdu_diag.ClearCountersRequest(slave, **kwargs)\n return self.execute(request)\n\n def diag_read_bus_message_count(\n self, slave: int = Defaults.Slave, **kwargs: any\n ) -> pdu_diag.ReturnBusMessageCountResponse:\n \"\"\"Diagnose read bus message count (function code 0x08 - 0x0B).\n\n :param slave: (optional) Modbus slave unit ID\n :param kwargs: (optional) Experimental parameters.\n :raises ModbusException:\n \"\"\"\n if \"unit\" in kwargs:\n _logger.error(\"Please do not use unit=, convert to slave=.\")\n slave = kwargs.pop(\"unit\", slave)\n request = pdu_diag.ReturnBusMessageCountRequest(slave, **kwargs)\n return self.execute(request)\n\n def diag_read_bus_comm_error_count(\n self, slave: int = Defaults.Slave, **kwargs: any\n ) -> pdu_diag.ReturnBusCommunicationErrorCountResponse:\n \"\"\"Diagnose read Bus Communication Error Count (function code 0x08 - 0x0C).\n\n :param slave: (optional) Modbus slave unit ID\n :param kwargs: (optional) Experimental parameters.\n :raises ModbusException:\n \"\"\"\n if \"unit\" in kwargs:\n _logger.error(\"Please do not use unit=, convert to slave=.\")\n slave = 
kwargs.pop(\"unit\", slave)\n request = pdu_diag.ReturnBusCommunicationErrorCountRequest(slave, **kwargs)\n return self.execute(request)\n\n def diag_read_bus_exception_error_count(\n self, slave: int = Defaults.Slave, **kwargs: any\n ) -> pdu_diag.ReturnBusExceptionErrorCountResponse:\n \"\"\"Diagnose read Bus Exception Error Count (function code 0x08 - 0x0D).\n\n :param slave: (optional) Modbus slave unit ID\n :param kwargs: (optional) Experimental parameters.\n :raises ModbusException:\n \"\"\"\n if \"unit\" in kwargs:\n _logger.error(\"Please do not use unit=, convert to slave=.\")\n slave = kwargs.pop(\"unit\", slave)\n request = pdu_diag.ReturnBusExceptionErrorCountRequest(slave, **kwargs)\n return self.execute(request)\n\n def diag_read_slave_message_count(\n self, slave: int = Defaults.Slave, **kwargs: any\n ) -> pdu_diag.ReturnSlaveMessageCountResponse:\n \"\"\"Diagnose read Slave Message Count (function code 0x08 - 0x0E).\n\n :param slave: (optional) Modbus slave unit ID\n :param kwargs: (optional) Experimental parameters.\n :raises ModbusException:\n \"\"\"\n if \"unit\" in kwargs:\n _logger.error(\"Please do not use unit=, convert to slave=.\")\n slave = kwargs.pop(\"unit\", slave)\n request = pdu_diag.ReturnSlaveMessageCountRequest(slave, **kwargs)\n return self.execute(request)\n\n def diag_read_slave_no_response_count(\n self, slave: int = Defaults.Slave, **kwargs: any\n ) -> pdu_diag.ReturnSlaveNoReponseCountResponse:\n \"\"\"Diagnose read Slave No Response Count (function code 0x08 - 0x0F).\n\n :param slave: (optional) Modbus slave unit ID\n :param kwargs: (optional) Experimental parameters.\n :raises ModbusException:\n \"\"\"\n if \"unit\" in kwargs:\n _logger.error(\"Please do not use unit=, convert to slave=.\")\n slave = kwargs.pop(\"unit\", slave)\n request = pdu_diag.ReturnSlaveNoResponseCountRequest(slave, **kwargs)\n return self.execute(request)\n\n def diag_read_slave_nak_count(\n self, slave: int = Defaults.Slave, **kwargs: any\n ) -> pdu_diag.ReturnSlaveNAKCountResponse:\n \"\"\"Diagnose read Slave NAK Count (function code 0x08 - 0x10).\n\n :param slave: (optional) Modbus slave unit ID\n :param kwargs: (optional) Experimental parameters.\n :raises ModbusException:\n \"\"\"\n if \"unit\" in kwargs:\n _logger.error(\"Please do not use unit=, convert to slave=.\")\n slave = kwargs.pop(\"unit\", slave)\n request = pdu_diag.ReturnSlaveNAKCountRequest(slave, **kwargs)\n return self.execute(request)\n\n def diag_read_slave_busy_count(\n self, slave: int = Defaults.Slave, **kwargs: any\n ) -> pdu_diag.ReturnSlaveBusyCountResponse:\n \"\"\"Diagnose read Slave Busy Count (function code 0x08 - 0x11).\n\n :param slave: (optional) Modbus slave unit ID\n :param kwargs: (optional) Experimental parameters.\n :raises ModbusException:\n \"\"\"\n if \"unit\" in kwargs:\n _logger.error(\"Please do not use unit=, convert to slave=.\")\n slave = kwargs.pop(\"unit\", slave)\n request = pdu_diag.ReturnSlaveBusyCountRequest(slave, **kwargs)\n return self.execute(request)\n\n def diag_read_bus_char_overrun_count(\n self, slave: int = Defaults.Slave, **kwargs: any\n ) -> pdu_diag.ReturnSlaveBusCharacterOverrunCountResponse:\n \"\"\"Diagnose read Bus Character Overrun Count (function code 0x08 - 0x12).\n\n :param slave: (optional) Modbus slave unit ID\n :param kwargs: (optional) Experimental parameters.\n :raises ModbusException:\n \"\"\"\n if \"unit\" in kwargs:\n _logger.error(\"Please do not use unit=, convert to slave=.\")\n slave = kwargs.pop(\"unit\", slave)\n request = 
pdu_diag.ReturnSlaveBusCharacterOverrunCountRequest(slave, **kwargs)\n return self.execute(request)\n\n def diag_read_iop_overrun_count(\n self, slave: int = Defaults.Slave, **kwargs: any\n ) -> pdu_diag.ReturnIopOverrunCountResponse:\n \"\"\"Diagnose read Iop overrun count (function code 0x08 - 0x13).\n\n :param slave: (optional) Modbus slave unit ID\n :param kwargs: (optional) Experimental parameters.\n :raises ModbusException:\n \"\"\"\n if \"unit\" in kwargs:\n _logger.error(\"Please do not use unit=, convert to slave=.\")\n slave = kwargs.pop(\"unit\", slave)\n request = pdu_diag.ReturnIopOverrunCountRequest(slave, **kwargs)\n return self.execute(request)\n\n def diag_clear_overrun_counter(\n self, slave: int = Defaults.Slave, **kwargs: any\n ) -> pdu_diag.ClearOverrunCountResponse:\n \"\"\"Diagnose Clear Overrun Counter and Flag (function code 0x08 - 0x14).\n\n :param slave: (optional) Modbus slave unit ID\n :param kwargs: (optional) Experimental parameters.\n :raises ModbusException:\n \"\"\"\n if \"unit\" in kwargs:\n _logger.error(\"Please do not use unit=, convert to slave=.\")\n slave = kwargs.pop(\"unit\", slave)\n request = pdu_diag.ClearOverrunCountRequest(slave, **kwargs)\n return self.execute(request)\n\n def diag_getclear_modbus_response(\n self, slave: int = Defaults.Slave, **kwargs: any\n ) -> pdu_diag.GetClearModbusPlusResponse:\n \"\"\"Diagnose Get/Clear modbus plus request (function code 0x08 - 0x15).\n\n :param slave: (optional) Modbus slave unit ID\n :param kwargs: (optional) Experimental parameters.\n :raises ModbusException:\n \"\"\"\n if \"unit\" in kwargs:\n _logger.error(\"Please do not use unit=, convert to slave=.\")\n slave = kwargs.pop(\"unit\", slave)\n request = pdu_diag.GetClearModbusPlusRequest(slave, **kwargs)\n return self.execute(request)\n\n # TBD missing functions\n # 0x0B Get Comm Event Counter (Serial Line only)\n # 0x0C Get Comm Event Log (Serial Line only)\n\n def write_coils(\n self,\n address: int,\n values: List[bool],\n slave: int = Defaults.Slave,\n **kwargs: any\n ) -> pdu_bit_write.WriteMultipleCoilsResponse:\n \"\"\"Write coils (function code 0x0F).\n\n :param address: Start address to read from\n :param values: List of booleans to write\n :param slave: (optional) Modbus slave unit ID\n :param kwargs: (optional) Experimental parameters.\n :raises ModbusException:\n \"\"\"\n if \"unit\" in kwargs:\n _logger.error(\"Please do not use unit=, convert to slave=.\")\n slave = kwargs.pop(\"unit\", slave)\n request = pdu_bit_write.WriteMultipleCoilsRequest(\n address, values, slave, **kwargs\n )\n return self.execute(request)\n\n def write_registers(\n self,\n address: int,\n values: List[Union[int, float, str]],\n slave: int = Defaults.Slave,\n **kwargs: any\n ) -> pdu_req_write.WriteMultipleRegistersResponse:\n \"\"\"Write registers (function code 0x10).\n\n :param address: Start address to read from\n :param values: List of booleans to write\n :param slave: (optional) Modbus slave unit ID\n :param kwargs: (optional) Experimental parameters.\n :raises ModbusException:\n \"\"\"\n if \"unit\" in kwargs:\n _logger.error(\"Please do not use unit=, convert to slave=.\")\n slave = kwargs.pop(\"unit\", slave)\n request = pdu_req_write.WriteMultipleRegistersRequest(\n address, values, slave, **kwargs\n )\n return self.execute(request)\n\n # Function codes descriptions\n # 0x11 Report Slave ID (Serial Line only)\n # 0x14 Read File Record\n # 0x15 Write File Record\n # 0x16 Mask Write Register\n # 0x17 Read/Write Multiple registers\n # 0x18 Read FIFO 
Queue\n # 0x2B Encapsulated Interface Transport\n # 0x2B / 0x0D CANopen General Reference Request and Response\n # PDU\n # 0x2B / 0x0E Read Device Identification\n # MODBUS Exception Responses\n\n def readwrite_registers(\n self, *args, **kwargs\n ) -> pdu_reg_read.ReadWriteMultipleRegistersResponse:\n \"\"\"Read/Write registers\n\n :param args:\n :param kwargs:\n :returns: A deferred response handle\n \"\"\"\n request = pdu_reg_read.ReadWriteMultipleRegistersRequest(*args, **kwargs)\n return self.execute(request)\n\n def mask_write_register(\n self, *args, **kwargs\n ) -> pdu_req_write.MaskWriteRegisterResponse:\n \"\"\"Mask write register.\n\n :args:\n :returns: A deferred response handle\n \"\"\"\n request = pdu_req_write.MaskWriteRegisterRequest(*args, **kwargs)\n return self.execute(request)\n",
"path": "pymodbus/client/mixin.py"
}
] | [
{
"content": "\"\"\"Modbus Client Common.\"\"\"\nimport logging\nfrom typing import List, Union\n\nimport pymodbus.bit_read_message as pdu_bit_read\nimport pymodbus.bit_write_message as pdu_bit_write\nimport pymodbus.diag_message as pdu_diag\nimport pymodbus.other_message as pdu_other_msg\nimport pymodbus.register_read_message as pdu_reg_read\nimport pymodbus.register_write_message as pdu_req_write\nfrom pymodbus.constants import Defaults\nfrom pymodbus.pdu import ModbusRequest, ModbusResponse\nfrom pymodbus.utilities import ModbusTransactionState\n\n\n_logger = logging.getLogger(__name__)\n\n\nclass ModbusClientMixin: # pylint: disable=too-many-public-methods\n \"\"\"**ModbusClientMixin**.\n\n Simple modbus message call::\n\n response = client.read_coils(1, 10)\n # or\n response = await client.read_coils(1, 10)\n\n Advanced modbus message call::\n\n request = ReadCoilsRequest(1,10)\n response = client.execute(request)\n # or\n request = ReadCoilsRequest(1,10)\n response = await client.execute(request)\n\n .. tip::\n All methods can be used directly (synchronous) or with await <method>\n depending on the instantiated client.\n \"\"\"\n\n state = ModbusTransactionState.IDLE\n last_frame_end = 0\n silent_interval = 0\n\n def __init__(self):\n \"\"\"Initialize.\"\"\"\n\n def execute(self, request: ModbusRequest) -> ModbusResponse:\n \"\"\"Execute request.\n\n :param request: Request to send\n :raises ModbusException:\n \"\"\"\n return request\n\n def read_coils(\n self,\n address: int,\n count: int = Defaults.Count,\n slave: int = Defaults.Slave,\n **kwargs: any\n ) -> pdu_bit_read.ReadCoilsResponse:\n \"\"\"Read coils (function code 0x01).\n\n :param address: Start address to read from\n :param count: (optional) Number of coils to read\n :param slave: (optional) Modbus slave unit ID\n :param kwargs: (optional) Experimental parameters.\n :raises ModbusException:\n \"\"\"\n if \"unit\" in kwargs:\n _logger.error(\"Please do not use unit=, convert to slave=.\")\n slave = kwargs.pop(\"unit\", slave)\n request = pdu_bit_read.ReadCoilsRequest(address, count, slave, **kwargs)\n return self.execute(request)\n\n def read_discrete_inputs(\n self,\n address: int,\n count: int = Defaults.Count,\n slave: int = Defaults.Slave,\n **kwargs: any\n ) -> pdu_bit_read.ReadDiscreteInputsResponse:\n \"\"\"Read discrete inputs (function code 0x02).\n\n :param address: Start address to read from\n :param count: (optional) Number of coils to read\n :param slave: (optional) Modbus slave unit ID\n :param kwargs: (optional) Experimental parameters.\n :raises ModbusException:\n \"\"\"\n if \"unit\" in kwargs:\n _logger.error(\"Please do not use unit=, convert to slave=.\")\n slave = kwargs.pop(\"unit\", slave)\n request = pdu_bit_read.ReadDiscreteInputsRequest(\n address, count, slave, **kwargs\n )\n return self.execute(request)\n\n def read_holding_registers(\n self,\n address: int,\n count: int = Defaults.Count,\n slave: int = Defaults.Slave,\n **kwargs: any\n ) -> pdu_reg_read.ReadHoldingRegistersResponse:\n \"\"\"Read holding registers (function code 0x03).\n\n :param address: Start address to read from\n :param count: (optional) Number of coils to read\n :param slave: (optional) Modbus slave unit ID\n :param kwargs: (optional) Experimental parameters.\n :raises ModbusException:\n \"\"\"\n if \"unit\" in kwargs:\n _logger.error(\"Please do not use unit=, convert to slave=.\")\n slave = kwargs.pop(\"unit\", slave)\n request = pdu_reg_read.ReadHoldingRegistersRequest(\n address, count, slave, **kwargs\n )\n return 
self.execute(request)\n\n def read_input_registers(\n self,\n address: int,\n count: int = Defaults.Count,\n slave: int = Defaults.Slave,\n **kwargs: any\n ) -> pdu_reg_read.ReadInputRegistersResponse:\n \"\"\"Read input registers (function code 0x04).\n\n :param address: Start address to read from\n :param count: (optional) Number of coils to read\n :param slave: (optional) Modbus slave unit ID\n :param kwargs: (optional) Experimental parameters.\n :raises ModbusException:\n \"\"\"\n if \"unit\" in kwargs:\n _logger.error(\"Please do not use unit=, convert to slave=.\")\n slave = kwargs.pop(\"unit\", slave)\n request = pdu_reg_read.ReadInputRegistersRequest(\n address, count, slave, **kwargs\n )\n return self.execute(request)\n\n def write_coil(\n self, address: int, value: bool, slave: int = Defaults.Slave, **kwargs: any\n ) -> pdu_bit_write.WriteSingleCoilResponse:\n \"\"\"Write single coil (function code 0x05).\n\n :param address: Start address to read from\n :param value: Boolean to write\n :param slave: (optional) Modbus slave unit ID\n :param kwargs: (optional) Experimental parameters.\n :raises ModbusException:\n \"\"\"\n if \"unit\" in kwargs:\n _logger.error(\"Please do not use unit=, convert to slave=.\")\n slave = kwargs.pop(\"unit\", slave)\n request = pdu_bit_write.WriteSingleCoilRequest(address, value, slave, **kwargs)\n return self.execute(request)\n\n def write_register(\n self,\n address: int,\n value: Union[int, float, str],\n slave: int = Defaults.Slave,\n **kwargs: any\n ) -> pdu_req_write.WriteSingleRegisterResponse:\n \"\"\"Write register (function code 0x06).\n\n :param address: Start address to read from\n :param value: Value to write\n :param slave: (optional) Modbus slave unit ID\n :param kwargs: (optional) Experimental parameters.\n :raises ModbusException:\n \"\"\"\n if \"unit\" in kwargs:\n _logger.error(\"Please do not use unit=, convert to slave=.\")\n slave = kwargs.pop(\"unit\", slave)\n request = pdu_req_write.WriteSingleRegisterRequest(\n address, value, slave, **kwargs\n )\n return self.execute(request)\n\n def read_exception_status(\n self, slave: int = Defaults.Slave, **kwargs: any\n ) -> pdu_other_msg.ReadExceptionStatusResponse:\n \"\"\"Read Exception Status (function code 0x07).\n\n :param slave: (optional) Modbus slave unit ID\n :param kwargs: (optional) Experimental parameters.\n :raises ModbusException:\n \"\"\"\n if \"unit\" in kwargs:\n _logger.error(\"Please do not use unit=, convert to slave=.\")\n slave = kwargs.pop(\"unit\", slave)\n request = pdu_other_msg.ReadExceptionStatusRequest(slave, **kwargs)\n return self.execute(request)\n\n def diag_query_data(\n self, msg: bytearray, slave: int = Defaults.Slave, **kwargs: any\n ) -> pdu_diag.ReturnQueryDataResponse:\n \"\"\"Diagnose query data (function code 0x08 - 0x00).\n\n :param msg: Message to be returned\n :param slave: (optional) Modbus slave unit ID\n :param kwargs: (optional) Experimental parameters.\n :raises ModbusException:\n \"\"\"\n if \"unit\" in kwargs:\n _logger.error(\"Please do not use unit=, convert to slave=.\")\n slave = kwargs.pop(\"unit\", slave)\n request = pdu_diag.ReturnQueryDataRequest(msg, slave, **kwargs)\n return self.execute(request)\n\n def diag_restart_communication(\n self, toggle: bool, slave: int = Defaults.Slave, **kwargs: any\n ) -> pdu_diag.RestartCommunicationsOptionResponse:\n \"\"\"Diagnose restart communication (function code 0x08 - 0x01).\n\n :param toggle: True if toogled.\n :param slave: (optional) Modbus slave unit ID\n :param kwargs: (optional) 
Experimental parameters.\n :raises ModbusException:\n \"\"\"\n if \"unit\" in kwargs:\n _logger.error(\"Please do not use unit=, convert to slave=.\")\n slave = kwargs.pop(\"unit\", slave)\n request = pdu_diag.RestartCommunicationsOptionRequest(toggle, slave, **kwargs)\n return self.execute(request)\n\n def diag_read_diagnostic_register(\n self, slave: int = Defaults.Slave, **kwargs: any\n ) -> pdu_diag.ReturnDiagnosticRegisterResponse:\n \"\"\"Diagnose read diagnostic register (function code 0x08 - 0x02).\n\n :param slave: (optional) Modbus slave unit ID\n :param kwargs: (optional) Experimental parameters.\n :raises ModbusException:\n \"\"\"\n if \"unit\" in kwargs:\n _logger.error(\"Please do not use unit=, convert to slave=.\")\n slave = kwargs.pop(\"unit\", slave)\n request = pdu_diag.ReturnDiagnosticRegisterRequest(slave, **kwargs)\n return self.execute(request)\n\n def diag_change_ascii_input_delimeter(\n self, slave: int = Defaults.Slave, **kwargs: any\n ) -> pdu_diag.ChangeAsciiInputDelimiterResponse:\n \"\"\"Diagnose change ASCII input delimiter (function code 0x08 - 0x03).\n\n :param slave: (optional) Modbus slave unit ID\n :param kwargs: (optional) Experimental parameters.\n :raises ModbusException:\n \"\"\"\n if \"unit\" in kwargs:\n _logger.error(\"Please do not use unit=, convert to slave=.\")\n slave = kwargs.pop(\"unit\", slave)\n request = pdu_diag.ChangeAsciiInputDelimiterRequest(slave, **kwargs)\n return self.execute(request)\n\n def diag_force_listen_only(\n self, slave: int = Defaults.Slave, **kwargs: any\n ) -> pdu_diag.ForceListenOnlyModeResponse:\n \"\"\"Diagnose force listen only (function code 0x08 - 0x04).\n\n :param slave: (optional) Modbus slave unit ID\n :param kwargs: (optional) Experimental parameters.\n :raises ModbusException:\n \"\"\"\n if \"unit\" in kwargs:\n _logger.error(\"Please do not use unit=, convert to slave=.\")\n slave = kwargs.pop(\"unit\", slave)\n request = pdu_diag.ForceListenOnlyModeRequest(slave, **kwargs)\n return self.execute(request)\n\n def diag_clear_counters(\n self, slave: int = Defaults.Slave, **kwargs: any\n ) -> pdu_diag.ClearCountersResponse:\n \"\"\"Diagnose clear counters (function code 0x08 - 0x0A).\n\n :param slave: (optional) Modbus slave unit ID\n :param kwargs: (optional) Experimental parameters.\n :raises ModbusException:\n \"\"\"\n if \"unit\" in kwargs:\n _logger.error(\"Please do not use unit=, convert to slave=.\")\n slave = kwargs.pop(\"unit\", slave)\n request = pdu_diag.ClearCountersRequest(slave, **kwargs)\n return self.execute(request)\n\n def diag_read_bus_message_count(\n self, slave: int = Defaults.Slave, **kwargs: any\n ) -> pdu_diag.ReturnBusMessageCountResponse:\n \"\"\"Diagnose read bus message count (function code 0x08 - 0x0B).\n\n :param slave: (optional) Modbus slave unit ID\n :param kwargs: (optional) Experimental parameters.\n :raises ModbusException:\n \"\"\"\n if \"unit\" in kwargs:\n _logger.error(\"Please do not use unit=, convert to slave=.\")\n slave = kwargs.pop(\"unit\", slave)\n request = pdu_diag.ReturnBusMessageCountRequest(slave, **kwargs)\n return self.execute(request)\n\n def diag_read_bus_comm_error_count(\n self, slave: int = Defaults.Slave, **kwargs: any\n ) -> pdu_diag.ReturnBusCommunicationErrorCountResponse:\n \"\"\"Diagnose read Bus Communication Error Count (function code 0x08 - 0x0C).\n\n :param slave: (optional) Modbus slave unit ID\n :param kwargs: (optional) Experimental parameters.\n :raises ModbusException:\n \"\"\"\n if \"unit\" in kwargs:\n _logger.error(\"Please do 
not use unit=, convert to slave=.\")\n slave = kwargs.pop(\"unit\", slave)\n request = pdu_diag.ReturnBusCommunicationErrorCountRequest(slave, **kwargs)\n return self.execute(request)\n\n def diag_read_bus_exception_error_count(\n self, slave: int = Defaults.Slave, **kwargs: any\n ) -> pdu_diag.ReturnBusExceptionErrorCountResponse:\n \"\"\"Diagnose read Bus Exception Error Count (function code 0x08 - 0x0D).\n\n :param slave: (optional) Modbus slave unit ID\n :param kwargs: (optional) Experimental parameters.\n :raises ModbusException:\n \"\"\"\n if \"unit\" in kwargs:\n _logger.error(\"Please do not use unit=, convert to slave=.\")\n slave = kwargs.pop(\"unit\", slave)\n request = pdu_diag.ReturnBusExceptionErrorCountRequest(slave, **kwargs)\n return self.execute(request)\n\n def diag_read_slave_message_count(\n self, slave: int = Defaults.Slave, **kwargs: any\n ) -> pdu_diag.ReturnSlaveMessageCountResponse:\n \"\"\"Diagnose read Slave Message Count (function code 0x08 - 0x0E).\n\n :param slave: (optional) Modbus slave unit ID\n :param kwargs: (optional) Experimental parameters.\n :raises ModbusException:\n \"\"\"\n if \"unit\" in kwargs:\n _logger.error(\"Please do not use unit=, convert to slave=.\")\n slave = kwargs.pop(\"unit\", slave)\n request = pdu_diag.ReturnSlaveMessageCountRequest(slave, **kwargs)\n return self.execute(request)\n\n def diag_read_slave_no_response_count(\n self, slave: int = Defaults.Slave, **kwargs: any\n ) -> pdu_diag.ReturnSlaveNoReponseCountResponse:\n \"\"\"Diagnose read Slave No Response Count (function code 0x08 - 0x0F).\n\n :param slave: (optional) Modbus slave unit ID\n :param kwargs: (optional) Experimental parameters.\n :raises ModbusException:\n \"\"\"\n if \"unit\" in kwargs:\n _logger.error(\"Please do not use unit=, convert to slave=.\")\n slave = kwargs.pop(\"unit\", slave)\n request = pdu_diag.ReturnSlaveNoResponseCountRequest(slave, **kwargs)\n return self.execute(request)\n\n def diag_read_slave_nak_count(\n self, slave: int = Defaults.Slave, **kwargs: any\n ) -> pdu_diag.ReturnSlaveNAKCountResponse:\n \"\"\"Diagnose read Slave NAK Count (function code 0x08 - 0x10).\n\n :param slave: (optional) Modbus slave unit ID\n :param kwargs: (optional) Experimental parameters.\n :raises ModbusException:\n \"\"\"\n if \"unit\" in kwargs:\n _logger.error(\"Please do not use unit=, convert to slave=.\")\n slave = kwargs.pop(\"unit\", slave)\n request = pdu_diag.ReturnSlaveNAKCountRequest(slave, **kwargs)\n return self.execute(request)\n\n def diag_read_slave_busy_count(\n self, slave: int = Defaults.Slave, **kwargs: any\n ) -> pdu_diag.ReturnSlaveBusyCountResponse:\n \"\"\"Diagnose read Slave Busy Count (function code 0x08 - 0x11).\n\n :param slave: (optional) Modbus slave unit ID\n :param kwargs: (optional) Experimental parameters.\n :raises ModbusException:\n \"\"\"\n if \"unit\" in kwargs:\n _logger.error(\"Please do not use unit=, convert to slave=.\")\n slave = kwargs.pop(\"unit\", slave)\n request = pdu_diag.ReturnSlaveBusyCountRequest(slave, **kwargs)\n return self.execute(request)\n\n def diag_read_bus_char_overrun_count(\n self, slave: int = Defaults.Slave, **kwargs: any\n ) -> pdu_diag.ReturnSlaveBusCharacterOverrunCountResponse:\n \"\"\"Diagnose read Bus Character Overrun Count (function code 0x08 - 0x12).\n\n :param slave: (optional) Modbus slave unit ID\n :param kwargs: (optional) Experimental parameters.\n :raises ModbusException:\n \"\"\"\n if \"unit\" in kwargs:\n _logger.error(\"Please do not use unit=, convert to slave=.\")\n slave = 
kwargs.pop(\"unit\", slave)\n request = pdu_diag.ReturnSlaveBusCharacterOverrunCountRequest(slave, **kwargs)\n return self.execute(request)\n\n def diag_read_iop_overrun_count(\n self, slave: int = Defaults.Slave, **kwargs: any\n ) -> pdu_diag.ReturnIopOverrunCountResponse:\n \"\"\"Diagnose read Iop overrun count (function code 0x08 - 0x13).\n\n :param slave: (optional) Modbus slave unit ID\n :param kwargs: (optional) Experimental parameters.\n :raises ModbusException:\n \"\"\"\n if \"unit\" in kwargs:\n _logger.error(\"Please do not use unit=, convert to slave=.\")\n slave = kwargs.pop(\"unit\", slave)\n request = pdu_diag.ReturnIopOverrunCountRequest(slave, **kwargs)\n return self.execute(request)\n\n def diag_clear_overrun_counter(\n self, slave: int = Defaults.Slave, **kwargs: any\n ) -> pdu_diag.ClearOverrunCountResponse:\n \"\"\"Diagnose Clear Overrun Counter and Flag (function code 0x08 - 0x14).\n\n :param slave: (optional) Modbus slave unit ID\n :param kwargs: (optional) Experimental parameters.\n :raises ModbusException:\n \"\"\"\n if \"unit\" in kwargs:\n _logger.error(\"Please do not use unit=, convert to slave=.\")\n slave = kwargs.pop(\"unit\", slave)\n request = pdu_diag.ClearOverrunCountRequest(slave, **kwargs)\n return self.execute(request)\n\n def diag_getclear_modbus_response(\n self, slave: int = Defaults.Slave, **kwargs: any\n ) -> pdu_diag.GetClearModbusPlusResponse:\n \"\"\"Diagnose Get/Clear modbus plus request (function code 0x08 - 0x15).\n\n :param slave: (optional) Modbus slave unit ID\n :param kwargs: (optional) Experimental parameters.\n :raises ModbusException:\n \"\"\"\n if \"unit\" in kwargs:\n _logger.error(\"Please do not use unit=, convert to slave=.\")\n slave = kwargs.pop(\"unit\", slave)\n request = pdu_diag.GetClearModbusPlusRequest(slave, **kwargs)\n return self.execute(request)\n\n # TBD missing functions\n # 0x0B Get Comm Event Counter (Serial Line only)\n # 0x0C Get Comm Event Log (Serial Line only)\n\n def write_coils(\n self,\n address: int,\n values: List[bool],\n slave: int = Defaults.Slave,\n **kwargs: any\n ) -> pdu_bit_write.WriteMultipleCoilsResponse:\n \"\"\"Write coils (function code 0x0F).\n\n :param address: Start address to read from\n :param values: List of booleans to write\n :param slave: (optional) Modbus slave unit ID\n :param kwargs: (optional) Experimental parameters.\n :raises ModbusException:\n \"\"\"\n if \"unit\" in kwargs:\n _logger.error(\"Please do not use unit=, convert to slave=.\")\n slave = kwargs.pop(\"unit\", slave)\n request = pdu_bit_write.WriteMultipleCoilsRequest(\n address, values, slave, **kwargs\n )\n return self.execute(request)\n\n def write_registers(\n self,\n address: int,\n values: List[Union[int, float, str]],\n slave: int = Defaults.Slave,\n **kwargs: any\n ) -> pdu_req_write.WriteMultipleRegistersResponse:\n \"\"\"Write registers (function code 0x10).\n\n :param address: Start address to read from\n :param values: List of booleans to write\n :param slave: (optional) Modbus slave unit ID\n :param kwargs: (optional) Experimental parameters.\n :raises ModbusException:\n \"\"\"\n if \"unit\" in kwargs:\n _logger.error(\"Please do not use unit=, convert to slave=.\")\n slave = kwargs.pop(\"unit\", slave)\n request = pdu_req_write.WriteMultipleRegistersRequest(\n address, values, slave, **kwargs\n )\n return self.execute(request)\n\n # Function codes descriptions\n # 0x11 Report Slave ID (Serial Line only)\n # 0x14 Read File Record\n # 0x15 Write File Record\n # 0x16 Mask Write Register\n # 0x17 Read/Write 
Multiple registers\n # 0x18 Read FIFO Queue\n # 0x2B Encapsulated Interface Transport\n # 0x2B / 0x0D CANopen General Reference Request and Response\n # PDU\n # 0x2B / 0x0E Read Device Identification\n # MODBUS Exception Responses\n\n def readwrite_registers(\n self, *args, **kwargs\n ) -> pdu_reg_read.ReadWriteMultipleRegistersResponse:\n \"\"\"Read/Write registers\n\n :param args:\n :param kwargs:\n :returns: A deferred response handle\n \"\"\"\n request = pdu_reg_read.ReadWriteMultipleRegistersRequest(*args, **kwargs)\n return self.execute(request)\n\n def mask_write_register(\n self, *args, **kwargs\n ) -> pdu_req_write.MaskWriteRegisterResponse:\n \"\"\"Mask write register.\n\n :args:\n :returns: A deferred response handle\n \"\"\"\n request = pdu_req_write.MaskWriteRegisterRequest(*args, **kwargs)\n return self.execute(request)\n",
"path": "pymodbus/client/mixin.py"
}
] | diff --git a/pymodbus/client/mixin.py b/pymodbus/client/mixin.py
index 72a89456b..5cd99a462 100644
--- a/pymodbus/client/mixin.py
+++ b/pymodbus/client/mixin.py
@@ -42,6 +42,9 @@ class ModbusClientMixin: # pylint: disable=too-many-public-methods
last_frame_end = 0
silent_interval = 0
+ def __init__(self):
+ """Initialize."""
+
def execute(self, request: ModbusRequest) -> ModbusResponse:
"""Execute request.
|
facebookresearch__hydra-1531 | Add `env` to Hydra's config group
This is a follow-up to #1441.
The `env` config group will allow users to manually change the env default values (such as providing default callbacks or updating `run.dir`).
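A hedged sketch of how such a group might be used once it exists — registering a custom entry for a `hydra/env` group through the `ConfigStore` so it could be selected with `hydra/env=custom`. The group name, entry name, and the `hydra.job.env_set` keys below are assumptions about how the override would be packaged, not existing Hydra behaviour:

```python
# Illustrative only: assumes Hydra exposes a `hydra/env` config group.
from hydra.core.config_store import ConfigStore

cs = ConfigStore.instance()
cs.store(
    group="hydra/env",
    name="custom",
    package="_global_",
    # Example env default: set an environment variable for every job.
    node={"hydra": {"job": {"env_set": {"FOO": "bar"}}}},
)
```

Selecting it would then be a matter of adding `hydra/env=custom` on the command line or to the primary config's defaults list.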
| [
{
"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nfrom dataclasses import dataclass, field\nfrom typing import Any, Dict, List, Optional\n\nfrom omegaconf import MISSING\n\nfrom hydra.core.config_store import ConfigStore\n\n\n@dataclass\nclass HelpConf:\n app_name: str = MISSING\n header: str = MISSING\n footer: str = MISSING\n template: str = MISSING\n\n\n@dataclass\nclass HydraHelpConf:\n hydra_help: str = MISSING\n template: str = MISSING\n\n\n@dataclass\nclass RunDir:\n dir: str = MISSING\n\n\n@dataclass\nclass SweepDir:\n dir: str = MISSING\n subdir: str = MISSING\n\n\n@dataclass\nclass OverridesConf:\n # Overrides for the hydra configuration\n hydra: List[str] = field(default_factory=lambda: [])\n # Overrides for the task configuration\n task: List[str] = field(default_factory=lambda: [])\n\n\n# job runtime information will be populated here\n@dataclass\nclass JobConf:\n # Job name, populated automatically unless specified by the user (in config or cli)\n name: str = MISSING\n\n # Populated automatically by Hydra.\n # Concatenation of job overrides that can be used as a part\n # of the directory name.\n # This can be configured via hydra.job.config.override_dirname\n override_dirname: str = MISSING\n\n # Job ID in underlying scheduling system\n id: str = MISSING\n\n # Job number if job is a part of a sweep\n num: int = MISSING\n\n # The config name used by the job\n config_name: Optional[str] = MISSING\n\n # Environment variables to set remotely\n env_set: Dict[str, str] = field(default_factory=dict)\n # Environment variables to copy from the launching machine\n env_copy: List[str] = field(default_factory=list)\n\n # Job config\n @dataclass\n class JobConfig:\n @dataclass\n # configuration for the ${hydra.job.override_dirname} runtime variable\n class OverrideDirname:\n kv_sep: str = \"=\"\n item_sep: str = \",\"\n exclude_keys: List[str] = field(default_factory=list)\n\n override_dirname: OverrideDirname = OverrideDirname()\n\n config: JobConfig = JobConfig()\n\n\n@dataclass\nclass RuntimeConf:\n version: str = MISSING\n cwd: str = MISSING\n\n\n@dataclass\nclass HydraConf:\n defaults: List[Any] = field(\n default_factory=lambda: [\n {\"output\": \"default\"},\n {\"launcher\": \"basic\"},\n {\"sweeper\": \"basic\"},\n {\"help\": \"default\"},\n {\"hydra_help\": \"default\"},\n {\"hydra_logging\": \"default\"},\n {\"job_logging\": \"default\"},\n {\"callbacks\": None},\n ]\n )\n\n # Elements to append to the config search path.\n # Note: This can only be configured in the primary config.\n searchpath: List[str] = field(default_factory=list)\n\n # Normal run output configuration\n run: RunDir = RunDir()\n # Multi-run output configuration\n sweep: SweepDir = SweepDir()\n # Logging configuration for Hydra\n hydra_logging: Any = MISSING\n # Logging configuration for the job\n job_logging: Any = MISSING\n\n # Sweeper configuration\n sweeper: Any = MISSING\n # Launcher configuration\n launcher: Any = MISSING\n # Callbacks configuration\n callbacks: Dict[str, Any] = field(default_factory=dict)\n\n # Program Help template\n help: HelpConf = HelpConf()\n # Hydra's Help template\n hydra_help: HydraHelpConf = HydraHelpConf()\n\n # Output directory for produced configuration files and overrides.\n # E.g., hydra.yaml, overrides.yaml will go here. 
Useful for debugging\n # and extra context when looking at past runs.\n # Setting to None will prevent the creation of the output subdir.\n output_subdir: Optional[str] = \".hydra\"\n\n # Those lists will contain runtime overrides\n overrides: OverridesConf = OverridesConf()\n\n job: JobConf = JobConf()\n\n # populated at runtime\n runtime: RuntimeConf = RuntimeConf()\n\n # Can be a boolean, string or a list of strings\n # If a boolean, setting to true will set the log level for the root logger to debug\n # If a string, it's interpreted as a the list [string]\n # If a list, each element is interpreted as a logger to have logging level set to debug.\n # Typical command lines to manipulate hydra.verbose:\n # hydra.verbose=true\n # hydra.verbose=[hydra,__main__]\n # TODO: good use case for Union support in OmegaConf\n verbose: Any = False\n\n # Composition choices dictionary\n choices: Dict[str, str] = field(default_factory=lambda: {})\n\n\ncs = ConfigStore.instance()\n\ncs.store(\n group=\"hydra\",\n name=\"config\",\n node=HydraConf(),\n provider=\"hydra\",\n)\n",
"path": "hydra/conf/__init__.py"
}
] | [
{
"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nfrom dataclasses import dataclass, field\nfrom typing import Any, Dict, List, Optional\n\nfrom omegaconf import MISSING\n\nfrom hydra.core.config_store import ConfigStore\n\n\n@dataclass\nclass HelpConf:\n app_name: str = MISSING\n header: str = MISSING\n footer: str = MISSING\n template: str = MISSING\n\n\n@dataclass\nclass HydraHelpConf:\n hydra_help: str = MISSING\n template: str = MISSING\n\n\n@dataclass\nclass RunDir:\n dir: str = MISSING\n\n\n@dataclass\nclass SweepDir:\n dir: str = MISSING\n subdir: str = MISSING\n\n\n@dataclass\nclass OverridesConf:\n # Overrides for the hydra configuration\n hydra: List[str] = field(default_factory=lambda: [])\n # Overrides for the task configuration\n task: List[str] = field(default_factory=lambda: [])\n\n\n# job runtime information will be populated here\n@dataclass\nclass JobConf:\n # Job name, populated automatically unless specified by the user (in config or cli)\n name: str = MISSING\n\n # Populated automatically by Hydra.\n # Concatenation of job overrides that can be used as a part\n # of the directory name.\n # This can be configured via hydra.job.config.override_dirname\n override_dirname: str = MISSING\n\n # Job ID in underlying scheduling system\n id: str = MISSING\n\n # Job number if job is a part of a sweep\n num: int = MISSING\n\n # The config name used by the job\n config_name: Optional[str] = MISSING\n\n # Environment variables to set remotely\n env_set: Dict[str, str] = field(default_factory=dict)\n # Environment variables to copy from the launching machine\n env_copy: List[str] = field(default_factory=list)\n\n # Job config\n @dataclass\n class JobConfig:\n @dataclass\n # configuration for the ${hydra.job.override_dirname} runtime variable\n class OverrideDirname:\n kv_sep: str = \"=\"\n item_sep: str = \",\"\n exclude_keys: List[str] = field(default_factory=list)\n\n override_dirname: OverrideDirname = OverrideDirname()\n\n config: JobConfig = JobConfig()\n\n\n@dataclass\nclass RuntimeConf:\n version: str = MISSING\n cwd: str = MISSING\n\n\n@dataclass\nclass HydraConf:\n defaults: List[Any] = field(\n default_factory=lambda: [\n {\"output\": \"default\"},\n {\"launcher\": \"basic\"},\n {\"sweeper\": \"basic\"},\n {\"help\": \"default\"},\n {\"hydra_help\": \"default\"},\n {\"hydra_logging\": \"default\"},\n {\"job_logging\": \"default\"},\n {\"callbacks\": None},\n # env specific overrides\n {\"env\": \"default\"},\n ]\n )\n\n # Elements to append to the config search path.\n # Note: This can only be configured in the primary config.\n searchpath: List[str] = field(default_factory=list)\n\n # Normal run output configuration\n run: RunDir = RunDir()\n # Multi-run output configuration\n sweep: SweepDir = SweepDir()\n # Logging configuration for Hydra\n hydra_logging: Any = MISSING\n # Logging configuration for the job\n job_logging: Any = MISSING\n\n # Sweeper configuration\n sweeper: Any = MISSING\n # Launcher configuration\n launcher: Any = MISSING\n # Callbacks configuration\n callbacks: Dict[str, Any] = field(default_factory=dict)\n\n # Program Help template\n help: HelpConf = HelpConf()\n # Hydra's Help template\n hydra_help: HydraHelpConf = HydraHelpConf()\n\n # Output directory for produced configuration files and overrides.\n # E.g., hydra.yaml, overrides.yaml will go here. 
Useful for debugging\n # and extra context when looking at past runs.\n # Setting to None will prevent the creation of the output subdir.\n output_subdir: Optional[str] = \".hydra\"\n\n # Those lists will contain runtime overrides\n overrides: OverridesConf = OverridesConf()\n\n job: JobConf = JobConf()\n\n # populated at runtime\n runtime: RuntimeConf = RuntimeConf()\n\n # Can be a boolean, string or a list of strings\n # If a boolean, setting to true will set the log level for the root logger to debug\n # If a string, it's interpreted as a the list [string]\n # If a list, each element is interpreted as a logger to have logging level set to debug.\n # Typical command lines to manipulate hydra.verbose:\n # hydra.verbose=true\n # hydra.verbose=[hydra,__main__]\n # TODO: good use case for Union support in OmegaConf\n verbose: Any = False\n\n # Composition choices dictionary\n choices: Dict[str, str] = field(default_factory=lambda: {})\n\n\ncs = ConfigStore.instance()\n\ncs.store(\n group=\"hydra\",\n name=\"config\",\n node=HydraConf(),\n provider=\"hydra\",\n)\n",
"path": "hydra/conf/__init__.py"
}
] | diff --git a/hydra/conf/__init__.py b/hydra/conf/__init__.py
index dab5348094f..efd536bf88e 100644
--- a/hydra/conf/__init__.py
+++ b/hydra/conf/__init__.py
@@ -99,6 +99,8 @@ class HydraConf:
{"hydra_logging": "default"},
{"job_logging": "default"},
{"callbacks": None},
+ # env specific overrides
+ {"env": "default"},
]
)
diff --git a/hydra/conf/hydra/env/default.yaml b/hydra/conf/hydra/env/default.yaml
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/tests/test_apps/custom_env_defaults/hydra_plugins/env_defaults/__init__.py b/tests/test_apps/custom_env_defaults/hydra_plugins/env_defaults/__init__.py
new file mode 100644
index 00000000000..168f9979a46
--- /dev/null
+++ b/tests/test_apps/custom_env_defaults/hydra_plugins/env_defaults/__init__.py
@@ -0,0 +1 @@
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
diff --git a/tests/test_apps/custom_env_defaults/hydra_plugins/env_defaults/conf/__init__.py b/tests/test_apps/custom_env_defaults/hydra_plugins/env_defaults/conf/__init__.py
new file mode 100644
index 00000000000..168f9979a46
--- /dev/null
+++ b/tests/test_apps/custom_env_defaults/hydra_plugins/env_defaults/conf/__init__.py
@@ -0,0 +1 @@
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
diff --git a/tests/test_apps/custom_env_defaults/hydra_plugins/env_defaults/conf/hydra/env/default.yaml b/tests/test_apps/custom_env_defaults/hydra_plugins/env_defaults/conf/hydra/env/default.yaml
new file mode 100644
index 00000000000..d39c37b6290
--- /dev/null
+++ b/tests/test_apps/custom_env_defaults/hydra_plugins/env_defaults/conf/hydra/env/default.yaml
@@ -0,0 +1,6 @@
+# @package _global_
+
+hydra:
+ job:
+ env_set:
+ FOO: bar
diff --git a/tests/test_apps/custom_env_defaults/hydra_plugins/env_defaults/env_defaults.py b/tests/test_apps/custom_env_defaults/hydra_plugins/env_defaults/env_defaults.py
new file mode 100644
index 00000000000..5338cf21df5
--- /dev/null
+++ b/tests/test_apps/custom_env_defaults/hydra_plugins/env_defaults/env_defaults.py
@@ -0,0 +1,13 @@
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+from hydra.core.config_search_path import ConfigSearchPath
+from hydra.plugins.search_path_plugin import SearchPathPlugin
+
+
+class TestEnvDefaultSearchPathPlugin(SearchPathPlugin):
+ def manipulate_search_path(self, search_path: ConfigSearchPath) -> None:
+ # prepend search path to override env default
+ search_path.prepend(
+ provider="test-env-defaults",
+ path="pkg://hydra_plugins.env_defaults.conf",
+ anchor="hydra",
+ )
diff --git a/tests/test_apps/custom_env_defaults/my_app.py b/tests/test_apps/custom_env_defaults/my_app.py
new file mode 100644
index 00000000000..57994e76e11
--- /dev/null
+++ b/tests/test_apps/custom_env_defaults/my_app.py
@@ -0,0 +1,18 @@
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+import logging
+import os
+
+from omegaconf import DictConfig
+
+import hydra
+
+log = logging.getLogger(__name__)
+
+
[email protected]()
+def my_app(_: DictConfig) -> None:
+ assert os.getenv("FOO") == "bar"
+
+
+if __name__ == "__main__":
+ my_app()
diff --git a/tests/test_completion.py b/tests/test_completion.py
index 65eca2452ec..fa1613893fe 100644
--- a/tests/test_completion.py
+++ b/tests/test_completion.py
@@ -120,6 +120,7 @@ def test_bash_completion_with_dot_in_path() -> None:
"hydra/",
3,
[
+ "hydra/env=",
"hydra/help=",
"hydra/hydra_help=",
"hydra/hydra_logging=",
@@ -165,11 +166,13 @@ def test_completion_plugin(
config_loader = create_config_loader()
bc = DefaultCompletionPlugin(config_loader)
ret = bc._query(config_name="config.yaml", line=line_prefix + line)
+
assert ret == expected
ret = bc._query(
config_name="config.yaml", line="--multirun " + line_prefix + line
)
+
assert ret == expected
@mark.skipif(
diff --git a/tests/test_config_loader.py b/tests/test_config_loader.py
index 6b014569e06..d624db55ec9 100644
--- a/tests/test_config_loader.py
+++ b/tests/test_config_loader.py
@@ -402,6 +402,7 @@ def test_list_groups() -> None:
]
assert sorted(config_loader.list_groups("hydra")) == [
+ "env",
"help",
"hydra_help",
"hydra_logging",
@@ -757,6 +758,7 @@ def test_overriding_with_dict(config: str, overrides: Any, expected: Any) -> Non
[],
{
"optimizer": "nesterov",
+ "hydra/env": "default",
"hydra/callbacks": None,
"hydra/hydra_help": "default",
"hydra/help": "default",
@@ -773,6 +775,7 @@ def test_overriding_with_dict(config: str, overrides: Any, expected: Any) -> Non
["optimizer=adam"],
{
"optimizer": "adam",
+ "hydra/env": "default",
"hydra/callbacks": None,
"hydra/hydra_help": "default",
"hydra/help": "default",
diff --git a/tests/test_env_defaults.py b/tests/test_env_defaults.py
new file mode 100644
index 00000000000..5d1d20d9ade
--- /dev/null
+++ b/tests/test_env_defaults.py
@@ -0,0 +1,15 @@
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+from pathlib import Path
+
+from hydra.test_utils.test_utils import chdir_hydra_root, run_python_script
+
+chdir_hydra_root()
+
+
+def test_env_defaults(tmpdir: Path) -> None:
+
+ cmd = [
+ "tests/test_apps/custom_env_defaults/my_app.py",
+ "hydra.run.dir=" + str(tmpdir),
+ ]
+ run_python_script(cmd)
|
cal-itp__benefits-213 | Send X-XSS-Protection header
The X-XSS-Protection header can be used to manage certain browsers' protection against reflected cross-site scripting (XSS), stopping a page from being loaded if an attack is detected. In modern browsers, the Content-Security-Policy header can provide better protection against XSS, and setting X-XSS-Protection might be redundant (#203 tracks CSP implementation).
See more at https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-XSS-Protection
We'll want the following header/value:
```
X-XSS-Protection: 1; mode=block
```
This can be done in a new middleware class, configured in [`settings.py`](https://github.com/cal-itp/benefits/blob/dev/benefits/settings.py#L45) so that it applies to all requests/responses.
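A minimal sketch of such a middleware, assuming it would sit alongside the existing classes in `benefits/core/middleware.py`; the class name is hypothetical. Django's built-in `SecurityMiddleware` with `SECURE_BROWSER_XSS_FILTER = True` is an alternative that emits the same header without custom code.

```python
class XSSProtection:
    """Set the X-XSS-Protection header on every response."""

    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request):
        response = self.get_response(request)
        # Ask the browser to block the page when a reflected XSS attack is detected.
        response["X-XSS-Protection"] = "1; mode=block"
        return response
```

The class would then be appended to `MIDDLEWARE` in `settings.py` so the header is added to every response.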
| [
{
"content": "\"\"\"\nDjango settings for benefits project.\n\"\"\"\nimport os\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ[\"DJANGO_SECRET_KEY\"]\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = os.environ.get(\"DJANGO_DEBUG\", \"False\").lower() == \"true\"\n\nADMIN = os.environ.get(\"DJANGO_ADMIN\", \"False\").lower() == \"true\"\n\nALLOWED_HOSTS = []\n\nif DEBUG:\n ALLOWED_HOSTS.extend([\"*\"])\nelse:\n hosts = os.environ[\"DJANGO_ALLOWED_HOSTS\"].split(\",\")\n ALLOWED_HOSTS.extend(hosts)\n\n# Application definition\n\nINSTALLED_APPS = [\n \"django.contrib.sessions\",\n \"django.contrib.staticfiles\",\n \"benefits.core\",\n \"benefits.enrollment\",\n \"benefits.eligibility\",\n]\n\nif ADMIN:\n INSTALLED_APPS.extend(\n [\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.messages\",\n ]\n )\n\nMIDDLEWARE = [\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.locale.LocaleMiddleware\",\n \"benefits.core.middleware.Healthcheck\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n \"benefits.core.middleware.DebugSession\",\n \"benefits.core.middleware.ChangedLanguageEvent\",\n]\n\nif ADMIN:\n MIDDLEWARE.extend(\n [\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n ]\n )\n\nCSRF_COOKIE_AGE = None\nCSRF_COOKIE_SAMESITE = \"Strict\"\nCSRF_COOKIE_HTTPONLY = True\n\nSESSION_COOKIE_SAMESITE = \"Strict\"\nSESSION_ENGINE = \"django.contrib.sessions.backends.signed_cookies\"\nSESSION_EXPIRE_AT_BROWSER_CLOSE = True\n\nif not DEBUG:\n CSRF_COOKIE_SECURE = True\n CSRF_FAILURE_VIEW = \"benefits.core.views.csrf_failure\"\n SESSION_COOKIE_SECURE = True\n\nROOT_URLCONF = \"benefits.urls\"\n\ntemplate_ctx_processors = [\n \"django.template.context_processors.request\",\n \"benefits.core.context_processors.analytics\",\n]\n\nif DEBUG:\n template_ctx_processors.extend(\n [\n \"django.template.context_processors.debug\",\n \"benefits.core.context_processors.debug\",\n ]\n )\n\nif ADMIN:\n template_ctx_processors.extend(\n [\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n ]\n )\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [os.path.join(BASE_DIR, \"benefits\", \"templates\")],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": template_ctx_processors,\n },\n },\n]\n\nWSGI_APPLICATION = \"benefits.wsgi.application\"\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.sqlite3\",\n \"NAME\": os.environ.get(\"DJANGO_DB\", \"django\") + \".db\",\n }\n}\n\n# Password validation\n\nAUTH_PASSWORD_VALIDATORS = []\n\nif ADMIN:\n AUTH_PASSWORD_VALIDATORS.extend(\n [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n 
]\n )\n\n# Internationalization\n\nLANGUAGE_CODE = \"en\"\n\nLANGUAGE_COOKIE_HTTPONLY = True\nLANGUAGE_COOKIE_SAMESITE = \"Strict\"\nLANGUAGE_COOKIE_SECURE = True\n\nLANGUAGES = [(\"en\", \"English\"), (\"es\", \"Español\")]\n\nLOCALE_PATHS = [os.path.join(BASE_DIR, \"benefits\", \"locale\")]\n\nUSE_I18N = True\nUSE_L10N = True\n\nTIME_ZONE = \"UTC\"\nUSE_TZ = True\n\n# Static files (CSS, JavaScript, Images)\n\nSTATIC_URL = \"/static/\"\nSTATICFILES_DIRS = [os.path.join(BASE_DIR, \"benefits\", \"static\")]\nSTATIC_ROOT = os.path.join(BASE_DIR, \"static\")\n\n# Logging configuration\n\nLOG_LEVEL = os.environ.get(\"DJANGO_LOG_LEVEL\", \"DEBUG\" if DEBUG else \"WARNING\")\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\n \"default\": {\n \"format\": \"[{asctime}] {levelname} {name}:{lineno} {message}\",\n \"datefmt\": \"%d/%b/%Y %H:%M:%S\",\n \"style\": \"{\",\n },\n },\n \"handlers\": {\n \"default\": {\"class\": \"logging.StreamHandler\", \"formatter\": \"default\"},\n },\n \"root\": {\n \"handlers\": [\"default\"],\n \"level\": LOG_LEVEL,\n },\n \"loggers\": {\"django\": {\"handlers\": [\"default\"], \"propagate\": False}},\n}\n\n# Analytics configuration\n\nANALYTICS_KEY = os.environ.get(\"ANALYTICS_KEY\")\n",
"path": "benefits/settings.py"
}
] | [
{
"content": "\"\"\"\nDjango settings for benefits project.\n\"\"\"\nimport os\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ[\"DJANGO_SECRET_KEY\"]\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = os.environ.get(\"DJANGO_DEBUG\", \"False\").lower() == \"true\"\n\nADMIN = os.environ.get(\"DJANGO_ADMIN\", \"False\").lower() == \"true\"\n\nALLOWED_HOSTS = []\n\nif DEBUG:\n ALLOWED_HOSTS.extend([\"*\"])\nelse:\n hosts = os.environ[\"DJANGO_ALLOWED_HOSTS\"].split(\",\")\n ALLOWED_HOSTS.extend(hosts)\n\n# Application definition\n\nINSTALLED_APPS = [\n \"django.contrib.sessions\",\n \"django.contrib.staticfiles\",\n \"benefits.core\",\n \"benefits.enrollment\",\n \"benefits.eligibility\",\n]\n\nif ADMIN:\n INSTALLED_APPS.extend(\n [\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.messages\",\n ]\n )\n\nMIDDLEWARE = [\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.locale.LocaleMiddleware\",\n \"benefits.core.middleware.Healthcheck\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n \"benefits.core.middleware.DebugSession\",\n \"benefits.core.middleware.ChangedLanguageEvent\",\n]\n\nif ADMIN:\n MIDDLEWARE.extend(\n [\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n ]\n )\n\nCSRF_COOKIE_AGE = None\nCSRF_COOKIE_SAMESITE = \"Strict\"\nCSRF_COOKIE_HTTPONLY = True\n\nSESSION_COOKIE_SAMESITE = \"Strict\"\nSESSION_ENGINE = \"django.contrib.sessions.backends.signed_cookies\"\nSESSION_EXPIRE_AT_BROWSER_CLOSE = True\n\nif not DEBUG:\n CSRF_COOKIE_SECURE = True\n CSRF_FAILURE_VIEW = \"benefits.core.views.csrf_failure\"\n SESSION_COOKIE_SECURE = True\n\nSECURE_BROWSER_XSS_FILTER = True\n\nROOT_URLCONF = \"benefits.urls\"\n\ntemplate_ctx_processors = [\n \"django.template.context_processors.request\",\n \"benefits.core.context_processors.analytics\",\n]\n\nif DEBUG:\n template_ctx_processors.extend(\n [\n \"django.template.context_processors.debug\",\n \"benefits.core.context_processors.debug\",\n ]\n )\n\nif ADMIN:\n template_ctx_processors.extend(\n [\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n ]\n )\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [os.path.join(BASE_DIR, \"benefits\", \"templates\")],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": template_ctx_processors,\n },\n },\n]\n\nWSGI_APPLICATION = \"benefits.wsgi.application\"\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.sqlite3\",\n \"NAME\": os.environ.get(\"DJANGO_DB\", \"django\") + \".db\",\n }\n}\n\n# Password validation\n\nAUTH_PASSWORD_VALIDATORS = []\n\nif ADMIN:\n AUTH_PASSWORD_VALIDATORS.extend(\n [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": 
\"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n ]\n )\n\n# Internationalization\n\nLANGUAGE_CODE = \"en\"\n\nLANGUAGE_COOKIE_HTTPONLY = True\nLANGUAGE_COOKIE_SAMESITE = \"Strict\"\nLANGUAGE_COOKIE_SECURE = True\n\nLANGUAGES = [(\"en\", \"English\"), (\"es\", \"Español\")]\n\nLOCALE_PATHS = [os.path.join(BASE_DIR, \"benefits\", \"locale\")]\n\nUSE_I18N = True\nUSE_L10N = True\n\nTIME_ZONE = \"UTC\"\nUSE_TZ = True\n\n# Static files (CSS, JavaScript, Images)\n\nSTATIC_URL = \"/static/\"\nSTATICFILES_DIRS = [os.path.join(BASE_DIR, \"benefits\", \"static\")]\nSTATIC_ROOT = os.path.join(BASE_DIR, \"static\")\n\n# Logging configuration\n\nLOG_LEVEL = os.environ.get(\"DJANGO_LOG_LEVEL\", \"DEBUG\" if DEBUG else \"WARNING\")\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\n \"default\": {\n \"format\": \"[{asctime}] {levelname} {name}:{lineno} {message}\",\n \"datefmt\": \"%d/%b/%Y %H:%M:%S\",\n \"style\": \"{\",\n },\n },\n \"handlers\": {\n \"default\": {\"class\": \"logging.StreamHandler\", \"formatter\": \"default\"},\n },\n \"root\": {\n \"handlers\": [\"default\"],\n \"level\": LOG_LEVEL,\n },\n \"loggers\": {\"django\": {\"handlers\": [\"default\"], \"propagate\": False}},\n}\n\n# Analytics configuration\n\nANALYTICS_KEY = os.environ.get(\"ANALYTICS_KEY\")\n",
"path": "benefits/settings.py"
}
] | diff --git a/benefits/settings.py b/benefits/settings.py
index 722aeafcfd..830d0bb5b3 100644
--- a/benefits/settings.py
+++ b/benefits/settings.py
@@ -75,6 +75,8 @@
CSRF_FAILURE_VIEW = "benefits.core.views.csrf_failure"
SESSION_COOKIE_SECURE = True
+SECURE_BROWSER_XSS_FILTER = True
+
ROOT_URLCONF = "benefits.urls"
template_ctx_processors = [
|
Netflix__lemur-455 | A custom cert name with spaces causes AWS Upload failures
A cert created with a custom name that contains spaces, such as `My Certificate`, will not be uploaded to AWS properly.
-- Potential Fixes:
1. Prevent spaces in custom names
2. Allow custom cert names to be editable
3. If spaces are allowed, the AWS uploader plugin needs to upload the certificate in a way that works properly (see the sketch below).
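One way to realise fix 1 or 3 is to normalise whitespace out of the name before it is stored or handed to the AWS plugin. A minimal sketch, with a hypothetical helper name that is not part of Lemur's code:

```python
import re


def sanitize_certificate_name(name):
    """Collapse whitespace to hyphens so the name is valid for an IAM server certificate."""
    return re.sub(r"\s+", "-", name.strip())


assert sanitize_certificate_name("My Certificate") == "My-Certificate"
```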
| [
{
"content": "\"\"\"\n.. module: lemur.certificates.models\n :platform: Unix\n :copyright: (c) 2015 by Netflix Inc., see AUTHORS for more\n :license: Apache, see LICENSE for more details.\n.. moduleauthor:: Kevin Glisson <[email protected]>\n\"\"\"\nimport datetime\n\nimport lemur.common.utils\nfrom flask import current_app\n\nfrom sqlalchemy.orm import relationship\nfrom sqlalchemy.sql.expression import case\nfrom sqlalchemy.ext.hybrid import hybrid_property\nfrom sqlalchemy import event, Integer, ForeignKey, String, DateTime, PassiveDefault, func, Column, Text, Boolean\n\nfrom lemur.database import db\nfrom lemur.models import certificate_associations, certificate_source_associations, \\\n certificate_destination_associations, certificate_notification_associations, \\\n certificate_replacement_associations, roles_certificates\nfrom lemur.plugins.base import plugins\nfrom lemur.utils import Vault\n\nfrom lemur.common import defaults\nfrom lemur.domains.models import Domain\n\n\ndef get_or_increase_name(name):\n count = Certificate.query.filter(Certificate.name.ilike('{0}%'.format(name))).count()\n\n if count >= 1:\n return name + '-' + str(count)\n\n return name\n\n\nclass Certificate(db.Model):\n __tablename__ = 'certificates'\n id = Column(Integer, primary_key=True)\n owner = Column(String(128), nullable=False)\n name = Column(String(128), unique=True)\n description = Column(String(1024))\n notify = Column(Boolean, default=True)\n\n body = Column(Text(), nullable=False)\n chain = Column(Text())\n private_key = Column(Vault)\n\n issuer = Column(String(128))\n serial = Column(String(128))\n cn = Column(String(128))\n deleted = Column(Boolean, index=True)\n\n not_before = Column(DateTime)\n not_after = Column(DateTime)\n date_created = Column(DateTime, PassiveDefault(func.now()), nullable=False)\n\n signing_algorithm = Column(String(128))\n status = Column(String(128))\n bits = Column(Integer())\n san = Column(String(1024)) # TODO this should be migrated to boolean\n\n user_id = Column(Integer, ForeignKey('users.id'))\n authority_id = Column(Integer, ForeignKey('authorities.id', ondelete=\"CASCADE\"))\n root_authority_id = Column(Integer, ForeignKey('authorities.id', ondelete=\"CASCADE\"))\n\n notifications = relationship(\"Notification\", secondary=certificate_notification_associations, backref='certificate')\n destinations = relationship(\"Destination\", secondary=certificate_destination_associations, backref='certificate')\n sources = relationship(\"Source\", secondary=certificate_source_associations, backref='certificate')\n domains = relationship(\"Domain\", secondary=certificate_associations, backref=\"certificate\")\n roles = relationship(\"Role\", secondary=roles_certificates, backref=\"certificate\")\n replaces = relationship(\"Certificate\",\n secondary=certificate_replacement_associations,\n primaryjoin=id == certificate_replacement_associations.c.certificate_id, # noqa\n secondaryjoin=id == certificate_replacement_associations.c.replaced_certificate_id, # noqa\n backref='replaced')\n\n endpoints = relationship(\"Endpoint\", backref='certificate')\n\n def __init__(self, **kwargs):\n cert = lemur.common.utils.parse_certificate(kwargs['body'])\n\n self.issuer = defaults.issuer(cert)\n self.cn = defaults.common_name(cert)\n self.san = defaults.san(cert)\n self.not_before = defaults.not_before(cert)\n self.not_after = defaults.not_after(cert)\n\n # when destinations are appended they require a valid name.\n if kwargs.get('name'):\n self.name = get_or_increase_name(kwargs['name'])\n 
else:\n self.name = get_or_increase_name(defaults.certificate_name(self.cn, self.issuer, self.not_before, self.not_after, self.san))\n\n self.owner = kwargs['owner']\n self.body = kwargs['body'].strip()\n\n if kwargs.get('private_key'):\n self.private_key = kwargs['private_key'].strip()\n\n if kwargs.get('chain'):\n self.chain = kwargs['chain'].strip()\n\n self.destinations = kwargs.get('destinations', [])\n self.notifications = kwargs.get('notifications', [])\n self.description = kwargs.get('description')\n self.roles = list(set(kwargs.get('roles', [])))\n self.replaces = kwargs.get('replacements', [])\n self.signing_algorithm = defaults.signing_algorithm(cert)\n self.bits = defaults.bitstrength(cert)\n self.serial = defaults.serial(cert)\n\n for domain in defaults.domains(cert):\n self.domains.append(Domain(name=domain))\n\n @property\n def active(self):\n if self.endpoints:\n return True\n\n @hybrid_property\n def expired(self):\n if self.not_after <= datetime.datetime.now():\n return True\n\n @expired.expression\n def expired(cls):\n return case(\n [\n (cls.now_after <= datetime.datetime.now(), True)\n ],\n else_=False\n )\n\n @hybrid_property\n def revoked(self):\n if 'revoked' == self.status:\n return True\n\n @revoked.expression\n def revoked(cls):\n return case(\n [\n (cls.status == 'revoked', True)\n ],\n else_=False\n )\n\n def get_arn(self, account_number):\n \"\"\"\n Generate a valid AWS IAM arn\n\n :rtype : str\n :param account_number:\n :return:\n \"\"\"\n return \"arn:aws:iam::{}:server-certificate/{}\".format(account_number, self.name)\n\n\[email protected]_for(Certificate.destinations, 'append')\ndef update_destinations(target, value, initiator):\n \"\"\"\n Attempt to upload the new certificate to the new destination\n\n :param target:\n :param value:\n :param initiator:\n :return:\n \"\"\"\n destination_plugin = plugins.get(value.plugin_name)\n\n try:\n destination_plugin.upload(target.name, target.body, target.private_key, target.chain, value.options)\n except Exception as e:\n current_app.logger.exception(e)\n\n\[email protected]_for(Certificate.replaces, 'append')\ndef update_replacement(target, value, initiator):\n \"\"\"\n When a certificate is marked as 'replaced' it is then marked as in-active\n\n :param target:\n :param value:\n :param initiator:\n :return:\n \"\"\"\n value.active = False\n\n\[email protected]_for(Certificate, 'before_update')\ndef protect_active(mapper, connection, target):\n \"\"\"\n When a certificate has a replacement do not allow it to be marked as 'active'\n\n :param connection:\n :param mapper:\n :param target:\n :return:\n \"\"\"\n if target.active:\n if not target.notify:\n raise Exception(\n \"Cannot silence notification for a certificate Lemur has been found to be currently deployed onto endpoints\"\n )\n",
"path": "lemur/certificates/models.py"
}
] | [
{
"content": "\"\"\"\n.. module: lemur.certificates.models\n :platform: Unix\n :copyright: (c) 2015 by Netflix Inc., see AUTHORS for more\n :license: Apache, see LICENSE for more details.\n.. moduleauthor:: Kevin Glisson <[email protected]>\n\"\"\"\nimport datetime\n\nimport lemur.common.utils\nfrom flask import current_app\n\nfrom sqlalchemy.orm import relationship\nfrom sqlalchemy.sql.expression import case\nfrom sqlalchemy.ext.hybrid import hybrid_property\nfrom sqlalchemy import event, Integer, ForeignKey, String, DateTime, PassiveDefault, func, Column, Text, Boolean\n\nfrom lemur.database import db\nfrom lemur.models import certificate_associations, certificate_source_associations, \\\n certificate_destination_associations, certificate_notification_associations, \\\n certificate_replacement_associations, roles_certificates\nfrom lemur.plugins.base import plugins\nfrom lemur.utils import Vault\n\nfrom lemur.common import defaults\nfrom lemur.domains.models import Domain\n\n\ndef get_or_increase_name(name):\n name = '-'.join(name.strip().split(' '))\n count = Certificate.query.filter(Certificate.name.ilike('{0}%'.format(name))).count()\n\n if count >= 1:\n return name + '-' + str(count)\n\n return name\n\n\nclass Certificate(db.Model):\n __tablename__ = 'certificates'\n id = Column(Integer, primary_key=True)\n owner = Column(String(128), nullable=False)\n name = Column(String(128), unique=True)\n description = Column(String(1024))\n active = Column(Boolean, default=True)\n\n body = Column(Text(), nullable=False)\n chain = Column(Text())\n private_key = Column(Vault)\n\n issuer = Column(String(128))\n serial = Column(String(128))\n cn = Column(String(128))\n deleted = Column(Boolean, index=True)\n\n not_before = Column(DateTime)\n not_after = Column(DateTime)\n date_created = Column(DateTime, PassiveDefault(func.now()), nullable=False)\n\n signing_algorithm = Column(String(128))\n status = Column(String(128))\n bits = Column(Integer())\n san = Column(String(1024)) # TODO this should be migrated to boolean\n\n user_id = Column(Integer, ForeignKey('users.id'))\n authority_id = Column(Integer, ForeignKey('authorities.id', ondelete=\"CASCADE\"))\n root_authority_id = Column(Integer, ForeignKey('authorities.id', ondelete=\"CASCADE\"))\n\n notifications = relationship(\"Notification\", secondary=certificate_notification_associations, backref='certificate')\n destinations = relationship(\"Destination\", secondary=certificate_destination_associations, backref='certificate')\n sources = relationship(\"Source\", secondary=certificate_source_associations, backref='certificate')\n domains = relationship(\"Domain\", secondary=certificate_associations, backref=\"certificate\")\n roles = relationship(\"Role\", secondary=roles_certificates, backref=\"certificate\")\n replaces = relationship(\"Certificate\",\n secondary=certificate_replacement_associations,\n primaryjoin=id == certificate_replacement_associations.c.certificate_id, # noqa\n secondaryjoin=id == certificate_replacement_associations.c.replaced_certificate_id, # noqa\n backref='replaced')\n\n endpoints = relationship(\"Endpoint\", backref='certificate')\n\n def __init__(self, **kwargs):\n cert = lemur.common.utils.parse_certificate(kwargs['body'])\n\n self.issuer = defaults.issuer(cert)\n self.cn = defaults.common_name(cert)\n self.san = defaults.san(cert)\n self.not_before = defaults.not_before(cert)\n self.not_after = defaults.not_after(cert)\n\n # when destinations are appended they require a valid name.\n if kwargs.get('name'):\n self.name 
= get_or_increase_name(kwargs['name'])\n else:\n self.name = get_or_increase_name(defaults.certificate_name(self.cn, self.issuer, self.not_before, self.not_after, self.san))\n\n self.owner = kwargs['owner']\n self.body = kwargs['body'].strip()\n\n if kwargs.get('private_key'):\n self.private_key = kwargs['private_key'].strip()\n\n if kwargs.get('chain'):\n self.chain = kwargs['chain'].strip()\n\n self.destinations = kwargs.get('destinations', [])\n self.notifications = kwargs.get('notifications', [])\n self.description = kwargs.get('description')\n self.roles = list(set(kwargs.get('roles', [])))\n self.replaces = kwargs.get('replacements', [])\n self.signing_algorithm = defaults.signing_algorithm(cert)\n self.bits = defaults.bitstrength(cert)\n self.serial = defaults.serial(cert)\n\n for domain in defaults.domains(cert):\n self.domains.append(Domain(name=domain))\n\n @hybrid_property\n def expired(self):\n if self.not_after <= datetime.datetime.now():\n return True\n\n @expired.expression\n def expired(cls):\n return case(\n [\n (cls.now_after <= datetime.datetime.now(), True)\n ],\n else_=False\n )\n\n @hybrid_property\n def revoked(self):\n if 'revoked' == self.status:\n return True\n\n @revoked.expression\n def revoked(cls):\n return case(\n [\n (cls.status == 'revoked', True)\n ],\n else_=False\n )\n\n def get_arn(self, account_number):\n \"\"\"\n Generate a valid AWS IAM arn\n\n :rtype : str\n :param account_number:\n :return:\n \"\"\"\n return \"arn:aws:iam::{}:server-certificate/{}\".format(account_number, self.name)\n\n\[email protected]_for(Certificate.destinations, 'append')\ndef update_destinations(target, value, initiator):\n \"\"\"\n Attempt to upload the new certificate to the new destination\n\n :param target:\n :param value:\n :param initiator:\n :return:\n \"\"\"\n destination_plugin = plugins.get(value.plugin_name)\n\n try:\n destination_plugin.upload(target.name, target.body, target.private_key, target.chain, value.options)\n except Exception as e:\n current_app.logger.exception(e)\n\n\[email protected]_for(Certificate.replaces, 'append')\ndef update_replacement(target, value, initiator):\n \"\"\"\n When a certificate is marked as 'replaced' it is then marked as in-active\n\n :param target:\n :param value:\n :param initiator:\n :return:\n \"\"\"\n value.active = False\n\n\[email protected]_for(Certificate, 'before_update')\ndef protect_active(mapper, connection, target):\n \"\"\"\n When a certificate has a replacement do not allow it to be marked as 'active'\n\n :param connection:\n :param mapper:\n :param target:\n :return:\n \"\"\"\n if target.active:\n if target.replaced:\n raise Exception(\"Cannot mark certificate as active, certificate has been marked as replaced.\")\n",
"path": "lemur/certificates/models.py"
}
] | diff --git a/lemur/certificates/models.py b/lemur/certificates/models.py
index f5a7d9caa5..30acbbb0a5 100644
--- a/lemur/certificates/models.py
+++ b/lemur/certificates/models.py
@@ -27,6 +27,7 @@
def get_or_increase_name(name):
+ name = '-'.join(name.strip().split(' '))
count = Certificate.query.filter(Certificate.name.ilike('{0}%'.format(name))).count()
if count >= 1:
|
nltk__nltk-1274 | Tox fails with "ERROR: Failure: ImportError (No module named 'six')"
When I try to run the tests with Tox (on Ubuntu) from within a local clone of the repo, it manages to install the dependencies but blows up when trying to import things from within NLTK.
I imagine I can work around this by figuring out how to manually run just the tests I care about, but it's inconvenient.
I'm not sure whether I'm doing something dumb or whether the Tox setup is broken; if the former, the CONTRIBUTING docs should probably mention what needs to be done besides just running Tox; if the latter, it should probably be fixed.
Here's the full output (had to pastebin it due to GitHub's post length limit):
http://pastebin.com/ENuCLnv6
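The accompanying diff fixes this by importing from the copy of `six` that ships inside NLTK instead of the standalone package, so the tox environment no longer needs `six` installed separately. Below is a minimal sketch of that pattern, assuming `nltk.six` is importable as in the patched `nltk/tokenize/api.py`; the `ExampleInterface` class is only illustrative:
```
from abc import ABCMeta, abstractmethod

# Use the six copy bundled with NLTK rather than the external dependency.
from nltk.six import add_metaclass


@add_metaclass(ABCMeta)
class ExampleInterface(object):
    """Illustrative abstract interface, mirroring how TokenizerI uses add_metaclass."""

    @abstractmethod
    def run(self, data):
        """Subclasses must implement this."""
```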
| [
{
"content": "# Natural Language Toolkit: Tokenizer Interface\n#\n# Copyright (C) 2001-2015 NLTK Project\n# Author: Edward Loper <[email protected]>\n# Steven Bird <[email protected]>\n# URL: <http://nltk.org/>\n# For license information, see LICENSE.TXT\n\n\"\"\"\nTokenizer Interface\n\"\"\"\n\nfrom abc import ABCMeta, abstractmethod\nfrom six import add_metaclass\n\nfrom nltk.internals import overridden\nfrom nltk.tokenize.util import string_span_tokenize\n\n@add_metaclass(ABCMeta)\nclass TokenizerI(object):\n \"\"\"\n A processing interface for tokenizing a string.\n Subclasses must define ``tokenize()`` or ``tokenize_sents()`` (or both).\n \"\"\"\n @abstractmethod\n def tokenize(self, s):\n \"\"\"\n Return a tokenized copy of *s*.\n\n :rtype: list of str\n \"\"\"\n if overridden(self.tokenize_sents):\n return self.tokenize_sents([s])[0]\n\n def span_tokenize(self, s):\n \"\"\"\n Identify the tokens using integer offsets ``(start_i, end_i)``,\n where ``s[start_i:end_i]`` is the corresponding token.\n\n :rtype: iter(tuple(int, int))\n \"\"\"\n raise NotImplementedError()\n\n def tokenize_sents(self, strings):\n \"\"\"\n Apply ``self.tokenize()`` to each element of ``strings``. I.e.:\n\n return [self.tokenize(s) for s in strings]\n\n :rtype: list(list(str))\n \"\"\"\n return [self.tokenize(s) for s in strings]\n\n def span_tokenize_sents(self, strings):\n \"\"\"\n Apply ``self.span_tokenize()`` to each element of ``strings``. I.e.:\n\n return [self.span_tokenize(s) for s in strings]\n\n :rtype: iter(list(tuple(int, int)))\n \"\"\"\n for s in strings:\n yield list(self.span_tokenize(s))\n\n\nclass StringTokenizer(TokenizerI):\n \"\"\"A tokenizer that divides a string into substrings by splitting\n on the specified string (defined in subclasses).\n \"\"\"\n\n def tokenize(self, s):\n return s.split(self._string)\n\n def span_tokenize(self, s):\n for span in string_span_tokenize(s, self._string):\n yield span\n\n\n",
"path": "nltk/tokenize/api.py"
}
] | [
{
"content": "# Natural Language Toolkit: Tokenizer Interface\n#\n# Copyright (C) 2001-2015 NLTK Project\n# Author: Edward Loper <[email protected]>\n# Steven Bird <[email protected]>\n# URL: <http://nltk.org/>\n# For license information, see LICENSE.TXT\n\n\"\"\"\nTokenizer Interface\n\"\"\"\n\nfrom abc import ABCMeta, abstractmethod\nfrom nltk.six import add_metaclass\n\nfrom nltk.internals import overridden\nfrom nltk.tokenize.util import string_span_tokenize\n\n@add_metaclass(ABCMeta)\nclass TokenizerI(object):\n \"\"\"\n A processing interface for tokenizing a string.\n Subclasses must define ``tokenize()`` or ``tokenize_sents()`` (or both).\n \"\"\"\n @abstractmethod\n def tokenize(self, s):\n \"\"\"\n Return a tokenized copy of *s*.\n\n :rtype: list of str\n \"\"\"\n if overridden(self.tokenize_sents):\n return self.tokenize_sents([s])[0]\n\n def span_tokenize(self, s):\n \"\"\"\n Identify the tokens using integer offsets ``(start_i, end_i)``,\n where ``s[start_i:end_i]`` is the corresponding token.\n\n :rtype: iter(tuple(int, int))\n \"\"\"\n raise NotImplementedError()\n\n def tokenize_sents(self, strings):\n \"\"\"\n Apply ``self.tokenize()`` to each element of ``strings``. I.e.:\n\n return [self.tokenize(s) for s in strings]\n\n :rtype: list(list(str))\n \"\"\"\n return [self.tokenize(s) for s in strings]\n\n def span_tokenize_sents(self, strings):\n \"\"\"\n Apply ``self.span_tokenize()`` to each element of ``strings``. I.e.:\n\n return [self.span_tokenize(s) for s in strings]\n\n :rtype: iter(list(tuple(int, int)))\n \"\"\"\n for s in strings:\n yield list(self.span_tokenize(s))\n\n\nclass StringTokenizer(TokenizerI):\n \"\"\"A tokenizer that divides a string into substrings by splitting\n on the specified string (defined in subclasses).\n \"\"\"\n\n def tokenize(self, s):\n return s.split(self._string)\n\n def span_tokenize(self, s):\n for span in string_span_tokenize(s, self._string):\n yield span\n\n\n",
"path": "nltk/tokenize/api.py"
}
] | diff --git a/nltk/test/probability.doctest b/nltk/test/probability.doctest
index b19c9d689a..594d18b00a 100644
--- a/nltk/test/probability.doctest
+++ b/nltk/test/probability.doctest
@@ -69,33 +69,33 @@ ConditionalFreqDist
-------------------
>>> cfd1 = ConditionalFreqDist()
- >>> cfd1[1] = FreqDist('abbbc')
+ >>> cfd1[1] = FreqDist('abbbb')
>>> cfd1[2] = FreqDist('xxxxyy')
>>> cfd1
<ConditionalFreqDist with 2 conditions>
>>> cfd2 = ConditionalFreqDist()
- >>> cfd2[1] = FreqDist('bccd')
- >>> cfd2[2] = FreqDist('xxyyyzz')
+ >>> cfd2[1] = FreqDist('bbccc')
+ >>> cfd2[2] = FreqDist('xxxyyyzz')
>>> cfd2[3] = FreqDist('m')
>>> cfd2
<ConditionalFreqDist with 3 conditions>
>>> r = cfd1 + cfd2
>>> [(i,r[i]) for i in r.conditions()]
- [(1, FreqDist({'b': 4, 'c': 3, 'a': 1, 'd': 1})), (2, FreqDist({'x': 6, 'y': 5, 'z': 2})), (3, FreqDist({'m': 1}))]
+ [(1, FreqDist({'b': 6, 'c': 3, 'a': 1})), (2, FreqDist({'x': 7, 'y': 5, 'z': 2})), (3, FreqDist({'m': 1}))]
>>> r = cfd1 - cfd2
>>> [(i,r[i]) for i in r.conditions()]
- [(1, FreqDist({'b': 2, 'a': 1})), (2, FreqDist({'x': 2}))]
+ [(1, FreqDist({'b': 2, 'a': 1})), (2, FreqDist({'x': 1}))]
>>> r = cfd1 | cfd2
>>> [(i,r[i]) for i in r.conditions()]
- [(1, FreqDist({'b': 3, 'c': 2, 'a': 1, 'd': 1})), (2, FreqDist({'x': 4, 'y': 3, 'z': 2})), (3, FreqDist({'m': 1}))]
+ [(1, FreqDist({'b': 4, 'c': 3, 'a': 1})), (2, FreqDist({'x': 4, 'y': 3, 'z': 2})), (3, FreqDist({'m': 1}))]
>>> r = cfd1 & cfd2
>>> [(i,r[i]) for i in r.conditions()]
- [(1, FreqDist({'c': 1, 'b': 1})), (2, FreqDist({'y': 2, 'x': 2}))]
+ [(1, FreqDist({'b': 2})), (2, FreqDist({'x': 3, 'y': 2}))]
Testing some HMM estimators
---------------------------
diff --git a/nltk/tokenize/api.py b/nltk/tokenize/api.py
index 98c168b43b..174d23f394 100644
--- a/nltk/tokenize/api.py
+++ b/nltk/tokenize/api.py
@@ -11,7 +11,7 @@
"""
from abc import ABCMeta, abstractmethod
-from six import add_metaclass
+from nltk.six import add_metaclass
from nltk.internals import overridden
from nltk.tokenize.util import string_span_tokenize
|
python-telegram-bot__python-telegram-bot-699 | Bot tries to assign user id to anonymous channel post
### Steps to reproduce
1. Add a bot to a channel that doesn't have post signing.
2. Post something to that channel.
3. Bot tries to assign a user id to the update and gets an AttributeError because there is no user.
### Expected behaviour
The bot checks if the user is in the update before trying to get the id.
### Actual behaviour
An AttributeError is thrown and the bot fails to process the update.
### Configuration
**Operating System:**
macOS Sierra
**Version of Python, python-telegram-bot & dependencies:**
```
python-telegram-bot 6.1.0
urllib3 1.20
certifi 2017.04.17
future 0.16.0
Python 3.6.0 (default, Dec 24 2016, 08:01:42) [GCC 4.2.1 Compatible Apple LLVM 8.0.0 (clang-800.0.42.1)]
```
### Logs
```
Traceback (most recent call last):
File "/Users/jelle/.venv/lib/python3.6/site-packages/telegram/ext/dispatcher.py", line 264, in process_update
if handler.check_update(update):
File "/Users/jelle/.venv/lib/python3.6/site-packages/telegram/ext/conversationhandler.py", line 181, in check_update
key = self._get_key(update)
File "/Users/jelle/.venv/lib/python3.6/site-packages/telegram/ext/conversationhandler.py", line 164, in _get_key
key.append(user.id)
AttributeError: 'NoneType' object has no attribute 'id'
```
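The traceback points at `ConversationHandler._get_key()` appending `user.id` without first checking whether the update carries a user at all (channel posts without post signing have none). Below is a minimal sketch of the guard described under "Expected behaviour", using a trimmed-down stand-in class; the real method also handles per-message keys:
```
class ConversationHandlerSketch(object):
    # Trimmed-down stand-in; only the per_chat/per_user key logic is shown.
    def __init__(self, per_chat=True, per_user=True):
        self.per_chat = per_chat
        self.per_user = per_user

    def _get_key(self, update):
        chat = update.effective_chat
        user = update.effective_user  # None for anonymous channel posts

        key = list()
        if self.per_chat:
            key.append(chat.id)
        # Only append the user id when the update actually carries a user.
        if self.per_user and user is not None:
            key.append(user.id)
        return tuple(key)
```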
Message.from is not mandatory due to channels
Currently it's a required attribute in the Message constructor. We should fix that.
CC: @tsnoam
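Below is a minimal sketch of the suggested change, using a simplified stand-in rather than the real `telegram.Message` signature (which takes many more fields): giving the sender argument a `None` default lets anonymous channel posts be constructed without a user.
```
class MessageSketch(object):
    # Hypothetical, trimmed-down constructor; 'from' is exposed as from_user
    # because 'from' is a reserved word in Python.
    def __init__(self, message_id, from_user=None, date=None, chat=None):
        self.message_id = message_id
        self.from_user = from_user  # None for anonymous channel posts
        self.date = date
        self.chat = chat


# A channel post with no sender can then be represented without an error:
channel_post = MessageSketch(0, None, None, chat="some-channel")
```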
| [
{
"content": "#!/usr/bin/env python\n#\n# A library that provides a Python interface to the Telegram Bot API\n# Copyright (C) 2015-2017\n# Leandro Toledo de Souza <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser Public License for more details.\n#\n# You should have received a copy of the GNU Lesser Public License\n# along with this program. If not, see [http://www.gnu.org/licenses/].\n\"\"\" This module contains the ConversationHandler \"\"\"\n\nimport logging\n\nfrom telegram import Update\nfrom telegram.ext import (Handler, CallbackQueryHandler, InlineQueryHandler,\n ChosenInlineResultHandler)\nfrom telegram.utils.promise import Promise\n\n\nclass ConversationHandler(Handler):\n \"\"\"\n A handler to hold a conversation with a single user by managing four collections of other\n handlers. Note that neither posts in Telegram Channels, nor group interactions with multiple\n users are managed by instances of this class.\n\n The first collection, a ``list`` named ``entry_points``, is used to initiate the conversation,\n for example with a ``CommandHandler`` or ``RegexHandler``.\n\n The second collection, a ``dict`` named ``states``, contains the different conversation steps\n and one or more associated handlers that should be used if the user sends a message when the\n conversation with them is currently in that state. You will probably use mostly\n ``MessageHandler`` and ``RegexHandler`` here.\n\n The third collection, a ``list`` named ``fallbacks``, is used if the user is currently in a\n conversation but the state has either no associated handler or the handler that is associated\n to the state is inappropriate for the update, for example if the update contains a command, but\n a regular text message is expected. You could use this for a ``/cancel`` command or to let the\n user know their message was not recognized.\n\n The fourth, optional collection of handlers, a ``list`` named ``timed_out_behavior`` is used if\n the wait for ``run_async`` takes longer than defined in ``run_async_timeout``. For example,\n you can let the user know that they should wait for a bit before they can continue.\n\n To change the state of conversation, the callback function of a handler must return the new\n state after responding to the user. If it does not return anything (returning ``None`` by\n default), the state will not change. To end the conversation, the callback function must\n return ``CallbackHandler.END`` or ``-1``.\n\n Args:\n entry_points (list): A list of ``Handler`` objects that can trigger the start of the\n conversation. The first handler which ``check_update`` method returns ``True`` will be\n used. If all return ``False``, the update is not handled.\n states (dict): A ``dict[object: list[Handler]]`` that defines the different states of\n conversation a user can be in and one or more associated ``Handler`` objects that\n should be used in that state. 
The first handler which ``check_update`` method returns\n ``True`` will be used.\n fallbacks (list): A list of handlers that might be used if the user is in a conversation,\n but every handler for their current state returned ``False`` on ``check_update``.\n The first handler which ``check_update`` method returns ``True`` will be used. If all\n return ``False``, the update is not handled.\n allow_reentry (Optional[bool]): If set to ``True``, a user that is currently in a\n conversation can restart the conversation by triggering one of the entry points.\n run_async_timeout (Optional[float]): If the previous handler for this user was running\n asynchronously using the ``run_async`` decorator, it might not be finished when the\n next message arrives. This timeout defines how long the conversation handler should\n wait for the next state to be computed. The default is ``None`` which means it will\n wait indefinitely.\n timed_out_behavior (Optional[list]): A list of handlers that might be used if\n the wait for ``run_async`` timed out. The first handler which ``check_update`` method\n returns ``True`` will be used. If all return ``False``, the update is not handled.\n\n \"\"\"\n\n END = -1\n\n def __init__(self,\n entry_points,\n states,\n fallbacks,\n allow_reentry=False,\n run_async_timeout=None,\n timed_out_behavior=None,\n per_chat=True,\n per_user=True,\n per_message=False):\n\n self.entry_points = entry_points\n \"\"\":type: list[telegram.ext.Handler]\"\"\"\n\n self.states = states\n \"\"\":type: dict[str: telegram.ext.Handler]\"\"\"\n\n self.fallbacks = fallbacks\n \"\"\":type: list[telegram.ext.Handler]\"\"\"\n\n self.allow_reentry = allow_reentry\n self.run_async_timeout = run_async_timeout\n\n self.timed_out_behavior = timed_out_behavior\n \"\"\":type: list[telegram.ext.Handler]\"\"\"\n\n self.conversations = dict()\n self.per_user = per_user\n self.per_chat = per_chat\n self.per_message = per_message\n \"\"\":type: dict[tuple: object]\"\"\"\n\n self.current_conversation = None\n self.current_handler = None\n\n self.logger = logging.getLogger(__name__)\n\n if not any((self.per_user, self.per_chat, self.per_message)):\n raise ValueError(\"'per_user', 'per_chat' and 'per_message' can't all be 'False'\")\n\n if self.per_message and not self.per_chat:\n logging.warning(\"If 'per_message=True' is used, 'per_chat=True' should also be used, \"\n \"since message IDs are not globally unique.\")\n\n all_handlers = list()\n all_handlers.extend(entry_points)\n all_handlers.extend(fallbacks)\n\n for state_handlers in states.values():\n all_handlers.extend(state_handlers)\n\n if self.per_message:\n for handler in all_handlers:\n if not isinstance(handler, CallbackQueryHandler):\n logging.warning(\"If 'per_message=True', all entry points and state handlers\"\n \" must be 'CallbackQueryHandler', since no other handlers \"\n \"have a message context.\")\n else:\n for handler in all_handlers:\n if isinstance(handler, CallbackQueryHandler):\n logging.warning(\"If 'per_message=False', 'CallbackQueryHandler' will not be \"\n \"tracked for every message.\")\n\n if self.per_chat:\n for handler in all_handlers:\n if isinstance(handler, (InlineQueryHandler, ChosenInlineResultHandler)):\n logging.warning(\"If 'per_chat=True', 'InlineQueryHandler' can not be used, \"\n \"since inline queries have no chat context.\")\n\n def _get_key(self, update):\n chat = update.effective_chat\n user = update.effective_user\n\n key = list()\n\n if self.per_chat:\n key.append(chat.id)\n\n if self.per_user:\n 
key.append(user.id)\n\n if self.per_message:\n key.append(update.callback_query.inline_message_id\n or update.callback_query.message.message_id)\n\n return tuple(key)\n\n def check_update(self, update):\n\n # Ignore messages in channels\n if (not isinstance(update, Update) or update.channel_post or self.per_chat\n and (update.inline_query or update.chosen_inline_result) or self.per_message\n and not update.callback_query or update.callback_query and self.per_chat\n and not update.callback_query.message):\n return False\n\n key = self._get_key(update)\n state = self.conversations.get(key)\n\n # Resolve promises\n if isinstance(state, tuple) and len(state) is 2 and isinstance(state[1], Promise):\n self.logger.debug('waiting for promise...')\n\n old_state, new_state = state\n error = False\n try:\n res = new_state.result(timeout=self.run_async_timeout)\n except Exception as exc:\n self.logger.exception(\"Promise function raised exception\")\n self.logger.exception(\"{}\".format(exc))\n error = True\n\n if not error and new_state.done.is_set():\n self.update_state(res, key)\n state = self.conversations.get(key)\n\n else:\n for candidate in (self.timed_out_behavior or []):\n if candidate.check_update(update):\n # Save the current user and the selected handler for handle_update\n self.current_conversation = key\n self.current_handler = candidate\n\n return True\n\n else:\n return False\n\n self.logger.debug('selecting conversation %s with state %s' % (str(key), str(state)))\n\n handler = None\n\n # Search entry points for a match\n if state is None or self.allow_reentry:\n for entry_point in self.entry_points:\n if entry_point.check_update(update):\n handler = entry_point\n break\n\n else:\n if state is None:\n return False\n\n # Get the handler list for current state, if we didn't find one yet and we're still here\n if state is not None and not handler:\n handlers = self.states.get(state)\n\n for candidate in (handlers or []):\n if candidate.check_update(update):\n handler = candidate\n break\n\n # Find a fallback handler if all other handlers fail\n else:\n for fallback in self.fallbacks:\n if fallback.check_update(update):\n handler = fallback\n break\n\n else:\n return False\n\n # Save the current user and the selected handler for handle_update\n self.current_conversation = key\n self.current_handler = handler\n\n return True\n\n def handle_update(self, update, dispatcher):\n\n new_state = self.current_handler.handle_update(update, dispatcher)\n\n self.update_state(new_state, self.current_conversation)\n\n def update_state(self, new_state, key):\n if new_state == self.END:\n if key in self.conversations:\n del self.conversations[key]\n else:\n pass\n\n elif isinstance(new_state, Promise):\n self.conversations[key] = (self.conversations.get(key), new_state)\n\n elif new_state is not None:\n self.conversations[key] = new_state\n",
"path": "telegram/ext/conversationhandler.py"
}
] | [
{
"content": "#!/usr/bin/env python\n#\n# A library that provides a Python interface to the Telegram Bot API\n# Copyright (C) 2015-2017\n# Leandro Toledo de Souza <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser Public License for more details.\n#\n# You should have received a copy of the GNU Lesser Public License\n# along with this program. If not, see [http://www.gnu.org/licenses/].\n\"\"\" This module contains the ConversationHandler \"\"\"\n\nimport logging\n\nfrom telegram import Update\nfrom telegram.ext import (Handler, CallbackQueryHandler, InlineQueryHandler,\n ChosenInlineResultHandler)\nfrom telegram.utils.promise import Promise\n\n\nclass ConversationHandler(Handler):\n \"\"\"\n A handler to hold a conversation with a single user by managing four collections of other\n handlers. Note that neither posts in Telegram Channels, nor group interactions with multiple\n users are managed by instances of this class.\n\n The first collection, a ``list`` named ``entry_points``, is used to initiate the conversation,\n for example with a ``CommandHandler`` or ``RegexHandler``.\n\n The second collection, a ``dict`` named ``states``, contains the different conversation steps\n and one or more associated handlers that should be used if the user sends a message when the\n conversation with them is currently in that state. You will probably use mostly\n ``MessageHandler`` and ``RegexHandler`` here.\n\n The third collection, a ``list`` named ``fallbacks``, is used if the user is currently in a\n conversation but the state has either no associated handler or the handler that is associated\n to the state is inappropriate for the update, for example if the update contains a command, but\n a regular text message is expected. You could use this for a ``/cancel`` command or to let the\n user know their message was not recognized.\n\n The fourth, optional collection of handlers, a ``list`` named ``timed_out_behavior`` is used if\n the wait for ``run_async`` takes longer than defined in ``run_async_timeout``. For example,\n you can let the user know that they should wait for a bit before they can continue.\n\n To change the state of conversation, the callback function of a handler must return the new\n state after responding to the user. If it does not return anything (returning ``None`` by\n default), the state will not change. To end the conversation, the callback function must\n return ``CallbackHandler.END`` or ``-1``.\n\n Args:\n entry_points (list): A list of ``Handler`` objects that can trigger the start of the\n conversation. The first handler which ``check_update`` method returns ``True`` will be\n used. If all return ``False``, the update is not handled.\n states (dict): A ``dict[object: list[Handler]]`` that defines the different states of\n conversation a user can be in and one or more associated ``Handler`` objects that\n should be used in that state. 
The first handler which ``check_update`` method returns\n ``True`` will be used.\n fallbacks (list): A list of handlers that might be used if the user is in a conversation,\n but every handler for their current state returned ``False`` on ``check_update``.\n The first handler which ``check_update`` method returns ``True`` will be used. If all\n return ``False``, the update is not handled.\n allow_reentry (Optional[bool]): If set to ``True``, a user that is currently in a\n conversation can restart the conversation by triggering one of the entry points.\n run_async_timeout (Optional[float]): If the previous handler for this user was running\n asynchronously using the ``run_async`` decorator, it might not be finished when the\n next message arrives. This timeout defines how long the conversation handler should\n wait for the next state to be computed. The default is ``None`` which means it will\n wait indefinitely.\n timed_out_behavior (Optional[list]): A list of handlers that might be used if\n the wait for ``run_async`` timed out. The first handler which ``check_update`` method\n returns ``True`` will be used. If all return ``False``, the update is not handled.\n\n \"\"\"\n\n END = -1\n\n def __init__(self,\n entry_points,\n states,\n fallbacks,\n allow_reentry=False,\n run_async_timeout=None,\n timed_out_behavior=None,\n per_chat=True,\n per_user=True,\n per_message=False):\n\n self.entry_points = entry_points\n \"\"\":type: list[telegram.ext.Handler]\"\"\"\n\n self.states = states\n \"\"\":type: dict[str: telegram.ext.Handler]\"\"\"\n\n self.fallbacks = fallbacks\n \"\"\":type: list[telegram.ext.Handler]\"\"\"\n\n self.allow_reentry = allow_reentry\n self.run_async_timeout = run_async_timeout\n\n self.timed_out_behavior = timed_out_behavior\n \"\"\":type: list[telegram.ext.Handler]\"\"\"\n\n self.conversations = dict()\n self.per_user = per_user\n self.per_chat = per_chat\n self.per_message = per_message\n \"\"\":type: dict[tuple: object]\"\"\"\n\n self.current_conversation = None\n self.current_handler = None\n\n self.logger = logging.getLogger(__name__)\n\n if not any((self.per_user, self.per_chat, self.per_message)):\n raise ValueError(\"'per_user', 'per_chat' and 'per_message' can't all be 'False'\")\n\n if self.per_message and not self.per_chat:\n logging.warning(\"If 'per_message=True' is used, 'per_chat=True' should also be used, \"\n \"since message IDs are not globally unique.\")\n\n all_handlers = list()\n all_handlers.extend(entry_points)\n all_handlers.extend(fallbacks)\n\n for state_handlers in states.values():\n all_handlers.extend(state_handlers)\n\n if self.per_message:\n for handler in all_handlers:\n if not isinstance(handler, CallbackQueryHandler):\n logging.warning(\"If 'per_message=True', all entry points and state handlers\"\n \" must be 'CallbackQueryHandler', since no other handlers \"\n \"have a message context.\")\n else:\n for handler in all_handlers:\n if isinstance(handler, CallbackQueryHandler):\n logging.warning(\"If 'per_message=False', 'CallbackQueryHandler' will not be \"\n \"tracked for every message.\")\n\n if self.per_chat:\n for handler in all_handlers:\n if isinstance(handler, (InlineQueryHandler, ChosenInlineResultHandler)):\n logging.warning(\"If 'per_chat=True', 'InlineQueryHandler' can not be used, \"\n \"since inline queries have no chat context.\")\n\n def _get_key(self, update):\n chat = update.effective_chat\n user = update.effective_user\n\n key = list()\n\n if self.per_chat:\n key.append(chat.id)\n\n if self.per_user and user is not 
None:\n key.append(user.id)\n\n if self.per_message:\n key.append(update.callback_query.inline_message_id\n or update.callback_query.message.message_id)\n\n return tuple(key)\n\n def check_update(self, update):\n\n # Ignore messages in channels\n if (not isinstance(update, Update) or update.channel_post or self.per_chat\n and (update.inline_query or update.chosen_inline_result) or self.per_message\n and not update.callback_query or update.callback_query and self.per_chat\n and not update.callback_query.message):\n return False\n\n key = self._get_key(update)\n state = self.conversations.get(key)\n\n # Resolve promises\n if isinstance(state, tuple) and len(state) is 2 and isinstance(state[1], Promise):\n self.logger.debug('waiting for promise...')\n\n old_state, new_state = state\n error = False\n try:\n res = new_state.result(timeout=self.run_async_timeout)\n except Exception as exc:\n self.logger.exception(\"Promise function raised exception\")\n self.logger.exception(\"{}\".format(exc))\n error = True\n\n if not error and new_state.done.is_set():\n self.update_state(res, key)\n state = self.conversations.get(key)\n\n else:\n for candidate in (self.timed_out_behavior or []):\n if candidate.check_update(update):\n # Save the current user and the selected handler for handle_update\n self.current_conversation = key\n self.current_handler = candidate\n\n return True\n\n else:\n return False\n\n self.logger.debug('selecting conversation %s with state %s' % (str(key), str(state)))\n\n handler = None\n\n # Search entry points for a match\n if state is None or self.allow_reentry:\n for entry_point in self.entry_points:\n if entry_point.check_update(update):\n handler = entry_point\n break\n\n else:\n if state is None:\n return False\n\n # Get the handler list for current state, if we didn't find one yet and we're still here\n if state is not None and not handler:\n handlers = self.states.get(state)\n\n for candidate in (handlers or []):\n if candidate.check_update(update):\n handler = candidate\n break\n\n # Find a fallback handler if all other handlers fail\n else:\n for fallback in self.fallbacks:\n if fallback.check_update(update):\n handler = fallback\n break\n\n else:\n return False\n\n # Save the current user and the selected handler for handle_update\n self.current_conversation = key\n self.current_handler = handler\n\n return True\n\n def handle_update(self, update, dispatcher):\n\n new_state = self.current_handler.handle_update(update, dispatcher)\n\n self.update_state(new_state, self.current_conversation)\n\n def update_state(self, new_state, key):\n if new_state == self.END:\n if key in self.conversations:\n del self.conversations[key]\n else:\n pass\n\n elif isinstance(new_state, Promise):\n self.conversations[key] = (self.conversations.get(key), new_state)\n\n elif new_state is not None:\n self.conversations[key] = new_state\n",
"path": "telegram/ext/conversationhandler.py"
}
] | diff --git a/AUTHORS.rst b/AUTHORS.rst
index 040853d53e9..5a30084174a 100644
--- a/AUTHORS.rst
+++ b/AUTHORS.rst
@@ -32,6 +32,7 @@ The following wonderful people contributed directly or indirectly to this projec
- `Jacob Bom <https://github.com/bomjacob>`_
- `JASON0916 <https://github.com/JASON0916>`_
- `jeffffc <https://github.com/jeffffc>`_
+- `Jelle Besseling <https://github.com/pingiun>`_
- `jh0ker <https://github.com/jh0ker>`_
- `John Yong <https://github.com/whipermr5>`_
- `jossalgon <https://github.com/jossalgon>`_
diff --git a/telegram/ext/conversationhandler.py b/telegram/ext/conversationhandler.py
index bfe880047f1..e6f9b6da4c6 100644
--- a/telegram/ext/conversationhandler.py
+++ b/telegram/ext/conversationhandler.py
@@ -160,7 +160,7 @@ def _get_key(self, update):
if self.per_chat:
key.append(chat.id)
- if self.per_user:
+ if self.per_user and user is not None:
key.append(user.id)
if self.per_message:
diff --git a/tests/test_conversationhandler.py b/tests/test_conversationhandler.py
index f15f28df6a4..231306da78c 100644
--- a/tests/test_conversationhandler.py
+++ b/tests/test_conversationhandler.py
@@ -330,6 +330,12 @@ def test_perChatMessageWithoutChat(self):
update = Update(0, callback_query=cbq)
handler.check_update(update)
+ def test_channelMessageWithoutChat(self):
+ handler = ConversationHandler(entry_points=[CommandHandler('start', self.start_end)], states={}, fallbacks=[])
+ message = Message(0, None, None, Chat(0, Chat.CHANNEL, "Misses Test"))
+ update = Update(0, message=message)
+ handler.check_update(update)
+
if __name__ == '__main__':
unittest.main()
|
OctoPrint__OctoPrint-973 | Add more longRunningCommands (Specifically M400)
1. What were you doing?
> Running a print that makes liberal use of `M400`s
2. What did you expect to happen?
> The print to run to completion
3. What happened instead?
> The print failed with a communication error
4. Branch & Commit or Version of OctoPrint:
> Version: 1.3.0-dev-71-g3cb8757 (HEAD branch)
5. Printer model & used firmware incl. version
(if applicable - always include if unsure):
> Marlin Integration Branch (3c54992c1c76af1c4206fb4b1ae915ad6873f3bb)
6. Browser and Version of Browser, Operating
System running Browser (if applicable - always
include if unsure):
> Chrome on Windows
7. Link to octoprint.log on gist.github.com or pastebin.com
(ALWAYS INCLUDE AND DO NOT TRUNCATE):
> N/A
8. Link to contents of terminal tab or serial.log on
gist.github.com or pastebin.com (if applicable - always
include if unsure or reporting communication issues AND
DO NOT TRUNCATE):
> N/A
9. Link to contents of Javascript console in the browser
on gist.github.com or pastebin.com or alternatively a
screenshot (if applicable - always include if unsure
or reporting UI issues):
> N/A
10. Screenshot(s) showing the problem (if applicable - always
include if unsure or reporting UI issues):
> N/A
I have read the FAQ.
I use M400 a good amount in my GCode, and that combined with a large move buffer can cause a >30s delay between sending a command and receiving the response. This is fixed by adding M400 to the "Long running commands" list in the settings. I think it should be there by default.
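For reference, the defaults live under `serial` in `default_settings` in `octoprint/settings.py` (visible in the file below), and the same key can be overridden per instance via `config.yaml`. Below is a minimal sketch of what adding `M400` to the default list would look like; only the relevant key is reproduced and everything else stays unchanged:
```
default_settings = {
    "serial": {
        # ...other serial defaults (port, baudrate, timeouts, ...) unchanged...
        # G4 (dwell) and the homing/levelling commands are already treated as
        # long running; M400 waits for all buffered moves to finish and can
        # easily exceed the 30 s communication timeout, so it belongs here too.
        "longRunningCommands": ["G4", "G28", "G29", "G30", "G32", "M400"],
    },
    # ...remaining sections unchanged...
}
```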
| [
{
"content": "# coding=utf-8\n\"\"\"\nThis module represents OctoPrint's settings management. Within this module the default settings for the core\napplication are defined and the instance of the :class:`Settings` is held, which offers getter and setter\nmethods for the raw configuration values as well as various convenience methods to access the paths to base folders\nof various types and the configuration file itself.\n\n.. autodata:: default_settings\n :annotation: = dict(...)\n\n.. autodata:: valid_boolean_trues\n\n.. autofunction:: settings\n\n.. autoclass:: Settings\n :members:\n :undoc-members:\n\"\"\"\n\nfrom __future__ import absolute_import\n\n__author__ = \"Gina Häußge <[email protected]>\"\n__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'\n__copyright__ = \"Copyright (C) 2014 The OctoPrint Project - Released under terms of the AGPLv3 License\"\n\nimport sys\nimport os\nimport yaml\nimport logging\nimport re\nimport uuid\n\n_APPNAME = \"OctoPrint\"\n\n_instance = None\n\ndef settings(init=False, basedir=None, configfile=None):\n\t\"\"\"\n\tFactory method for initially constructing and consecutively retrieving the :class:`~octoprint.settings.Settings`\n\tsingleton.\n\n\tArguments:\n\t init (boolean): A flag indicating whether this is the initial call to construct the singleton (True) or not\n\t (False, default). If this is set to True and the plugin manager has already been initialized, a :class:`ValueError`\n\t will be raised. The same will happen if the plugin manager has not yet been initialized and this is set to\n\t False.\n\t basedir (str): Path of the base directoy for all of OctoPrint's settings, log files, uploads etc. If not set\n\t the default will be used: ``~/.octoprint`` on Linux, ``%APPDATA%/OctoPrint`` on Windows and\n\t ``~/Library/Application Support/OctoPrint`` on MacOS.\n\t configfile (str): Path of the configuration file (``config.yaml``) to work on. 
If not set the default will\n\t be used: ``<basedir>/config.yaml`` for ``basedir`` as defined above.\n\n\tReturns:\n\t Settings: The fully initialized :class:`Settings` instance.\n\n\tRaises:\n\t ValueError: ``init`` is True but settings are already initialized or vice versa.\n\t\"\"\"\n\tglobal _instance\n\tif _instance is not None:\n\t\tif init:\n\t\t\traise ValueError(\"Settings Manager already initialized\")\n\n\telse:\n\t\tif init:\n\t\t\t_instance = Settings(configfile=configfile, basedir=basedir)\n\t\telse:\n\t\t\traise ValueError(\"Settings not initialized yet\")\n\n\treturn _instance\n\ndefault_settings = {\n\t\"serial\": {\n\t\t\"port\": None,\n\t\t\"baudrate\": None,\n\t\t\"autoconnect\": False,\n\t\t\"log\": False,\n\t\t\"timeout\": {\n\t\t\t\"detection\": 0.5,\n\t\t\t\"connection\": 10,\n\t\t\t\"communication\": 30,\n\t\t\t\"temperature\": 5,\n\t\t\t\"sdStatus\": 1\n\t\t},\n\t\t\"additionalPorts\": [],\n\t\t\"longRunningCommands\": [\"G4\", \"G28\", \"G29\", \"G30\", \"G32\"]\n\t},\n\t\"server\": {\n\t\t\"host\": \"0.0.0.0\",\n\t\t\"port\": 5000,\n\t\t\"firstRun\": True,\n\t\t\"secretKey\": None,\n\t\t\"reverseProxy\": {\n\t\t\t\"prefixHeader\": \"X-Script-Name\",\n\t\t\t\"schemeHeader\": \"X-Scheme\",\n\t\t\t\"hostHeader\": \"X-Forwarded-Host\",\n\t\t\t\"prefixFallback\": \"\",\n\t\t\t\"schemeFallback\": \"\",\n\t\t\t\"hostFallback\": \"\"\n\t\t},\n\t\t\"uploads\": {\n\t\t\t\"maxSize\": 1 * 1024 * 1024 * 1024, # 1GB\n\t\t\t\"nameSuffix\": \"name\",\n\t\t\t\"pathSuffix\": \"path\"\n\t\t},\n\t\t\"maxSize\": 100 * 1024, # 100 KB\n\t},\n\t\"webcam\": {\n\t\t\"stream\": None,\n\t\t\"snapshot\": None,\n\t\t\"ffmpeg\": None,\n\t\t\"ffmpegThreads\": 1,\n\t\t\"bitrate\": \"5000k\",\n\t\t\"watermark\": True,\n\t\t\"flipH\": False,\n\t\t\"flipV\": False,\n\t\t\"rotate90\" : False,\n\t\t\"timelapse\": {\n\t\t\t\"type\": \"off\",\n\t\t\t\"options\": {},\n\t\t\t\"postRoll\": 0,\n\t\t\t\"fps\": 25\n\t\t}\n\t},\n\t\"gcodeViewer\": {\n\t\t\"enabled\": True,\n\t\t\"mobileSizeThreshold\": 2 * 1024 * 1024, # 2MB\n\t\t\"sizeThreshold\": 20 * 1024 * 1024, # 20MB\n\t},\n\t\"gcodeAnalysis\": {\n\t\t\"maxExtruders\": 10\n\t},\n\t\"feature\": {\n\t\t\"temperatureGraph\": True,\n\t\t\"waitForStartOnConnect\": False,\n\t\t\"alwaysSendChecksum\": False,\n\t\t\"sendChecksumWithUnknownCommands\": False,\n\t\t\"unknownCommandsNeedAck\": False,\n\t\t\"sdSupport\": True,\n\t\t\"sdAlwaysAvailable\": False,\n\t\t\"swallowOkAfterResend\": True,\n\t\t\"repetierTargetTemp\": False,\n\t\t\"externalHeatupDetection\": True,\n\t\t\"supportWait\": True,\n\t\t\"keyboardControl\": True,\n\t\t\"pollWatched\": False\n\t},\n\t\"folder\": {\n\t\t\"uploads\": None,\n\t\t\"timelapse\": None,\n\t\t\"timelapse_tmp\": None,\n\t\t\"logs\": None,\n\t\t\"virtualSd\": None,\n\t\t\"watched\": None,\n\t\t\"plugins\": None,\n\t\t\"slicingProfiles\": None,\n\t\t\"printerProfiles\": None,\n\t\t\"scripts\": None,\n\t\t\"translations\": None,\n\t\t\"generated\": None,\n\t\t\"data\": None\n\t},\n\t\"temperature\": {\n\t\t\"profiles\": [\n\t\t\t{\"name\": \"ABS\", \"extruder\" : 210, \"bed\" : 100 },\n\t\t\t{\"name\": \"PLA\", \"extruder\" : 180, \"bed\" : 60 }\n\t\t],\n\t\t\"cutoff\": 30\n\t},\n\t\"printerProfiles\": {\n\t\t\"default\": None,\n\t\t\"defaultProfile\": {}\n\t},\n\t\"printerParameters\": {\n\t\t\"pauseTriggers\": [],\n\t\t\"defaultExtrusionLength\": 5\n\t},\n\t\"appearance\": {\n\t\t\"name\": \"\",\n\t\t\"color\": \"default\",\n\t\t\"colorTransparent\": False,\n\t\t\"defaultLanguage\": \"_default\",\n\t\t\"components\": 
{\n\t\t\t\"order\": {\n\t\t\t\t\"navbar\": [\"settings\", \"systemmenu\", \"login\"],\n\t\t\t\t\"sidebar\": [\"connection\", \"state\", \"files\"],\n\t\t\t\t\"tab\": [\"temperature\", \"control\", \"gcodeviewer\", \"terminal\", \"timelapse\"],\n\t\t\t\t\"settings\": [\n\t\t\t\t\t\"section_printer\", \"serial\", \"printerprofiles\", \"temperatures\", \"terminalfilters\", \"gcodescripts\",\n\t\t\t\t\t\"section_features\", \"features\", \"webcam\", \"accesscontrol\", \"api\",\n\t\t\t\t\t\"section_octoprint\", \"folders\", \"appearance\", \"logs\", \"plugin_pluginmanager\", \"plugin_softwareupdate\"\n\t\t\t\t],\n\t\t\t\t\"usersettings\": [\"access\", \"interface\"],\n\t\t\t\t\"generic\": []\n\t\t\t},\n\t\t\t\"disabled\": {\n\t\t\t\t\"navbar\": [],\n\t\t\t\t\"sidebar\": [],\n\t\t\t\t\"tab\": [],\n\t\t\t\t\"settings\": [],\n\t\t\t\t\"usersettings\": [],\n\t\t\t\t\"generic\": []\n\t\t\t}\n\t\t}\n\t},\n\t\"controls\": [],\n\t\"system\": {\n\t\t\"actions\": []\n\t},\n\t\"accessControl\": {\n\t\t\"enabled\": True,\n\t\t\"salt\": None,\n\t\t\"userManager\": \"octoprint.users.FilebasedUserManager\",\n\t\t\"userfile\": None,\n\t\t\"autologinLocal\": False,\n\t\t\"localNetworks\": [\"127.0.0.0/8\"],\n\t\t\"autologinAs\": None\n\t},\n\t\"slicing\": {\n\t\t\"enabled\": True,\n\t\t\"defaultSlicer\": \"cura\",\n\t\t\"defaultProfiles\": None\n\t},\n\t\"events\": {\n\t\t\"enabled\": True,\n\t\t\"subscriptions\": []\n\t},\n\t\"api\": {\n\t\t\"enabled\": True,\n\t\t\"key\": None,\n\t\t\"allowCrossOrigin\": False,\n\t\t\"apps\": {}\n\t},\n\t\"terminalFilters\": [\n\t\t{ \"name\": \"Suppress M105 requests/responses\", \"regex\": \"(Send: M105)|(Recv: ok (B|T\\d*):)\" },\n\t\t{ \"name\": \"Suppress M27 requests/responses\", \"regex\": \"(Send: M27)|(Recv: SD printing byte)\" }\n\t],\n\t\"plugins\": {\n\t\t\"_disabled\": []\n\t},\n\t\"scripts\": {\n\t\t\"gcode\": {\n\t\t\t\"afterPrintCancelled\": \"; disable motors\\nM84\\n\\n;disable all heaters\\n{% snippet 'disable_hotends' %}\\nM140 S0\\n\\n;disable fan\\nM106 S0\",\n\t\t\t\"snippets\": {\n\t\t\t\t\"disable_hotends\": \"{% for tool in range(printer_profile.extruder.count) %}M104 T{{ tool }} S0\\n{% endfor %}\"\n\t\t\t}\n\t\t}\n\t},\n\t\"devel\": {\n\t\t\"stylesheet\": \"css\",\n\t\t\"cache\": {\n\t\t\t\"enabled\": True\n\t\t},\n\t\t\"webassets\": {\n\t\t\t\"minify\": False,\n\t\t\t\"bundle\": True,\n\t\t\t\"clean_on_startup\": True\n\t\t},\n\t\t\"virtualPrinter\": {\n\t\t\t\"enabled\": False,\n\t\t\t\"okAfterResend\": False,\n\t\t\t\"forceChecksum\": False,\n\t\t\t\"okWithLinenumber\": False,\n\t\t\t\"numExtruders\": 1,\n\t\t\t\"includeCurrentToolInTemps\": True,\n\t\t\t\"movementSpeed\": {\n\t\t\t\t\"x\": 6000,\n\t\t\t\t\"y\": 6000,\n\t\t\t\t\"z\": 200,\n\t\t\t\t\"e\": 300\n\t\t\t},\n\t\t\t\"hasBed\": True,\n\t\t\t\"repetierStyleTargetTemperature\": False,\n\t\t\t\"okBeforeCommandOutput\": False,\n\t\t\t\"smoothieTemperatureReporting\": False,\n\t\t\t\"extendedSdFileList\": False,\n\t\t\t\"throttle\": 0.01,\n\t\t\t\"waitOnLongMoves\": False,\n\t\t\t\"rxBuffer\": 64,\n\t\t\t\"txBuffer\": 40,\n\t\t\t\"commandBuffer\": 4,\n\t\t\t\"sendWait\": True,\n\t\t\t\"waitInterval\": 1.0\n\t\t}\n\t}\n}\n\"\"\"The default settings of the core application.\"\"\"\n\nvalid_boolean_trues = [True, \"true\", \"yes\", \"y\", \"1\"]\n\"\"\" Values that are considered to be equivalent to the boolean ``True`` value, used for type conversion in various places.\"\"\"\n\nclass Settings(object):\n\t\"\"\"\n\tThe :class:`Settings` class allows managing all of OctoPrint's settings. 
It takes care of initializing the settings\n\tdirectory, loading the configuration from ``config.yaml``, persisting changes to disk etc and provides access\n\tmethods for getting and setting specific values from the overall settings structure via paths.\n\n\tA general word on the concept of paths, since they play an important role in OctoPrint's settings management. A\n\tpath is basically a list or tuple consisting of keys to follow down into the settings (which are basically like\n\ta ``dict``) in order to set or retrieve a specific value (or more than one). For example, for a settings\n\tstructure like the following::\n\n\t serial:\n\t port: \"/dev/ttyACM0\"\n\t baudrate: 250000\n\t timeouts:\n\t communication: 20.0\n\t temperature: 5.0\n\t sdStatus: 1.0\n\t connection: 10.0\n\t server:\n\t host: \"0.0.0.0\"\n\t port: 5000\n\n\tthe following paths could be used:\n\n\t========================================== ============================================================================\n\tPath Value\n\t========================================== ============================================================================\n\t``[\"serial\", \"port\"]`` ::\n\n\t \"/dev/ttyACM0\"\n\n\t``[\"serial\", \"timeouts\"]`` ::\n\n\t communication: 20.0\n\t temperature: 5.0\n\t sdStatus: 1.0\n\t connection: 10.0\n\n\t``[\"serial\", \"timeouts\", \"temperature\"]`` ::\n\n\t 5.0\n\n\t``[\"server\", \"port\"]`` ::\n\n\t 5000\n\n\t========================================== ============================================================================\n\n\tHowever, these would be invalid paths: ``[\"key\"]``, ``[\"serial\", \"port\", \"value\"]``, ``[\"server\", \"host\", 3]``.\n\t\"\"\"\n\n\tdef __init__(self, configfile=None, basedir=None):\n\t\tself._logger = logging.getLogger(__name__)\n\n\t\tself._basedir = None\n\n\t\tself._config = None\n\t\tself._dirty = False\n\t\tself._mtime = None\n\n\t\tself._get_preprocessors = dict(\n\t\t\tcontrols=self._process_custom_controls\n\t\t)\n\t\tself._set_preprocessors = dict()\n\n\t\tself._init_basedir(basedir)\n\n\t\tif configfile is not None:\n\t\t\tself._configfile = configfile\n\t\telse:\n\t\t\tself._configfile = os.path.join(self._basedir, \"config.yaml\")\n\t\tself.load(migrate=True)\n\n\t\tif self.get([\"api\", \"key\"]) is None:\n\t\t\tself.set([\"api\", \"key\"], ''.join('%02X' % ord(z) for z in uuid.uuid4().bytes))\n\t\t\tself.save(force=True)\n\n\t\tself._script_env = self._init_script_templating()\n\n\tdef _init_basedir(self, basedir):\n\t\tif basedir is not None:\n\t\t\tself._basedir = basedir\n\t\telse:\n\t\t\tself._basedir = _default_basedir(_APPNAME)\n\n\t\tif not os.path.isdir(self._basedir):\n\t\t\tos.makedirs(self._basedir)\n\n\tdef _get_default_folder(self, type):\n\t\tfolder = default_settings[\"folder\"][type]\n\t\tif folder is None:\n\t\t\tfolder = os.path.join(self._basedir, type.replace(\"_\", os.path.sep))\n\t\treturn folder\n\n\tdef _init_script_templating(self):\n\t\tfrom jinja2 import Environment, BaseLoader, FileSystemLoader, ChoiceLoader, TemplateNotFound\n\t\tfrom jinja2.nodes import Include, Const\n\t\tfrom jinja2.ext import Extension\n\n\t\tclass SnippetExtension(Extension):\n\t\t\ttags = {\"snippet\"}\n\t\t\tfields = Include.fields\n\n\t\t\tdef parse(self, parser):\n\t\t\t\tnode = parser.parse_include()\n\t\t\t\tif not node.template.value.startswith(\"/\"):\n\t\t\t\t\tnode.template.value = \"snippets/\" + node.template.value\n\t\t\t\treturn node\n\n\t\tclass SettingsScriptLoader(BaseLoader):\n\t\t\tdef __init__(self, 
s):\n\t\t\t\tself._settings = s\n\n\t\t\tdef get_source(self, environment, template):\n\t\t\t\tparts = template.split(\"/\")\n\t\t\t\tif not len(parts):\n\t\t\t\t\traise TemplateNotFound(template)\n\n\t\t\t\tscript = self._settings.get([\"scripts\"], merged=True)\n\t\t\t\tfor part in parts:\n\t\t\t\t\tif isinstance(script, dict) and part in script:\n\t\t\t\t\t\tscript = script[part]\n\t\t\t\t\telse:\n\t\t\t\t\t\traise TemplateNotFound(template)\n\t\t\t\tsource = script\n\t\t\t\tif source is None:\n\t\t\t\t\traise TemplateNotFound(template)\n\t\t\t\tmtime = self._settings._mtime\n\t\t\t\treturn source, None, lambda: mtime == self._settings.last_modified\n\n\t\t\tdef list_templates(self):\n\t\t\t\tscripts = self._settings.get([\"scripts\"], merged=True)\n\t\t\t\treturn self._get_templates(scripts)\n\n\t\t\tdef _get_templates(self, scripts):\n\t\t\t\ttemplates = []\n\t\t\t\tfor key in scripts:\n\t\t\t\t\tif isinstance(scripts[key], dict):\n\t\t\t\t\t\ttemplates += map(lambda x: key + \"/\" + x, self._get_templates(scripts[key]))\n\t\t\t\t\telif isinstance(scripts[key], basestring):\n\t\t\t\t\t\ttemplates.append(key)\n\t\t\t\treturn templates\n\n\t\tclass SelectLoader(BaseLoader):\n\t\t\tdef __init__(self, default, mapping, sep=\":\"):\n\t\t\t\tself._default = default\n\t\t\t\tself._mapping = mapping\n\t\t\t\tself._sep = sep\n\n\t\t\tdef get_source(self, environment, template):\n\t\t\t\tif self._sep in template:\n\t\t\t\t\tprefix, name = template.split(self._sep, 1)\n\t\t\t\t\tif not prefix in self._mapping:\n\t\t\t\t\t\traise TemplateNotFound(template)\n\t\t\t\t\treturn self._mapping[prefix].get_source(environment, name)\n\t\t\t\treturn self._default.get_source(environment, template)\n\n\t\t\tdef list_templates(self):\n\t\t\t\treturn self._default.list_templates()\n\n\t\tclass RelEnvironment(Environment):\n\t\t\tdef __init__(self, prefix_sep=\":\", *args, **kwargs):\n\t\t\t\tEnvironment.__init__(self, *args, **kwargs)\n\t\t\t\tself._prefix_sep = prefix_sep\n\n\t\t\tdef join_path(self, template, parent):\n\t\t\t\tprefix, name = self._split_prefix(template)\n\n\t\t\t\tif name.startswith(\"/\"):\n\t\t\t\t\treturn self._join_prefix(prefix, name[1:])\n\t\t\t\telse:\n\t\t\t\t\t_, parent_name = self._split_prefix(parent)\n\t\t\t\t\tparent_base = parent_name.split(\"/\")[:-1]\n\t\t\t\t\treturn self._join_prefix(prefix, \"/\".join(parent_base) + \"/\" + name)\n\n\t\t\tdef _split_prefix(self, template):\n\t\t\t\tif self._prefix_sep in template:\n\t\t\t\t\treturn template.split(self._prefix_sep, 1)\n\t\t\t\telse:\n\t\t\t\t\treturn \"\", template\n\n\t\t\tdef _join_prefix(self, prefix, template):\n\t\t\t\tif len(prefix):\n\t\t\t\t\treturn prefix + self._prefix_sep + template\n\t\t\t\telse:\n\t\t\t\t\treturn template\n\n\t\tfile_system_loader = FileSystemLoader(self.getBaseFolder(\"scripts\"))\n\t\tsettings_loader = SettingsScriptLoader(self)\n\t\tchoice_loader = ChoiceLoader([file_system_loader, settings_loader])\n\t\tselect_loader = SelectLoader(choice_loader, dict(bundled=settings_loader, file=file_system_loader))\n\t\treturn RelEnvironment(loader=select_loader, extensions=[SnippetExtension])\n\n\tdef _get_script_template(self, script_type, name, source=False):\n\t\tfrom jinja2 import TemplateNotFound\n\n\t\ttemplate_name = script_type + \"/\" + name\n\t\ttry:\n\t\t\tif source:\n\t\t\t\ttemplate_name, _, _ = self._script_env.loader.get_source(self._script_env, template_name)\n\t\t\t\treturn template_name\n\t\t\telse:\n\t\t\t\treturn self._script_env.get_template(template_name)\n\t\texcept 
TemplateNotFound:\n\t\t\treturn None\n\t\texcept:\n\t\t\tself._logger.exception(\"Exception while trying to resolve template {template_name}\".format(**locals()))\n\t\t\treturn None\n\n\tdef _get_scripts(self, script_type):\n\t\treturn self._script_env.list_templates(filter_func=lambda x: x.startswith(script_type+\"/\"))\n\n\tdef _process_custom_controls(self, controls):\n\t\tdef process_control(c):\n\t\t\t# shallow copy\n\t\t\tresult = dict(c)\n\n\t\t\tif \"regex\" in result and \"template\" in result:\n\t\t\t\t# if it's a template matcher, we need to add a key to associate with the matcher output\n\t\t\t\timport hashlib\n\t\t\t\tkey_hash = hashlib.md5()\n\t\t\t\tkey_hash.update(result[\"regex\"])\n\t\t\t\tresult[\"key\"] = key_hash.hexdigest()\n\n\t\t\t\ttemplate_key_hash = hashlib.md5()\n\t\t\t\ttemplate_key_hash.update(result[\"template\"])\n\t\t\t\tresult[\"template_key\"] = template_key_hash.hexdigest()\n\n\t\t\telif \"children\" in result:\n\t\t\t\t# if it has children we need to process them recursively\n\t\t\t\tresult[\"children\"] = map(process_control, [child for child in result[\"children\"] if child is not None])\n\n\t\t\treturn result\n\n\t\treturn map(process_control, controls)\n\n\t@property\n\tdef effective(self):\n\t\timport octoprint.util\n\t\treturn octoprint.util.dict_merge(default_settings, self._config)\n\n\t@property\n\tdef effective_yaml(self):\n\t\timport yaml\n\t\treturn yaml.safe_dump(self.effective)\n\n\t#~~ load and save\n\n\tdef load(self, migrate=False):\n\t\tif os.path.exists(self._configfile) and os.path.isfile(self._configfile):\n\t\t\twith open(self._configfile, \"r\") as f:\n\t\t\t\tself._config = yaml.safe_load(f)\n\t\t\t\tself._mtime = self.last_modified\n\t\t# changed from else to handle cases where the file exists, but is empty / 0 bytes\n\t\tif not self._config:\n\t\t\tself._config = {}\n\n\t\tif migrate:\n\t\t\tself._migrate_config()\n\n\tdef _migrate_config(self):\n\t\tdirty = False\n\n\t\tmigrators = (\n\t\t\tself._migrate_event_config,\n\t\t\tself._migrate_reverse_proxy_config,\n\t\t\tself._migrate_printer_parameters,\n\t\t\tself._migrate_gcode_scripts\n\t\t)\n\n\t\tfor migrate in migrators:\n\t\t\tdirty = migrate() or dirty\n\t\tif dirty:\n\t\t\tself.save(force=True)\n\n\tdef _migrate_gcode_scripts(self):\n\t\t\"\"\"\n\t\tMigrates an old development version of gcode scripts to the new template based format.\n\t\t\"\"\"\n\n\t\tdirty = False\n\t\tif \"scripts\" in self._config:\n\t\t\tif \"gcode\" in self._config[\"scripts\"]:\n\t\t\t\tif \"templates\" in self._config[\"scripts\"][\"gcode\"]:\n\t\t\t\t\tdel self._config[\"scripts\"][\"gcode\"][\"templates\"]\n\n\t\t\t\treplacements = dict(\n\t\t\t\t\tdisable_steppers=\"M84\",\n\t\t\t\t\tdisable_hotends=\"{% snippet 'disable_hotends' %}\",\n\t\t\t\t\tdisable_bed=\"M140 S0\",\n\t\t\t\t\tdisable_fan=\"M106 S0\"\n\t\t\t\t)\n\n\t\t\t\tfor name, script in self._config[\"scripts\"][\"gcode\"].items():\n\t\t\t\t\tself.saveScript(\"gcode\", name, script.format(**replacements))\n\t\t\tdel self._config[\"scripts\"]\n\t\t\tdirty = True\n\t\treturn dirty\n\n\tdef _migrate_printer_parameters(self):\n\t\t\"\"\"\n\t\tMigrates the old \"printer > parameters\" data structure to the new printer profile mechanism.\n\t\t\"\"\"\n\t\tdefault_profile = self._config[\"printerProfiles\"][\"defaultProfile\"] if \"printerProfiles\" in self._config and \"defaultProfile\" in self._config[\"printerProfiles\"] else dict()\n\t\tdirty = False\n\n\t\tif \"printerParameters\" in self._config:\n\t\t\tprinter_parameters = 
self._config[\"printerParameters\"]\n\n\t\t\tif \"movementSpeed\" in printer_parameters or \"invertAxes\" in printer_parameters:\n\t\t\t\tdefault_profile[\"axes\"] = dict(x=dict(), y=dict(), z=dict(), e=dict())\n\t\t\t\tif \"movementSpeed\" in printer_parameters:\n\t\t\t\t\tfor axis in (\"x\", \"y\", \"z\", \"e\"):\n\t\t\t\t\t\tif axis in printer_parameters[\"movementSpeed\"]:\n\t\t\t\t\t\t\tdefault_profile[\"axes\"][axis][\"speed\"] = printer_parameters[\"movementSpeed\"][axis]\n\t\t\t\t\tdel self._config[\"printerParameters\"][\"movementSpeed\"]\n\t\t\t\tif \"invertedAxes\" in printer_parameters:\n\t\t\t\t\tfor axis in (\"x\", \"y\", \"z\", \"e\"):\n\t\t\t\t\t\tif axis in printer_parameters[\"invertedAxes\"]:\n\t\t\t\t\t\t\tdefault_profile[\"axes\"][axis][\"inverted\"] = True\n\t\t\t\t\tdel self._config[\"printerParameters\"][\"invertedAxes\"]\n\n\t\t\tif \"numExtruders\" in printer_parameters or \"extruderOffsets\" in printer_parameters:\n\t\t\t\tif not \"extruder\" in default_profile:\n\t\t\t\t\tdefault_profile[\"extruder\"] = dict()\n\n\t\t\t\tif \"numExtruders\" in printer_parameters:\n\t\t\t\t\tdefault_profile[\"extruder\"][\"count\"] = printer_parameters[\"numExtruders\"]\n\t\t\t\t\tdel self._config[\"printerParameters\"][\"numExtruders\"]\n\t\t\t\tif \"extruderOffsets\" in printer_parameters:\n\t\t\t\t\textruder_offsets = []\n\t\t\t\t\tfor offset in printer_parameters[\"extruderOffsets\"]:\n\t\t\t\t\t\tif \"x\" in offset and \"y\" in offset:\n\t\t\t\t\t\t\textruder_offsets.append((offset[\"x\"], offset[\"y\"]))\n\t\t\t\t\tdefault_profile[\"extruder\"][\"offsets\"] = extruder_offsets\n\t\t\t\t\tdel self._config[\"printerParameters\"][\"extruderOffsets\"]\n\n\t\t\tif \"bedDimensions\" in printer_parameters:\n\t\t\t\tbed_dimensions = printer_parameters[\"bedDimensions\"]\n\t\t\t\tif not \"volume\" in default_profile:\n\t\t\t\t\tdefault_profile[\"volume\"] = dict()\n\n\t\t\t\tif \"circular\" in bed_dimensions and \"r\" in bed_dimensions and bed_dimensions[\"circular\"]:\n\t\t\t\t\tdefault_profile[\"volume\"][\"formFactor\"] = \"circular\"\n\t\t\t\t\tdefault_profile[\"volume\"][\"width\"] = 2 * bed_dimensions[\"r\"]\n\t\t\t\t\tdefault_profile[\"volume\"][\"depth\"] = default_profile[\"volume\"][\"width\"]\n\t\t\t\telif \"x\" in bed_dimensions or \"y\" in bed_dimensions:\n\t\t\t\t\tdefault_profile[\"volume\"][\"formFactor\"] = \"rectangular\"\n\t\t\t\t\tif \"x\" in bed_dimensions:\n\t\t\t\t\t\tdefault_profile[\"volume\"][\"width\"] = bed_dimensions[\"x\"]\n\t\t\t\t\tif \"y\" in bed_dimensions:\n\t\t\t\t\t\tdefault_profile[\"volume\"][\"depth\"] = bed_dimensions[\"y\"]\n\t\t\t\tdel self._config[\"printerParameters\"][\"bedDimensions\"]\n\n\t\t\tdirty = True\n\n\t\tif dirty:\n\t\t\tif not \"printerProfiles\" in self._config:\n\t\t\t\tself._config[\"printerProfiles\"] = dict()\n\t\t\tself._config[\"printerProfiles\"][\"defaultProfile\"] = default_profile\n\t\treturn dirty\n\n\tdef _migrate_reverse_proxy_config(self):\n\t\t\"\"\"\n\t\tMigrates the old \"server > baseUrl\" and \"server > scheme\" configuration entries to\n\t\t\"server > reverseProxy > prefixFallback\" and \"server > reverseProxy > schemeFallback\".\n\t\t\"\"\"\n\t\tif \"server\" in self._config.keys() and (\"baseUrl\" in self._config[\"server\"] or \"scheme\" in self._config[\"server\"]):\n\t\t\tprefix = \"\"\n\t\t\tif \"baseUrl\" in self._config[\"server\"]:\n\t\t\t\tprefix = self._config[\"server\"][\"baseUrl\"]\n\t\t\t\tdel self._config[\"server\"][\"baseUrl\"]\n\n\t\t\tscheme = \"\"\n\t\t\tif \"scheme\" in 
self._config[\"server\"]:\n\t\t\t\tscheme = self._config[\"server\"][\"scheme\"]\n\t\t\t\tdel self._config[\"server\"][\"scheme\"]\n\n\t\t\tif not \"reverseProxy\" in self._config[\"server\"] or not isinstance(self._config[\"server\"][\"reverseProxy\"], dict):\n\t\t\t\tself._config[\"server\"][\"reverseProxy\"] = dict()\n\t\t\tif prefix:\n\t\t\t\tself._config[\"server\"][\"reverseProxy\"][\"prefixFallback\"] = prefix\n\t\t\tif scheme:\n\t\t\t\tself._config[\"server\"][\"reverseProxy\"][\"schemeFallback\"] = scheme\n\t\t\tself._logger.info(\"Migrated reverse proxy configuration to new structure\")\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\tdef _migrate_event_config(self):\n\t\t\"\"\"\n\t\tMigrates the old event configuration format of type \"events > gcodeCommandTrigger\" and\n\t\t\"event > systemCommandTrigger\" to the new events format.\n\t\t\"\"\"\n\t\tif \"events\" in self._config.keys() and (\"gcodeCommandTrigger\" in self._config[\"events\"] or \"systemCommandTrigger\" in self._config[\"events\"]):\n\t\t\tself._logger.info(\"Migrating config (event subscriptions)...\")\n\n\t\t\t# migrate event hooks to new format\n\t\t\tplaceholderRe = re.compile(\"%\\((.*?)\\)s\")\n\n\t\t\teventNameReplacements = {\n\t\t\t\t\"ClientOpen\": \"ClientOpened\",\n\t\t\t\t\"TransferStart\": \"TransferStarted\"\n\t\t\t}\n\t\t\tpayloadDataReplacements = {\n\t\t\t\t\"Upload\": {\"data\": \"{file}\", \"filename\": \"{file}\"},\n\t\t\t\t\"Connected\": {\"data\": \"{port} at {baudrate} baud\"},\n\t\t\t\t\"FileSelected\": {\"data\": \"{file}\", \"filename\": \"{file}\"},\n\t\t\t\t\"TransferStarted\": {\"data\": \"{remote}\", \"filename\": \"{remote}\"},\n\t\t\t\t\"TransferDone\": {\"data\": \"{remote}\", \"filename\": \"{remote}\"},\n\t\t\t\t\"ZChange\": {\"data\": \"{new}\"},\n\t\t\t\t\"CaptureStart\": {\"data\": \"{file}\"},\n\t\t\t\t\"CaptureDone\": {\"data\": \"{file}\"},\n\t\t\t\t\"MovieDone\": {\"data\": \"{movie}\", \"filename\": \"{gcode}\"},\n\t\t\t\t\"Error\": {\"data\": \"{error}\"},\n\t\t\t\t\"PrintStarted\": {\"data\": \"{file}\", \"filename\": \"{file}\"},\n\t\t\t\t\"PrintDone\": {\"data\": \"{file}\", \"filename\": \"{file}\"},\n\t\t\t}\n\n\t\t\tdef migrateEventHook(event, command):\n\t\t\t\t# migrate placeholders\n\t\t\t\tcommand = placeholderRe.sub(\"{__\\\\1}\", command)\n\n\t\t\t\t# migrate event names\n\t\t\t\tif event in eventNameReplacements:\n\t\t\t\t\tevent = eventNameReplacements[\"event\"]\n\n\t\t\t\t# migrate payloads to more specific placeholders\n\t\t\t\tif event in payloadDataReplacements:\n\t\t\t\t\tfor key in payloadDataReplacements[event]:\n\t\t\t\t\t\tcommand = command.replace(\"{__%s}\" % key, payloadDataReplacements[event][key])\n\n\t\t\t\t# return processed tuple\n\t\t\t\treturn event, command\n\n\t\t\tdisableSystemCommands = False\n\t\t\tif \"systemCommandTrigger\" in self._config[\"events\"] and \"enabled\" in self._config[\"events\"][\"systemCommandTrigger\"]:\n\t\t\t\tdisableSystemCommands = not self._config[\"events\"][\"systemCommandTrigger\"][\"enabled\"]\n\n\t\t\tdisableGcodeCommands = False\n\t\t\tif \"gcodeCommandTrigger\" in self._config[\"events\"] and \"enabled\" in self._config[\"events\"][\"gcodeCommandTrigger\"]:\n\t\t\t\tdisableGcodeCommands = not self._config[\"events\"][\"gcodeCommandTrigger\"][\"enabled\"]\n\n\t\t\tdisableAllCommands = disableSystemCommands and disableGcodeCommands\n\t\t\tnewEvents = {\n\t\t\t\t\"enabled\": not disableAllCommands,\n\t\t\t\t\"subscriptions\": []\n\t\t\t}\n\n\t\t\tif \"systemCommandTrigger\" in 
self._config[\"events\"] and \"subscriptions\" in self._config[\"events\"][\"systemCommandTrigger\"]:\n\t\t\t\tfor trigger in self._config[\"events\"][\"systemCommandTrigger\"][\"subscriptions\"]:\n\t\t\t\t\tif not (\"event\" in trigger and \"command\" in trigger):\n\t\t\t\t\t\tcontinue\n\n\t\t\t\t\tnewTrigger = {\"type\": \"system\"}\n\t\t\t\t\tif disableSystemCommands and not disableAllCommands:\n\t\t\t\t\t\tnewTrigger[\"enabled\"] = False\n\n\t\t\t\t\tnewTrigger[\"event\"], newTrigger[\"command\"] = migrateEventHook(trigger[\"event\"], trigger[\"command\"])\n\t\t\t\t\tnewEvents[\"subscriptions\"].append(newTrigger)\n\n\t\t\tif \"gcodeCommandTrigger\" in self._config[\"events\"] and \"subscriptions\" in self._config[\"events\"][\"gcodeCommandTrigger\"]:\n\t\t\t\tfor trigger in self._config[\"events\"][\"gcodeCommandTrigger\"][\"subscriptions\"]:\n\t\t\t\t\tif not (\"event\" in trigger and \"command\" in trigger):\n\t\t\t\t\t\tcontinue\n\n\t\t\t\t\tnewTrigger = {\"type\": \"gcode\"}\n\t\t\t\t\tif disableGcodeCommands and not disableAllCommands:\n\t\t\t\t\t\tnewTrigger[\"enabled\"] = False\n\n\t\t\t\t\tnewTrigger[\"event\"], newTrigger[\"command\"] = migrateEventHook(trigger[\"event\"], trigger[\"command\"])\n\t\t\t\t\tnewTrigger[\"command\"] = newTrigger[\"command\"].split(\",\")\n\t\t\t\t\tnewEvents[\"subscriptions\"].append(newTrigger)\n\n\t\t\tself._config[\"events\"] = newEvents\n\t\t\tself._logger.info(\"Migrated %d event subscriptions to new format and structure\" % len(newEvents[\"subscriptions\"]))\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\tdef save(self, force=False):\n\t\tif not self._dirty and not force:\n\t\t\treturn False\n\n\t\twith open(self._configfile, \"wb\") as configFile:\n\t\t\tyaml.safe_dump(self._config, configFile, default_flow_style=False, indent=\" \", allow_unicode=True)\n\t\t\tself._dirty = False\n\t\tself.load()\n\t\treturn True\n\n\t@property\n\tdef last_modified(self):\n\t\t\"\"\"\n\t\tReturns:\n\t\t int: The last modification time of the configuration file.\n\t\t\"\"\"\n\t\tstat = os.stat(self._configfile)\n\t\treturn stat.st_mtime\n\n\t#~~ getter\n\n\tdef get(self, path, asdict=False, config=None, defaults=None, preprocessors=None, merged=False, incl_defaults=True):\n\t\timport octoprint.util as util\n\n\t\tif len(path) == 0:\n\t\t\treturn None\n\n\t\tif config is None:\n\t\t\tconfig = self._config\n\t\tif defaults is None:\n\t\t\tdefaults = default_settings\n\t\tif preprocessors is None:\n\t\t\tpreprocessors = self._get_preprocessors\n\n\t\twhile len(path) > 1:\n\t\t\tkey = path.pop(0)\n\t\t\tif key in config and key in defaults:\n\t\t\t\tconfig = config[key]\n\t\t\t\tdefaults = defaults[key]\n\t\t\telif incl_defaults and key in defaults:\n\t\t\t\tconfig = {}\n\t\t\t\tdefaults = defaults[key]\n\t\t\telse:\n\t\t\t\treturn None\n\n\t\t\tif preprocessors and isinstance(preprocessors, dict) and key in preprocessors:\n\t\t\t\tpreprocessors = preprocessors[key]\n\n\n\t\tk = path.pop(0)\n\t\tif not isinstance(k, (list, tuple)):\n\t\t\tkeys = [k]\n\t\telse:\n\t\t\tkeys = k\n\n\t\tif asdict:\n\t\t\tresults = {}\n\t\telse:\n\t\t\tresults = []\n\t\tfor key in keys:\n\t\t\tif key in config:\n\t\t\t\tvalue = config[key]\n\t\t\t\tif merged and key in defaults:\n\t\t\t\t\tvalue = util.dict_merge(defaults[key], value)\n\t\t\telif incl_defaults and key in defaults:\n\t\t\t\tvalue = defaults[key]\n\t\t\telse:\n\t\t\t\tvalue = None\n\n\t\t\tif preprocessors and isinstance(preprocessors, dict) and key in preprocessors and 
callable(preprocessors[key]):\n\t\t\t\tvalue = preprocessors[key](value)\n\n\t\t\tif asdict:\n\t\t\t\tresults[key] = value\n\t\t\telse:\n\t\t\t\tresults.append(value)\n\n\t\tif not isinstance(k, (list, tuple)):\n\t\t\tif asdict:\n\t\t\t\treturn results.values().pop()\n\t\t\telse:\n\t\t\t\treturn results.pop()\n\t\telse:\n\t\t\treturn results\n\n\tdef getInt(self, path, config=None, defaults=None, preprocessors=None, incl_defaults=True):\n\t\tvalue = self.get(path, config=config, defaults=defaults, preprocessors=preprocessors, incl_defaults=incl_defaults)\n\t\tif value is None:\n\t\t\treturn None\n\n\t\ttry:\n\t\t\treturn int(value)\n\t\texcept ValueError:\n\t\t\tself._logger.warn(\"Could not convert %r to a valid integer when getting option %r\" % (value, path))\n\t\t\treturn None\n\n\tdef getFloat(self, path, config=None, defaults=None, preprocessors=None, incl_defaults=True):\n\t\tvalue = self.get(path, config=config, defaults=defaults, preprocessors=preprocessors, incl_defaults=incl_defaults)\n\t\tif value is None:\n\t\t\treturn None\n\n\t\ttry:\n\t\t\treturn float(value)\n\t\texcept ValueError:\n\t\t\tself._logger.warn(\"Could not convert %r to a valid integer when getting option %r\" % (value, path))\n\t\t\treturn None\n\n\tdef getBoolean(self, path, config=None, defaults=None, preprocessors=None, incl_defaults=True):\n\t\tvalue = self.get(path, config=config, defaults=defaults, preprocessors=preprocessors, incl_defaults=incl_defaults)\n\t\tif value is None:\n\t\t\treturn None\n\t\tif isinstance(value, bool):\n\t\t\treturn value\n\t\tif isinstance(value, (int, float)):\n\t\t\treturn value != 0\n\t\tif isinstance(value, (str, unicode)):\n\t\t\treturn value.lower() in valid_boolean_trues\n\t\treturn value is not None\n\n\tdef getBaseFolder(self, type, create=True):\n\t\tif type not in default_settings[\"folder\"].keys() + [\"base\"]:\n\t\t\treturn None\n\n\t\tif type == \"base\":\n\t\t\treturn self._basedir\n\n\t\tfolder = self.get([\"folder\", type])\n\t\tif folder is None:\n\t\t\tfolder = self._get_default_folder(type)\n\n\t\tif not os.path.isdir(folder):\n\t\t\tif create:\n\t\t\t\tos.makedirs(folder)\n\t\t\telse:\n\t\t\t\traise IOError(\"No such folder: {folder}\".format(folder=folder))\n\n\t\treturn folder\n\n\tdef listScripts(self, script_type):\n\t\treturn map(lambda x: x[len(script_type + \"/\"):], filter(lambda x: x.startswith(script_type + \"/\"), self._get_scripts(script_type)))\n\n\tdef loadScript(self, script_type, name, context=None, source=False):\n\t\tif context is None:\n\t\t\tcontext = dict()\n\t\tcontext.update(dict(script=dict(type=script_type, name=name)))\n\n\t\ttemplate = self._get_script_template(script_type, name, source=source)\n\t\tif template is None:\n\t\t\treturn None\n\n\t\tif source:\n\t\t\tscript = template\n\t\telse:\n\t\t\ttry:\n\t\t\t\tscript = template.render(**context)\n\t\t\texcept:\n\t\t\t\tself._logger.exception(\"Exception while trying to render script {script_type}:{name}\".format(**locals()))\n\t\t\t\treturn None\n\n\t\treturn script\n\n\t#~~ setter\n\n\tdef set(self, path, value, force=False, defaults=None, config=None, preprocessors=None):\n\t\tif len(path) == 0:\n\t\t\treturn\n\n\t\tif self._mtime is not None and self.last_modified != self._mtime:\n\t\t\tself.load()\n\n\t\tif config is None:\n\t\t\tconfig = self._config\n\t\tif defaults is None:\n\t\t\tdefaults = default_settings\n\t\tif preprocessors is None:\n\t\t\tpreprocessors = self._set_preprocessors\n\n\t\twhile len(path) > 1:\n\t\t\tkey = path.pop(0)\n\t\t\tif key in config.keys() 
and key in defaults.keys():\n\t\t\t\tconfig = config[key]\n\t\t\t\tdefaults = defaults[key]\n\t\t\telif key in defaults.keys():\n\t\t\t\tconfig[key] = {}\n\t\t\t\tconfig = config[key]\n\t\t\t\tdefaults = defaults[key]\n\t\t\telse:\n\t\t\t\treturn\n\n\t\t\tif preprocessors and isinstance(preprocessors, dict) and key in preprocessors:\n\t\t\t\tpreprocessors = preprocessors[key]\n\n\t\tkey = path.pop(0)\n\n\t\tif preprocessors and isinstance(preprocessors, dict) and key in preprocessors and callable(preprocessors[key]):\n\t\t\tvalue = preprocessors[key](value)\n\n\t\tif not force and key in defaults and key in config and defaults[key] == value:\n\t\t\tdel config[key]\n\t\t\tself._dirty = True\n\t\telif force or (not key in config and defaults[key] != value) or (key in config and config[key] != value):\n\t\t\tif value is None and key in config:\n\t\t\t\tdel config[key]\n\t\t\telse:\n\t\t\t\tconfig[key] = value\n\t\t\tself._dirty = True\n\n\tdef setInt(self, path, value, force=False, defaults=None, config=None, preprocessors=None):\n\t\tif value is None:\n\t\t\tself.set(path, None, config=config, force=force, defaults=defaults, preprocessors=preprocessors)\n\t\t\treturn\n\n\t\ttry:\n\t\t\tintValue = int(value)\n\t\texcept ValueError:\n\t\t\tself._logger.warn(\"Could not convert %r to a valid integer when setting option %r\" % (value, path))\n\t\t\treturn\n\n\t\tself.set(path, intValue, config=config, force=force, defaults=defaults, preprocessors=preprocessors)\n\n\tdef setFloat(self, path, value, force=False, defaults=None, config=None, preprocessors=None):\n\t\tif value is None:\n\t\t\tself.set(path, None, config=config, force=force, defaults=defaults, preprocessors=preprocessors)\n\t\t\treturn\n\n\t\ttry:\n\t\t\tfloatValue = float(value)\n\t\texcept ValueError:\n\t\t\tself._logger.warn(\"Could not convert %r to a valid integer when setting option %r\" % (value, path))\n\t\t\treturn\n\n\t\tself.set(path, floatValue, config=config, force=force, defaults=defaults, preprocessors=preprocessors)\n\n\tdef setBoolean(self, path, value, force=False, defaults=None, config=None, preprocessors=None):\n\t\tif value is None or isinstance(value, bool):\n\t\t\tself.set(path, value, config=config, force=force, defaults=defaults, preprocessors=preprocessors)\n\t\telif value.lower() in valid_boolean_trues:\n\t\t\tself.set(path, True, config=config, force=force, defaults=defaults, preprocessors=preprocessors)\n\t\telse:\n\t\t\tself.set(path, False, config=config, force=force, defaults=defaults, preprocessors=preprocessors)\n\n\tdef setBaseFolder(self, type, path, force=False):\n\t\tif type not in default_settings[\"folder\"].keys():\n\t\t\treturn None\n\n\t\tcurrentPath = self.getBaseFolder(type)\n\t\tdefaultPath = self._get_default_folder(type)\n\t\tif (path is None or path == defaultPath) and \"folder\" in self._config.keys() and type in self._config[\"folder\"].keys():\n\t\t\tdel self._config[\"folder\"][type]\n\t\t\tif not self._config[\"folder\"]:\n\t\t\t\tdel self._config[\"folder\"]\n\t\t\tself._dirty = True\n\t\telif (path != currentPath and path != defaultPath) or force:\n\t\t\tif not \"folder\" in self._config.keys():\n\t\t\t\tself._config[\"folder\"] = {}\n\t\t\tself._config[\"folder\"][type] = path\n\t\t\tself._dirty = True\n\n\tdef saveScript(self, script_type, name, script):\n\t\tscript_folder = self.getBaseFolder(\"scripts\")\n\t\tfilename = os.path.realpath(os.path.join(script_folder, script_type, name))\n\t\tif not filename.startswith(script_folder):\n\t\t\t# oops, jail break, that shouldn't 
happen\n\t\t\traise ValueError(\"Invalid script path to save to: {filename} (from {script_type}:{name})\".format(**locals()))\n\n\t\tpath, _ = os.path.split(filename)\n\t\tif not os.path.exists(path):\n\t\t\tos.makedirs(path)\n\t\twith open(filename, \"w+\") as f:\n\t\t\tf.write(script)\n\ndef _default_basedir(applicationName):\n\t# taken from http://stackoverflow.com/questions/1084697/how-do-i-store-desktop-application-data-in-a-cross-platform-way-for-python\n\tif sys.platform == \"darwin\":\n\t\tfrom AppKit import NSSearchPathForDirectoriesInDomains\n\t\t# http://developer.apple.com/DOCUMENTATION/Cocoa/Reference/Foundation/Miscellaneous/Foundation_Functions/Reference/reference.html#//apple_ref/c/func/NSSearchPathForDirectoriesInDomains\n\t\t# NSApplicationSupportDirectory = 14\n\t\t# NSUserDomainMask = 1\n\t\t# True for expanding the tilde into a fully qualified path\n\t\treturn os.path.join(NSSearchPathForDirectoriesInDomains(14, 1, True)[0], applicationName)\n\telif sys.platform == \"win32\":\n\t\treturn os.path.join(os.environ[\"APPDATA\"], applicationName)\n\telse:\n\t\treturn os.path.expanduser(os.path.join(\"~\", \".\" + applicationName.lower()))\n",
"path": "src/octoprint/settings.py"
}
] | [
{
"content": "# coding=utf-8\n\"\"\"\nThis module represents OctoPrint's settings management. Within this module the default settings for the core\napplication are defined and the instance of the :class:`Settings` is held, which offers getter and setter\nmethods for the raw configuration values as well as various convenience methods to access the paths to base folders\nof various types and the configuration file itself.\n\n.. autodata:: default_settings\n :annotation: = dict(...)\n\n.. autodata:: valid_boolean_trues\n\n.. autofunction:: settings\n\n.. autoclass:: Settings\n :members:\n :undoc-members:\n\"\"\"\n\nfrom __future__ import absolute_import\n\n__author__ = \"Gina Häußge <[email protected]>\"\n__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'\n__copyright__ = \"Copyright (C) 2014 The OctoPrint Project - Released under terms of the AGPLv3 License\"\n\nimport sys\nimport os\nimport yaml\nimport logging\nimport re\nimport uuid\n\n_APPNAME = \"OctoPrint\"\n\n_instance = None\n\ndef settings(init=False, basedir=None, configfile=None):\n\t\"\"\"\n\tFactory method for initially constructing and consecutively retrieving the :class:`~octoprint.settings.Settings`\n\tsingleton.\n\n\tArguments:\n\t init (boolean): A flag indicating whether this is the initial call to construct the singleton (True) or not\n\t (False, default). If this is set to True and the plugin manager has already been initialized, a :class:`ValueError`\n\t will be raised. The same will happen if the plugin manager has not yet been initialized and this is set to\n\t False.\n\t basedir (str): Path of the base directoy for all of OctoPrint's settings, log files, uploads etc. If not set\n\t the default will be used: ``~/.octoprint`` on Linux, ``%APPDATA%/OctoPrint`` on Windows and\n\t ``~/Library/Application Support/OctoPrint`` on MacOS.\n\t configfile (str): Path of the configuration file (``config.yaml``) to work on. 
If not set the default will\n\t be used: ``<basedir>/config.yaml`` for ``basedir`` as defined above.\n\n\tReturns:\n\t Settings: The fully initialized :class:`Settings` instance.\n\n\tRaises:\n\t ValueError: ``init`` is True but settings are already initialized or vice versa.\n\t\"\"\"\n\tglobal _instance\n\tif _instance is not None:\n\t\tif init:\n\t\t\traise ValueError(\"Settings Manager already initialized\")\n\n\telse:\n\t\tif init:\n\t\t\t_instance = Settings(configfile=configfile, basedir=basedir)\n\t\telse:\n\t\t\traise ValueError(\"Settings not initialized yet\")\n\n\treturn _instance\n\ndefault_settings = {\n\t\"serial\": {\n\t\t\"port\": None,\n\t\t\"baudrate\": None,\n\t\t\"autoconnect\": False,\n\t\t\"log\": False,\n\t\t\"timeout\": {\n\t\t\t\"detection\": 0.5,\n\t\t\t\"connection\": 10,\n\t\t\t\"communication\": 30,\n\t\t\t\"temperature\": 5,\n\t\t\t\"sdStatus\": 1\n\t\t},\n\t\t\"additionalPorts\": [],\n\t\t\"longRunningCommands\": [\"G4\", \"G28\", \"G29\", \"G30\", \"G32\", \"M400\", \"M226\"]\n\t},\n\t\"server\": {\n\t\t\"host\": \"0.0.0.0\",\n\t\t\"port\": 5000,\n\t\t\"firstRun\": True,\n\t\t\"secretKey\": None,\n\t\t\"reverseProxy\": {\n\t\t\t\"prefixHeader\": \"X-Script-Name\",\n\t\t\t\"schemeHeader\": \"X-Scheme\",\n\t\t\t\"hostHeader\": \"X-Forwarded-Host\",\n\t\t\t\"prefixFallback\": \"\",\n\t\t\t\"schemeFallback\": \"\",\n\t\t\t\"hostFallback\": \"\"\n\t\t},\n\t\t\"uploads\": {\n\t\t\t\"maxSize\": 1 * 1024 * 1024 * 1024, # 1GB\n\t\t\t\"nameSuffix\": \"name\",\n\t\t\t\"pathSuffix\": \"path\"\n\t\t},\n\t\t\"maxSize\": 100 * 1024, # 100 KB\n\t},\n\t\"webcam\": {\n\t\t\"stream\": None,\n\t\t\"snapshot\": None,\n\t\t\"ffmpeg\": None,\n\t\t\"ffmpegThreads\": 1,\n\t\t\"bitrate\": \"5000k\",\n\t\t\"watermark\": True,\n\t\t\"flipH\": False,\n\t\t\"flipV\": False,\n\t\t\"rotate90\" : False,\n\t\t\"timelapse\": {\n\t\t\t\"type\": \"off\",\n\t\t\t\"options\": {},\n\t\t\t\"postRoll\": 0,\n\t\t\t\"fps\": 25\n\t\t}\n\t},\n\t\"gcodeViewer\": {\n\t\t\"enabled\": True,\n\t\t\"mobileSizeThreshold\": 2 * 1024 * 1024, # 2MB\n\t\t\"sizeThreshold\": 20 * 1024 * 1024, # 20MB\n\t},\n\t\"gcodeAnalysis\": {\n\t\t\"maxExtruders\": 10\n\t},\n\t\"feature\": {\n\t\t\"temperatureGraph\": True,\n\t\t\"waitForStartOnConnect\": False,\n\t\t\"alwaysSendChecksum\": False,\n\t\t\"sendChecksumWithUnknownCommands\": False,\n\t\t\"unknownCommandsNeedAck\": False,\n\t\t\"sdSupport\": True,\n\t\t\"sdAlwaysAvailable\": False,\n\t\t\"swallowOkAfterResend\": True,\n\t\t\"repetierTargetTemp\": False,\n\t\t\"externalHeatupDetection\": True,\n\t\t\"supportWait\": True,\n\t\t\"keyboardControl\": True,\n\t\t\"pollWatched\": False\n\t},\n\t\"folder\": {\n\t\t\"uploads\": None,\n\t\t\"timelapse\": None,\n\t\t\"timelapse_tmp\": None,\n\t\t\"logs\": None,\n\t\t\"virtualSd\": None,\n\t\t\"watched\": None,\n\t\t\"plugins\": None,\n\t\t\"slicingProfiles\": None,\n\t\t\"printerProfiles\": None,\n\t\t\"scripts\": None,\n\t\t\"translations\": None,\n\t\t\"generated\": None,\n\t\t\"data\": None\n\t},\n\t\"temperature\": {\n\t\t\"profiles\": [\n\t\t\t{\"name\": \"ABS\", \"extruder\" : 210, \"bed\" : 100 },\n\t\t\t{\"name\": \"PLA\", \"extruder\" : 180, \"bed\" : 60 }\n\t\t],\n\t\t\"cutoff\": 30\n\t},\n\t\"printerProfiles\": {\n\t\t\"default\": None,\n\t\t\"defaultProfile\": {}\n\t},\n\t\"printerParameters\": {\n\t\t\"pauseTriggers\": [],\n\t\t\"defaultExtrusionLength\": 5\n\t},\n\t\"appearance\": {\n\t\t\"name\": \"\",\n\t\t\"color\": \"default\",\n\t\t\"colorTransparent\": False,\n\t\t\"defaultLanguage\": 
\"_default\",\n\t\t\"components\": {\n\t\t\t\"order\": {\n\t\t\t\t\"navbar\": [\"settings\", \"systemmenu\", \"login\"],\n\t\t\t\t\"sidebar\": [\"connection\", \"state\", \"files\"],\n\t\t\t\t\"tab\": [\"temperature\", \"control\", \"gcodeviewer\", \"terminal\", \"timelapse\"],\n\t\t\t\t\"settings\": [\n\t\t\t\t\t\"section_printer\", \"serial\", \"printerprofiles\", \"temperatures\", \"terminalfilters\", \"gcodescripts\",\n\t\t\t\t\t\"section_features\", \"features\", \"webcam\", \"accesscontrol\", \"api\",\n\t\t\t\t\t\"section_octoprint\", \"folders\", \"appearance\", \"logs\", \"plugin_pluginmanager\", \"plugin_softwareupdate\"\n\t\t\t\t],\n\t\t\t\t\"usersettings\": [\"access\", \"interface\"],\n\t\t\t\t\"generic\": []\n\t\t\t},\n\t\t\t\"disabled\": {\n\t\t\t\t\"navbar\": [],\n\t\t\t\t\"sidebar\": [],\n\t\t\t\t\"tab\": [],\n\t\t\t\t\"settings\": [],\n\t\t\t\t\"usersettings\": [],\n\t\t\t\t\"generic\": []\n\t\t\t}\n\t\t}\n\t},\n\t\"controls\": [],\n\t\"system\": {\n\t\t\"actions\": []\n\t},\n\t\"accessControl\": {\n\t\t\"enabled\": True,\n\t\t\"salt\": None,\n\t\t\"userManager\": \"octoprint.users.FilebasedUserManager\",\n\t\t\"userfile\": None,\n\t\t\"autologinLocal\": False,\n\t\t\"localNetworks\": [\"127.0.0.0/8\"],\n\t\t\"autologinAs\": None\n\t},\n\t\"slicing\": {\n\t\t\"enabled\": True,\n\t\t\"defaultSlicer\": \"cura\",\n\t\t\"defaultProfiles\": None\n\t},\n\t\"events\": {\n\t\t\"enabled\": True,\n\t\t\"subscriptions\": []\n\t},\n\t\"api\": {\n\t\t\"enabled\": True,\n\t\t\"key\": None,\n\t\t\"allowCrossOrigin\": False,\n\t\t\"apps\": {}\n\t},\n\t\"terminalFilters\": [\n\t\t{ \"name\": \"Suppress M105 requests/responses\", \"regex\": \"(Send: M105)|(Recv: ok (B|T\\d*):)\" },\n\t\t{ \"name\": \"Suppress M27 requests/responses\", \"regex\": \"(Send: M27)|(Recv: SD printing byte)\" }\n\t],\n\t\"plugins\": {\n\t\t\"_disabled\": []\n\t},\n\t\"scripts\": {\n\t\t\"gcode\": {\n\t\t\t\"afterPrintCancelled\": \"; disable motors\\nM84\\n\\n;disable all heaters\\n{% snippet 'disable_hotends' %}\\nM140 S0\\n\\n;disable fan\\nM106 S0\",\n\t\t\t\"snippets\": {\n\t\t\t\t\"disable_hotends\": \"{% for tool in range(printer_profile.extruder.count) %}M104 T{{ tool }} S0\\n{% endfor %}\"\n\t\t\t}\n\t\t}\n\t},\n\t\"devel\": {\n\t\t\"stylesheet\": \"css\",\n\t\t\"cache\": {\n\t\t\t\"enabled\": True\n\t\t},\n\t\t\"webassets\": {\n\t\t\t\"minify\": False,\n\t\t\t\"bundle\": True,\n\t\t\t\"clean_on_startup\": True\n\t\t},\n\t\t\"virtualPrinter\": {\n\t\t\t\"enabled\": False,\n\t\t\t\"okAfterResend\": False,\n\t\t\t\"forceChecksum\": False,\n\t\t\t\"okWithLinenumber\": False,\n\t\t\t\"numExtruders\": 1,\n\t\t\t\"includeCurrentToolInTemps\": True,\n\t\t\t\"movementSpeed\": {\n\t\t\t\t\"x\": 6000,\n\t\t\t\t\"y\": 6000,\n\t\t\t\t\"z\": 200,\n\t\t\t\t\"e\": 300\n\t\t\t},\n\t\t\t\"hasBed\": True,\n\t\t\t\"repetierStyleTargetTemperature\": False,\n\t\t\t\"okBeforeCommandOutput\": False,\n\t\t\t\"smoothieTemperatureReporting\": False,\n\t\t\t\"extendedSdFileList\": False,\n\t\t\t\"throttle\": 0.01,\n\t\t\t\"waitOnLongMoves\": False,\n\t\t\t\"rxBuffer\": 64,\n\t\t\t\"txBuffer\": 40,\n\t\t\t\"commandBuffer\": 4,\n\t\t\t\"sendWait\": True,\n\t\t\t\"waitInterval\": 1.0\n\t\t}\n\t}\n}\n\"\"\"The default settings of the core application.\"\"\"\n\nvalid_boolean_trues = [True, \"true\", \"yes\", \"y\", \"1\"]\n\"\"\" Values that are considered to be equivalent to the boolean ``True`` value, used for type conversion in various places.\"\"\"\n\nclass Settings(object):\n\t\"\"\"\n\tThe :class:`Settings` class allows managing 
all of OctoPrint's settings. It takes care of initializing the settings\n\tdirectory, loading the configuration from ``config.yaml``, persisting changes to disk etc and provides access\n\tmethods for getting and setting specific values from the overall settings structure via paths.\n\n\tA general word on the concept of paths, since they play an important role in OctoPrint's settings management. A\n\tpath is basically a list or tuple consisting of keys to follow down into the settings (which are basically like\n\ta ``dict``) in order to set or retrieve a specific value (or more than one). For example, for a settings\n\tstructure like the following::\n\n\t serial:\n\t port: \"/dev/ttyACM0\"\n\t baudrate: 250000\n\t timeouts:\n\t communication: 20.0\n\t temperature: 5.0\n\t sdStatus: 1.0\n\t connection: 10.0\n\t server:\n\t host: \"0.0.0.0\"\n\t port: 5000\n\n\tthe following paths could be used:\n\n\t========================================== ============================================================================\n\tPath Value\n\t========================================== ============================================================================\n\t``[\"serial\", \"port\"]`` ::\n\n\t \"/dev/ttyACM0\"\n\n\t``[\"serial\", \"timeouts\"]`` ::\n\n\t communication: 20.0\n\t temperature: 5.0\n\t sdStatus: 1.0\n\t connection: 10.0\n\n\t``[\"serial\", \"timeouts\", \"temperature\"]`` ::\n\n\t 5.0\n\n\t``[\"server\", \"port\"]`` ::\n\n\t 5000\n\n\t========================================== ============================================================================\n\n\tHowever, these would be invalid paths: ``[\"key\"]``, ``[\"serial\", \"port\", \"value\"]``, ``[\"server\", \"host\", 3]``.\n\t\"\"\"\n\n\tdef __init__(self, configfile=None, basedir=None):\n\t\tself._logger = logging.getLogger(__name__)\n\n\t\tself._basedir = None\n\n\t\tself._config = None\n\t\tself._dirty = False\n\t\tself._mtime = None\n\n\t\tself._get_preprocessors = dict(\n\t\t\tcontrols=self._process_custom_controls\n\t\t)\n\t\tself._set_preprocessors = dict()\n\n\t\tself._init_basedir(basedir)\n\n\t\tif configfile is not None:\n\t\t\tself._configfile = configfile\n\t\telse:\n\t\t\tself._configfile = os.path.join(self._basedir, \"config.yaml\")\n\t\tself.load(migrate=True)\n\n\t\tif self.get([\"api\", \"key\"]) is None:\n\t\t\tself.set([\"api\", \"key\"], ''.join('%02X' % ord(z) for z in uuid.uuid4().bytes))\n\t\t\tself.save(force=True)\n\n\t\tself._script_env = self._init_script_templating()\n\n\tdef _init_basedir(self, basedir):\n\t\tif basedir is not None:\n\t\t\tself._basedir = basedir\n\t\telse:\n\t\t\tself._basedir = _default_basedir(_APPNAME)\n\n\t\tif not os.path.isdir(self._basedir):\n\t\t\tos.makedirs(self._basedir)\n\n\tdef _get_default_folder(self, type):\n\t\tfolder = default_settings[\"folder\"][type]\n\t\tif folder is None:\n\t\t\tfolder = os.path.join(self._basedir, type.replace(\"_\", os.path.sep))\n\t\treturn folder\n\n\tdef _init_script_templating(self):\n\t\tfrom jinja2 import Environment, BaseLoader, FileSystemLoader, ChoiceLoader, TemplateNotFound\n\t\tfrom jinja2.nodes import Include, Const\n\t\tfrom jinja2.ext import Extension\n\n\t\tclass SnippetExtension(Extension):\n\t\t\ttags = {\"snippet\"}\n\t\t\tfields = Include.fields\n\n\t\t\tdef parse(self, parser):\n\t\t\t\tnode = parser.parse_include()\n\t\t\t\tif not node.template.value.startswith(\"/\"):\n\t\t\t\t\tnode.template.value = \"snippets/\" + node.template.value\n\t\t\t\treturn node\n\n\t\tclass SettingsScriptLoader(BaseLoader):\n\t\t\tdef 
__init__(self, s):\n\t\t\t\tself._settings = s\n\n\t\t\tdef get_source(self, environment, template):\n\t\t\t\tparts = template.split(\"/\")\n\t\t\t\tif not len(parts):\n\t\t\t\t\traise TemplateNotFound(template)\n\n\t\t\t\tscript = self._settings.get([\"scripts\"], merged=True)\n\t\t\t\tfor part in parts:\n\t\t\t\t\tif isinstance(script, dict) and part in script:\n\t\t\t\t\t\tscript = script[part]\n\t\t\t\t\telse:\n\t\t\t\t\t\traise TemplateNotFound(template)\n\t\t\t\tsource = script\n\t\t\t\tif source is None:\n\t\t\t\t\traise TemplateNotFound(template)\n\t\t\t\tmtime = self._settings._mtime\n\t\t\t\treturn source, None, lambda: mtime == self._settings.last_modified\n\n\t\t\tdef list_templates(self):\n\t\t\t\tscripts = self._settings.get([\"scripts\"], merged=True)\n\t\t\t\treturn self._get_templates(scripts)\n\n\t\t\tdef _get_templates(self, scripts):\n\t\t\t\ttemplates = []\n\t\t\t\tfor key in scripts:\n\t\t\t\t\tif isinstance(scripts[key], dict):\n\t\t\t\t\t\ttemplates += map(lambda x: key + \"/\" + x, self._get_templates(scripts[key]))\n\t\t\t\t\telif isinstance(scripts[key], basestring):\n\t\t\t\t\t\ttemplates.append(key)\n\t\t\t\treturn templates\n\n\t\tclass SelectLoader(BaseLoader):\n\t\t\tdef __init__(self, default, mapping, sep=\":\"):\n\t\t\t\tself._default = default\n\t\t\t\tself._mapping = mapping\n\t\t\t\tself._sep = sep\n\n\t\t\tdef get_source(self, environment, template):\n\t\t\t\tif self._sep in template:\n\t\t\t\t\tprefix, name = template.split(self._sep, 1)\n\t\t\t\t\tif not prefix in self._mapping:\n\t\t\t\t\t\traise TemplateNotFound(template)\n\t\t\t\t\treturn self._mapping[prefix].get_source(environment, name)\n\t\t\t\treturn self._default.get_source(environment, template)\n\n\t\t\tdef list_templates(self):\n\t\t\t\treturn self._default.list_templates()\n\n\t\tclass RelEnvironment(Environment):\n\t\t\tdef __init__(self, prefix_sep=\":\", *args, **kwargs):\n\t\t\t\tEnvironment.__init__(self, *args, **kwargs)\n\t\t\t\tself._prefix_sep = prefix_sep\n\n\t\t\tdef join_path(self, template, parent):\n\t\t\t\tprefix, name = self._split_prefix(template)\n\n\t\t\t\tif name.startswith(\"/\"):\n\t\t\t\t\treturn self._join_prefix(prefix, name[1:])\n\t\t\t\telse:\n\t\t\t\t\t_, parent_name = self._split_prefix(parent)\n\t\t\t\t\tparent_base = parent_name.split(\"/\")[:-1]\n\t\t\t\t\treturn self._join_prefix(prefix, \"/\".join(parent_base) + \"/\" + name)\n\n\t\t\tdef _split_prefix(self, template):\n\t\t\t\tif self._prefix_sep in template:\n\t\t\t\t\treturn template.split(self._prefix_sep, 1)\n\t\t\t\telse:\n\t\t\t\t\treturn \"\", template\n\n\t\t\tdef _join_prefix(self, prefix, template):\n\t\t\t\tif len(prefix):\n\t\t\t\t\treturn prefix + self._prefix_sep + template\n\t\t\t\telse:\n\t\t\t\t\treturn template\n\n\t\tfile_system_loader = FileSystemLoader(self.getBaseFolder(\"scripts\"))\n\t\tsettings_loader = SettingsScriptLoader(self)\n\t\tchoice_loader = ChoiceLoader([file_system_loader, settings_loader])\n\t\tselect_loader = SelectLoader(choice_loader, dict(bundled=settings_loader, file=file_system_loader))\n\t\treturn RelEnvironment(loader=select_loader, extensions=[SnippetExtension])\n\n\tdef _get_script_template(self, script_type, name, source=False):\n\t\tfrom jinja2 import TemplateNotFound\n\n\t\ttemplate_name = script_type + \"/\" + name\n\t\ttry:\n\t\t\tif source:\n\t\t\t\ttemplate_name, _, _ = self._script_env.loader.get_source(self._script_env, template_name)\n\t\t\t\treturn template_name\n\t\t\telse:\n\t\t\t\treturn 
self._script_env.get_template(template_name)\n\t\texcept TemplateNotFound:\n\t\t\treturn None\n\t\texcept:\n\t\t\tself._logger.exception(\"Exception while trying to resolve template {template_name}\".format(**locals()))\n\t\t\treturn None\n\n\tdef _get_scripts(self, script_type):\n\t\treturn self._script_env.list_templates(filter_func=lambda x: x.startswith(script_type+\"/\"))\n\n\tdef _process_custom_controls(self, controls):\n\t\tdef process_control(c):\n\t\t\t# shallow copy\n\t\t\tresult = dict(c)\n\n\t\t\tif \"regex\" in result and \"template\" in result:\n\t\t\t\t# if it's a template matcher, we need to add a key to associate with the matcher output\n\t\t\t\timport hashlib\n\t\t\t\tkey_hash = hashlib.md5()\n\t\t\t\tkey_hash.update(result[\"regex\"])\n\t\t\t\tresult[\"key\"] = key_hash.hexdigest()\n\n\t\t\t\ttemplate_key_hash = hashlib.md5()\n\t\t\t\ttemplate_key_hash.update(result[\"template\"])\n\t\t\t\tresult[\"template_key\"] = template_key_hash.hexdigest()\n\n\t\t\telif \"children\" in result:\n\t\t\t\t# if it has children we need to process them recursively\n\t\t\t\tresult[\"children\"] = map(process_control, [child for child in result[\"children\"] if child is not None])\n\n\t\t\treturn result\n\n\t\treturn map(process_control, controls)\n\n\t@property\n\tdef effective(self):\n\t\timport octoprint.util\n\t\treturn octoprint.util.dict_merge(default_settings, self._config)\n\n\t@property\n\tdef effective_yaml(self):\n\t\timport yaml\n\t\treturn yaml.safe_dump(self.effective)\n\n\t#~~ load and save\n\n\tdef load(self, migrate=False):\n\t\tif os.path.exists(self._configfile) and os.path.isfile(self._configfile):\n\t\t\twith open(self._configfile, \"r\") as f:\n\t\t\t\tself._config = yaml.safe_load(f)\n\t\t\t\tself._mtime = self.last_modified\n\t\t# changed from else to handle cases where the file exists, but is empty / 0 bytes\n\t\tif not self._config:\n\t\t\tself._config = {}\n\n\t\tif migrate:\n\t\t\tself._migrate_config()\n\n\tdef _migrate_config(self):\n\t\tdirty = False\n\n\t\tmigrators = (\n\t\t\tself._migrate_event_config,\n\t\t\tself._migrate_reverse_proxy_config,\n\t\t\tself._migrate_printer_parameters,\n\t\t\tself._migrate_gcode_scripts\n\t\t)\n\n\t\tfor migrate in migrators:\n\t\t\tdirty = migrate() or dirty\n\t\tif dirty:\n\t\t\tself.save(force=True)\n\n\tdef _migrate_gcode_scripts(self):\n\t\t\"\"\"\n\t\tMigrates an old development version of gcode scripts to the new template based format.\n\t\t\"\"\"\n\n\t\tdirty = False\n\t\tif \"scripts\" in self._config:\n\t\t\tif \"gcode\" in self._config[\"scripts\"]:\n\t\t\t\tif \"templates\" in self._config[\"scripts\"][\"gcode\"]:\n\t\t\t\t\tdel self._config[\"scripts\"][\"gcode\"][\"templates\"]\n\n\t\t\t\treplacements = dict(\n\t\t\t\t\tdisable_steppers=\"M84\",\n\t\t\t\t\tdisable_hotends=\"{% snippet 'disable_hotends' %}\",\n\t\t\t\t\tdisable_bed=\"M140 S0\",\n\t\t\t\t\tdisable_fan=\"M106 S0\"\n\t\t\t\t)\n\n\t\t\t\tfor name, script in self._config[\"scripts\"][\"gcode\"].items():\n\t\t\t\t\tself.saveScript(\"gcode\", name, script.format(**replacements))\n\t\t\tdel self._config[\"scripts\"]\n\t\t\tdirty = True\n\t\treturn dirty\n\n\tdef _migrate_printer_parameters(self):\n\t\t\"\"\"\n\t\tMigrates the old \"printer > parameters\" data structure to the new printer profile mechanism.\n\t\t\"\"\"\n\t\tdefault_profile = self._config[\"printerProfiles\"][\"defaultProfile\"] if \"printerProfiles\" in self._config and \"defaultProfile\" in self._config[\"printerProfiles\"] else dict()\n\t\tdirty = False\n\n\t\tif 
\"printerParameters\" in self._config:\n\t\t\tprinter_parameters = self._config[\"printerParameters\"]\n\n\t\t\tif \"movementSpeed\" in printer_parameters or \"invertAxes\" in printer_parameters:\n\t\t\t\tdefault_profile[\"axes\"] = dict(x=dict(), y=dict(), z=dict(), e=dict())\n\t\t\t\tif \"movementSpeed\" in printer_parameters:\n\t\t\t\t\tfor axis in (\"x\", \"y\", \"z\", \"e\"):\n\t\t\t\t\t\tif axis in printer_parameters[\"movementSpeed\"]:\n\t\t\t\t\t\t\tdefault_profile[\"axes\"][axis][\"speed\"] = printer_parameters[\"movementSpeed\"][axis]\n\t\t\t\t\tdel self._config[\"printerParameters\"][\"movementSpeed\"]\n\t\t\t\tif \"invertedAxes\" in printer_parameters:\n\t\t\t\t\tfor axis in (\"x\", \"y\", \"z\", \"e\"):\n\t\t\t\t\t\tif axis in printer_parameters[\"invertedAxes\"]:\n\t\t\t\t\t\t\tdefault_profile[\"axes\"][axis][\"inverted\"] = True\n\t\t\t\t\tdel self._config[\"printerParameters\"][\"invertedAxes\"]\n\n\t\t\tif \"numExtruders\" in printer_parameters or \"extruderOffsets\" in printer_parameters:\n\t\t\t\tif not \"extruder\" in default_profile:\n\t\t\t\t\tdefault_profile[\"extruder\"] = dict()\n\n\t\t\t\tif \"numExtruders\" in printer_parameters:\n\t\t\t\t\tdefault_profile[\"extruder\"][\"count\"] = printer_parameters[\"numExtruders\"]\n\t\t\t\t\tdel self._config[\"printerParameters\"][\"numExtruders\"]\n\t\t\t\tif \"extruderOffsets\" in printer_parameters:\n\t\t\t\t\textruder_offsets = []\n\t\t\t\t\tfor offset in printer_parameters[\"extruderOffsets\"]:\n\t\t\t\t\t\tif \"x\" in offset and \"y\" in offset:\n\t\t\t\t\t\t\textruder_offsets.append((offset[\"x\"], offset[\"y\"]))\n\t\t\t\t\tdefault_profile[\"extruder\"][\"offsets\"] = extruder_offsets\n\t\t\t\t\tdel self._config[\"printerParameters\"][\"extruderOffsets\"]\n\n\t\t\tif \"bedDimensions\" in printer_parameters:\n\t\t\t\tbed_dimensions = printer_parameters[\"bedDimensions\"]\n\t\t\t\tif not \"volume\" in default_profile:\n\t\t\t\t\tdefault_profile[\"volume\"] = dict()\n\n\t\t\t\tif \"circular\" in bed_dimensions and \"r\" in bed_dimensions and bed_dimensions[\"circular\"]:\n\t\t\t\t\tdefault_profile[\"volume\"][\"formFactor\"] = \"circular\"\n\t\t\t\t\tdefault_profile[\"volume\"][\"width\"] = 2 * bed_dimensions[\"r\"]\n\t\t\t\t\tdefault_profile[\"volume\"][\"depth\"] = default_profile[\"volume\"][\"width\"]\n\t\t\t\telif \"x\" in bed_dimensions or \"y\" in bed_dimensions:\n\t\t\t\t\tdefault_profile[\"volume\"][\"formFactor\"] = \"rectangular\"\n\t\t\t\t\tif \"x\" in bed_dimensions:\n\t\t\t\t\t\tdefault_profile[\"volume\"][\"width\"] = bed_dimensions[\"x\"]\n\t\t\t\t\tif \"y\" in bed_dimensions:\n\t\t\t\t\t\tdefault_profile[\"volume\"][\"depth\"] = bed_dimensions[\"y\"]\n\t\t\t\tdel self._config[\"printerParameters\"][\"bedDimensions\"]\n\n\t\t\tdirty = True\n\n\t\tif dirty:\n\t\t\tif not \"printerProfiles\" in self._config:\n\t\t\t\tself._config[\"printerProfiles\"] = dict()\n\t\t\tself._config[\"printerProfiles\"][\"defaultProfile\"] = default_profile\n\t\treturn dirty\n\n\tdef _migrate_reverse_proxy_config(self):\n\t\t\"\"\"\n\t\tMigrates the old \"server > baseUrl\" and \"server > scheme\" configuration entries to\n\t\t\"server > reverseProxy > prefixFallback\" and \"server > reverseProxy > schemeFallback\".\n\t\t\"\"\"\n\t\tif \"server\" in self._config.keys() and (\"baseUrl\" in self._config[\"server\"] or \"scheme\" in self._config[\"server\"]):\n\t\t\tprefix = \"\"\n\t\t\tif \"baseUrl\" in self._config[\"server\"]:\n\t\t\t\tprefix = self._config[\"server\"][\"baseUrl\"]\n\t\t\t\tdel 
self._config[\"server\"][\"baseUrl\"]\n\n\t\t\tscheme = \"\"\n\t\t\tif \"scheme\" in self._config[\"server\"]:\n\t\t\t\tscheme = self._config[\"server\"][\"scheme\"]\n\t\t\t\tdel self._config[\"server\"][\"scheme\"]\n\n\t\t\tif not \"reverseProxy\" in self._config[\"server\"] or not isinstance(self._config[\"server\"][\"reverseProxy\"], dict):\n\t\t\t\tself._config[\"server\"][\"reverseProxy\"] = dict()\n\t\t\tif prefix:\n\t\t\t\tself._config[\"server\"][\"reverseProxy\"][\"prefixFallback\"] = prefix\n\t\t\tif scheme:\n\t\t\t\tself._config[\"server\"][\"reverseProxy\"][\"schemeFallback\"] = scheme\n\t\t\tself._logger.info(\"Migrated reverse proxy configuration to new structure\")\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\tdef _migrate_event_config(self):\n\t\t\"\"\"\n\t\tMigrates the old event configuration format of type \"events > gcodeCommandTrigger\" and\n\t\t\"event > systemCommandTrigger\" to the new events format.\n\t\t\"\"\"\n\t\tif \"events\" in self._config.keys() and (\"gcodeCommandTrigger\" in self._config[\"events\"] or \"systemCommandTrigger\" in self._config[\"events\"]):\n\t\t\tself._logger.info(\"Migrating config (event subscriptions)...\")\n\n\t\t\t# migrate event hooks to new format\n\t\t\tplaceholderRe = re.compile(\"%\\((.*?)\\)s\")\n\n\t\t\teventNameReplacements = {\n\t\t\t\t\"ClientOpen\": \"ClientOpened\",\n\t\t\t\t\"TransferStart\": \"TransferStarted\"\n\t\t\t}\n\t\t\tpayloadDataReplacements = {\n\t\t\t\t\"Upload\": {\"data\": \"{file}\", \"filename\": \"{file}\"},\n\t\t\t\t\"Connected\": {\"data\": \"{port} at {baudrate} baud\"},\n\t\t\t\t\"FileSelected\": {\"data\": \"{file}\", \"filename\": \"{file}\"},\n\t\t\t\t\"TransferStarted\": {\"data\": \"{remote}\", \"filename\": \"{remote}\"},\n\t\t\t\t\"TransferDone\": {\"data\": \"{remote}\", \"filename\": \"{remote}\"},\n\t\t\t\t\"ZChange\": {\"data\": \"{new}\"},\n\t\t\t\t\"CaptureStart\": {\"data\": \"{file}\"},\n\t\t\t\t\"CaptureDone\": {\"data\": \"{file}\"},\n\t\t\t\t\"MovieDone\": {\"data\": \"{movie}\", \"filename\": \"{gcode}\"},\n\t\t\t\t\"Error\": {\"data\": \"{error}\"},\n\t\t\t\t\"PrintStarted\": {\"data\": \"{file}\", \"filename\": \"{file}\"},\n\t\t\t\t\"PrintDone\": {\"data\": \"{file}\", \"filename\": \"{file}\"},\n\t\t\t}\n\n\t\t\tdef migrateEventHook(event, command):\n\t\t\t\t# migrate placeholders\n\t\t\t\tcommand = placeholderRe.sub(\"{__\\\\1}\", command)\n\n\t\t\t\t# migrate event names\n\t\t\t\tif event in eventNameReplacements:\n\t\t\t\t\tevent = eventNameReplacements[\"event\"]\n\n\t\t\t\t# migrate payloads to more specific placeholders\n\t\t\t\tif event in payloadDataReplacements:\n\t\t\t\t\tfor key in payloadDataReplacements[event]:\n\t\t\t\t\t\tcommand = command.replace(\"{__%s}\" % key, payloadDataReplacements[event][key])\n\n\t\t\t\t# return processed tuple\n\t\t\t\treturn event, command\n\n\t\t\tdisableSystemCommands = False\n\t\t\tif \"systemCommandTrigger\" in self._config[\"events\"] and \"enabled\" in self._config[\"events\"][\"systemCommandTrigger\"]:\n\t\t\t\tdisableSystemCommands = not self._config[\"events\"][\"systemCommandTrigger\"][\"enabled\"]\n\n\t\t\tdisableGcodeCommands = False\n\t\t\tif \"gcodeCommandTrigger\" in self._config[\"events\"] and \"enabled\" in self._config[\"events\"][\"gcodeCommandTrigger\"]:\n\t\t\t\tdisableGcodeCommands = not self._config[\"events\"][\"gcodeCommandTrigger\"][\"enabled\"]\n\n\t\t\tdisableAllCommands = disableSystemCommands and disableGcodeCommands\n\t\t\tnewEvents = {\n\t\t\t\t\"enabled\": not 
disableAllCommands,\n\t\t\t\t\"subscriptions\": []\n\t\t\t}\n\n\t\t\tif \"systemCommandTrigger\" in self._config[\"events\"] and \"subscriptions\" in self._config[\"events\"][\"systemCommandTrigger\"]:\n\t\t\t\tfor trigger in self._config[\"events\"][\"systemCommandTrigger\"][\"subscriptions\"]:\n\t\t\t\t\tif not (\"event\" in trigger and \"command\" in trigger):\n\t\t\t\t\t\tcontinue\n\n\t\t\t\t\tnewTrigger = {\"type\": \"system\"}\n\t\t\t\t\tif disableSystemCommands and not disableAllCommands:\n\t\t\t\t\t\tnewTrigger[\"enabled\"] = False\n\n\t\t\t\t\tnewTrigger[\"event\"], newTrigger[\"command\"] = migrateEventHook(trigger[\"event\"], trigger[\"command\"])\n\t\t\t\t\tnewEvents[\"subscriptions\"].append(newTrigger)\n\n\t\t\tif \"gcodeCommandTrigger\" in self._config[\"events\"] and \"subscriptions\" in self._config[\"events\"][\"gcodeCommandTrigger\"]:\n\t\t\t\tfor trigger in self._config[\"events\"][\"gcodeCommandTrigger\"][\"subscriptions\"]:\n\t\t\t\t\tif not (\"event\" in trigger and \"command\" in trigger):\n\t\t\t\t\t\tcontinue\n\n\t\t\t\t\tnewTrigger = {\"type\": \"gcode\"}\n\t\t\t\t\tif disableGcodeCommands and not disableAllCommands:\n\t\t\t\t\t\tnewTrigger[\"enabled\"] = False\n\n\t\t\t\t\tnewTrigger[\"event\"], newTrigger[\"command\"] = migrateEventHook(trigger[\"event\"], trigger[\"command\"])\n\t\t\t\t\tnewTrigger[\"command\"] = newTrigger[\"command\"].split(\",\")\n\t\t\t\t\tnewEvents[\"subscriptions\"].append(newTrigger)\n\n\t\t\tself._config[\"events\"] = newEvents\n\t\t\tself._logger.info(\"Migrated %d event subscriptions to new format and structure\" % len(newEvents[\"subscriptions\"]))\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\tdef save(self, force=False):\n\t\tif not self._dirty and not force:\n\t\t\treturn False\n\n\t\twith open(self._configfile, \"wb\") as configFile:\n\t\t\tyaml.safe_dump(self._config, configFile, default_flow_style=False, indent=\" \", allow_unicode=True)\n\t\t\tself._dirty = False\n\t\tself.load()\n\t\treturn True\n\n\t@property\n\tdef last_modified(self):\n\t\t\"\"\"\n\t\tReturns:\n\t\t int: The last modification time of the configuration file.\n\t\t\"\"\"\n\t\tstat = os.stat(self._configfile)\n\t\treturn stat.st_mtime\n\n\t#~~ getter\n\n\tdef get(self, path, asdict=False, config=None, defaults=None, preprocessors=None, merged=False, incl_defaults=True):\n\t\timport octoprint.util as util\n\n\t\tif len(path) == 0:\n\t\t\treturn None\n\n\t\tif config is None:\n\t\t\tconfig = self._config\n\t\tif defaults is None:\n\t\t\tdefaults = default_settings\n\t\tif preprocessors is None:\n\t\t\tpreprocessors = self._get_preprocessors\n\n\t\twhile len(path) > 1:\n\t\t\tkey = path.pop(0)\n\t\t\tif key in config and key in defaults:\n\t\t\t\tconfig = config[key]\n\t\t\t\tdefaults = defaults[key]\n\t\t\telif incl_defaults and key in defaults:\n\t\t\t\tconfig = {}\n\t\t\t\tdefaults = defaults[key]\n\t\t\telse:\n\t\t\t\treturn None\n\n\t\t\tif preprocessors and isinstance(preprocessors, dict) and key in preprocessors:\n\t\t\t\tpreprocessors = preprocessors[key]\n\n\n\t\tk = path.pop(0)\n\t\tif not isinstance(k, (list, tuple)):\n\t\t\tkeys = [k]\n\t\telse:\n\t\t\tkeys = k\n\n\t\tif asdict:\n\t\t\tresults = {}\n\t\telse:\n\t\t\tresults = []\n\t\tfor key in keys:\n\t\t\tif key in config:\n\t\t\t\tvalue = config[key]\n\t\t\t\tif merged and key in defaults:\n\t\t\t\t\tvalue = util.dict_merge(defaults[key], value)\n\t\t\telif incl_defaults and key in defaults:\n\t\t\t\tvalue = defaults[key]\n\t\t\telse:\n\t\t\t\tvalue = None\n\n\t\t\tif preprocessors and 
isinstance(preprocessors, dict) and key in preprocessors and callable(preprocessors[key]):\n\t\t\t\tvalue = preprocessors[key](value)\n\n\t\t\tif asdict:\n\t\t\t\tresults[key] = value\n\t\t\telse:\n\t\t\t\tresults.append(value)\n\n\t\tif not isinstance(k, (list, tuple)):\n\t\t\tif asdict:\n\t\t\t\treturn results.values().pop()\n\t\t\telse:\n\t\t\t\treturn results.pop()\n\t\telse:\n\t\t\treturn results\n\n\tdef getInt(self, path, config=None, defaults=None, preprocessors=None, incl_defaults=True):\n\t\tvalue = self.get(path, config=config, defaults=defaults, preprocessors=preprocessors, incl_defaults=incl_defaults)\n\t\tif value is None:\n\t\t\treturn None\n\n\t\ttry:\n\t\t\treturn int(value)\n\t\texcept ValueError:\n\t\t\tself._logger.warn(\"Could not convert %r to a valid integer when getting option %r\" % (value, path))\n\t\t\treturn None\n\n\tdef getFloat(self, path, config=None, defaults=None, preprocessors=None, incl_defaults=True):\n\t\tvalue = self.get(path, config=config, defaults=defaults, preprocessors=preprocessors, incl_defaults=incl_defaults)\n\t\tif value is None:\n\t\t\treturn None\n\n\t\ttry:\n\t\t\treturn float(value)\n\t\texcept ValueError:\n\t\t\tself._logger.warn(\"Could not convert %r to a valid integer when getting option %r\" % (value, path))\n\t\t\treturn None\n\n\tdef getBoolean(self, path, config=None, defaults=None, preprocessors=None, incl_defaults=True):\n\t\tvalue = self.get(path, config=config, defaults=defaults, preprocessors=preprocessors, incl_defaults=incl_defaults)\n\t\tif value is None:\n\t\t\treturn None\n\t\tif isinstance(value, bool):\n\t\t\treturn value\n\t\tif isinstance(value, (int, float)):\n\t\t\treturn value != 0\n\t\tif isinstance(value, (str, unicode)):\n\t\t\treturn value.lower() in valid_boolean_trues\n\t\treturn value is not None\n\n\tdef getBaseFolder(self, type, create=True):\n\t\tif type not in default_settings[\"folder\"].keys() + [\"base\"]:\n\t\t\treturn None\n\n\t\tif type == \"base\":\n\t\t\treturn self._basedir\n\n\t\tfolder = self.get([\"folder\", type])\n\t\tif folder is None:\n\t\t\tfolder = self._get_default_folder(type)\n\n\t\tif not os.path.isdir(folder):\n\t\t\tif create:\n\t\t\t\tos.makedirs(folder)\n\t\t\telse:\n\t\t\t\traise IOError(\"No such folder: {folder}\".format(folder=folder))\n\n\t\treturn folder\n\n\tdef listScripts(self, script_type):\n\t\treturn map(lambda x: x[len(script_type + \"/\"):], filter(lambda x: x.startswith(script_type + \"/\"), self._get_scripts(script_type)))\n\n\tdef loadScript(self, script_type, name, context=None, source=False):\n\t\tif context is None:\n\t\t\tcontext = dict()\n\t\tcontext.update(dict(script=dict(type=script_type, name=name)))\n\n\t\ttemplate = self._get_script_template(script_type, name, source=source)\n\t\tif template is None:\n\t\t\treturn None\n\n\t\tif source:\n\t\t\tscript = template\n\t\telse:\n\t\t\ttry:\n\t\t\t\tscript = template.render(**context)\n\t\t\texcept:\n\t\t\t\tself._logger.exception(\"Exception while trying to render script {script_type}:{name}\".format(**locals()))\n\t\t\t\treturn None\n\n\t\treturn script\n\n\t#~~ setter\n\n\tdef set(self, path, value, force=False, defaults=None, config=None, preprocessors=None):\n\t\tif len(path) == 0:\n\t\t\treturn\n\n\t\tif self._mtime is not None and self.last_modified != self._mtime:\n\t\t\tself.load()\n\n\t\tif config is None:\n\t\t\tconfig = self._config\n\t\tif defaults is None:\n\t\t\tdefaults = default_settings\n\t\tif preprocessors is None:\n\t\t\tpreprocessors = self._set_preprocessors\n\n\t\twhile len(path) > 
1:\n\t\t\tkey = path.pop(0)\n\t\t\tif key in config.keys() and key in defaults.keys():\n\t\t\t\tconfig = config[key]\n\t\t\t\tdefaults = defaults[key]\n\t\t\telif key in defaults.keys():\n\t\t\t\tconfig[key] = {}\n\t\t\t\tconfig = config[key]\n\t\t\t\tdefaults = defaults[key]\n\t\t\telse:\n\t\t\t\treturn\n\n\t\t\tif preprocessors and isinstance(preprocessors, dict) and key in preprocessors:\n\t\t\t\tpreprocessors = preprocessors[key]\n\n\t\tkey = path.pop(0)\n\n\t\tif preprocessors and isinstance(preprocessors, dict) and key in preprocessors and callable(preprocessors[key]):\n\t\t\tvalue = preprocessors[key](value)\n\n\t\tif not force and key in defaults and key in config and defaults[key] == value:\n\t\t\tdel config[key]\n\t\t\tself._dirty = True\n\t\telif force or (not key in config and defaults[key] != value) or (key in config and config[key] != value):\n\t\t\tif value is None and key in config:\n\t\t\t\tdel config[key]\n\t\t\telse:\n\t\t\t\tconfig[key] = value\n\t\t\tself._dirty = True\n\n\tdef setInt(self, path, value, force=False, defaults=None, config=None, preprocessors=None):\n\t\tif value is None:\n\t\t\tself.set(path, None, config=config, force=force, defaults=defaults, preprocessors=preprocessors)\n\t\t\treturn\n\n\t\ttry:\n\t\t\tintValue = int(value)\n\t\texcept ValueError:\n\t\t\tself._logger.warn(\"Could not convert %r to a valid integer when setting option %r\" % (value, path))\n\t\t\treturn\n\n\t\tself.set(path, intValue, config=config, force=force, defaults=defaults, preprocessors=preprocessors)\n\n\tdef setFloat(self, path, value, force=False, defaults=None, config=None, preprocessors=None):\n\t\tif value is None:\n\t\t\tself.set(path, None, config=config, force=force, defaults=defaults, preprocessors=preprocessors)\n\t\t\treturn\n\n\t\ttry:\n\t\t\tfloatValue = float(value)\n\t\texcept ValueError:\n\t\t\tself._logger.warn(\"Could not convert %r to a valid integer when setting option %r\" % (value, path))\n\t\t\treturn\n\n\t\tself.set(path, floatValue, config=config, force=force, defaults=defaults, preprocessors=preprocessors)\n\n\tdef setBoolean(self, path, value, force=False, defaults=None, config=None, preprocessors=None):\n\t\tif value is None or isinstance(value, bool):\n\t\t\tself.set(path, value, config=config, force=force, defaults=defaults, preprocessors=preprocessors)\n\t\telif value.lower() in valid_boolean_trues:\n\t\t\tself.set(path, True, config=config, force=force, defaults=defaults, preprocessors=preprocessors)\n\t\telse:\n\t\t\tself.set(path, False, config=config, force=force, defaults=defaults, preprocessors=preprocessors)\n\n\tdef setBaseFolder(self, type, path, force=False):\n\t\tif type not in default_settings[\"folder\"].keys():\n\t\t\treturn None\n\n\t\tcurrentPath = self.getBaseFolder(type)\n\t\tdefaultPath = self._get_default_folder(type)\n\t\tif (path is None or path == defaultPath) and \"folder\" in self._config.keys() and type in self._config[\"folder\"].keys():\n\t\t\tdel self._config[\"folder\"][type]\n\t\t\tif not self._config[\"folder\"]:\n\t\t\t\tdel self._config[\"folder\"]\n\t\t\tself._dirty = True\n\t\telif (path != currentPath and path != defaultPath) or force:\n\t\t\tif not \"folder\" in self._config.keys():\n\t\t\t\tself._config[\"folder\"] = {}\n\t\t\tself._config[\"folder\"][type] = path\n\t\t\tself._dirty = True\n\n\tdef saveScript(self, script_type, name, script):\n\t\tscript_folder = self.getBaseFolder(\"scripts\")\n\t\tfilename = os.path.realpath(os.path.join(script_folder, script_type, name))\n\t\tif not 
filename.startswith(script_folder):\n\t\t\t# oops, jail break, that shouldn't happen\n\t\t\traise ValueError(\"Invalid script path to save to: {filename} (from {script_type}:{name})\".format(**locals()))\n\n\t\tpath, _ = os.path.split(filename)\n\t\tif not os.path.exists(path):\n\t\t\tos.makedirs(path)\n\t\twith open(filename, \"w+\") as f:\n\t\t\tf.write(script)\n\ndef _default_basedir(applicationName):\n\t# taken from http://stackoverflow.com/questions/1084697/how-do-i-store-desktop-application-data-in-a-cross-platform-way-for-python\n\tif sys.platform == \"darwin\":\n\t\tfrom AppKit import NSSearchPathForDirectoriesInDomains\n\t\t# http://developer.apple.com/DOCUMENTATION/Cocoa/Reference/Foundation/Miscellaneous/Foundation_Functions/Reference/reference.html#//apple_ref/c/func/NSSearchPathForDirectoriesInDomains\n\t\t# NSApplicationSupportDirectory = 14\n\t\t# NSUserDomainMask = 1\n\t\t# True for expanding the tilde into a fully qualified path\n\t\treturn os.path.join(NSSearchPathForDirectoriesInDomains(14, 1, True)[0], applicationName)\n\telif sys.platform == \"win32\":\n\t\treturn os.path.join(os.environ[\"APPDATA\"], applicationName)\n\telse:\n\t\treturn os.path.expanduser(os.path.join(\"~\", \".\" + applicationName.lower()))\n",
"path": "src/octoprint/settings.py"
}
] | diff --git a/src/octoprint/settings.py b/src/octoprint/settings.py
index f5fd85a235..982e932da3 100644
--- a/src/octoprint/settings.py
+++ b/src/octoprint/settings.py
@@ -83,7 +83,7 @@ def settings(init=False, basedir=None, configfile=None):
"sdStatus": 1
},
"additionalPorts": [],
- "longRunningCommands": ["G4", "G28", "G29", "G30", "G32"]
+ "longRunningCommands": ["G4", "G28", "G29", "G30", "G32", "M400", "M226"]
},
"server": {
"host": "0.0.0.0",
|
ibis-project__ibis-9088 | docs: improvements to the home page
The Ibis project home page is better than it once was [citation needed], but
can use some improvements. In particular, it'd be great if we could have an
[interactive demo similar to
DuckDB's](https://shell.duckdb.org/#queries=v0,%20%20-Create-table-from-Parquet-file%0ACREATE-TABLE-train_services-AS%0A----FROM-'s3%3A%2F%2Fduckdb%20blobs%2Ftrain_services.parquet'~,%20%20-Get-the-top%203-busiest-train-stations%0ASELECT-station_name%2C-count(*)-AS-num_services%0AFROM-train_services%0AGROUP-BY-ALL%0AORDER-BY-num_services-DESC%0ALIMIT-3~).
This would require [PyArrow in
Pyodide](https://github.com/pyodide/pyodide/issues/2933) as the last blocker, I
think.
Regardless, we should ensure the landing page answers the following for a new/prospective user:
- What is Ibis?
- Why should I use Ibis?
- Confidence that Ibis is a well-supported, production-ready library
Unfortunately, this may involve more HTML/CSS than I'm comfortable doing, but
we'll figure something out.
| [
{
"content": "from __future__ import annotations\n\nimport plotly.graph_objects as go\n\n\ndef to_greyish(hex_code, grey_value=128):\n hex_code = hex_code.lstrip(\"#\")\n r, g, b = int(hex_code[0:2], 16), int(hex_code[2:4], 16), int(hex_code[4:6], 16)\n\n new_r = (r + grey_value) // 2\n new_g = (g + grey_value) // 2\n new_b = (b + grey_value) // 2\n\n new_hex_code = f\"#{new_r:02x}{new_g:02x}{new_b:02x}\"\n\n return new_hex_code\n\n\ncategory_colors = {\n \"Ibis API\": \"#7C65A0\",\n \"SQL\": \"#6A9BC9\",\n \"DataFrame\": \"#D58273\",\n}\n\nbackend_categories = {\n list(category_colors.keys())[1]: [\n \"BigQuery\",\n \"ClickHouse\",\n \"DataFusion\",\n \"Druid\",\n \"DuckDB\",\n \"Exasol\",\n \"Flink\",\n \"Impala\",\n \"MSSQL\",\n \"MySQL\",\n \"Oracle\",\n \"PostgreSQL\",\n \"PySpark\",\n \"RisingWave\",\n \"Snowflake\",\n \"SQLite\",\n \"Trino\",\n ],\n list(category_colors.keys())[2]: [\"Dask\", \"pandas\", \"Polars\"],\n}\n\nnodes, links = [], []\nnode_index = {}\n\nnodes.append({\"label\": \"Ibis API\", \"color\": category_colors[\"Ibis API\"]})\nnode_index[\"Ibis API\"] = 0\n\nidx = 1\nfor category, backends in backend_categories.items():\n nodes.append({\"label\": category, \"color\": category_colors[category]})\n node_index[category] = idx\n links.append({\"source\": 0, \"target\": idx, \"value\": len(backends)})\n idx += 1\n\n for backend in backends:\n if backend not in node_index:\n nodes.append({\"label\": backend, \"color\": category_colors[category]})\n node_index[backend] = idx\n idx += 1\n links.append(\n {\n \"source\": node_index[category],\n \"target\": node_index[backend],\n \"value\": 1,\n }\n )\n\nfig = go.Figure(\n data=[\n go.Sankey(\n node=dict(\n pad=20,\n thickness=20,\n line=dict(color=\"grey\", width=0.5),\n label=[node[\"label\"] for node in nodes],\n color=[node[\"color\"] for node in nodes],\n ),\n link=dict(\n source=[link[\"source\"] for link in links],\n target=[link[\"target\"] for link in links],\n value=[link[\"value\"] for link in links],\n line=dict(color=\"grey\", width=0.5),\n color=[to_greyish(nodes[link[\"target\"]][\"color\"]) for link in links],\n ),\n )\n ],\n)\n\nfig.update_layout(\n title_text=\"Ibis backend types\",\n font_size=24,\n # font_family=\"Arial\",\n title_font_size=30,\n margin=dict(l=30, r=30, t=80, b=30),\n template=\"plotly_dark\",\n)\n",
"path": "docs/backends_sankey.py"
}
] | [
{
"content": "from __future__ import annotations\n\nimport plotly.graph_objects as go\n\n\ndef to_greyish(hex_code, grey_value=128):\n hex_code = hex_code.lstrip(\"#\")\n r, g, b = int(hex_code[0:2], 16), int(hex_code[2:4], 16), int(hex_code[4:6], 16)\n\n new_r = (r + grey_value) // 2\n new_g = (g + grey_value) // 2\n new_b = (b + grey_value) // 2\n\n new_hex_code = f\"#{new_r:02x}{new_g:02x}{new_b:02x}\"\n\n return new_hex_code\n\n\ncategory_colors = {\n \"Ibis API\": \"#7C65A0\",\n \"SQL\": \"#6A9BC9\",\n \"DataFrame\": \"#D58273\",\n}\n\nbackend_categories = {\n list(category_colors.keys())[1]: [\n \"BigQuery\",\n \"ClickHouse\",\n \"DataFusion\",\n \"Druid\",\n \"DuckDB\",\n \"Exasol\",\n \"Flink\",\n \"Impala\",\n \"MSSQL\",\n \"MySQL\",\n \"Oracle\",\n \"PostgreSQL\",\n \"PySpark\",\n \"RisingWave\",\n \"Snowflake\",\n \"SQLite\",\n \"Trino\",\n ],\n list(category_colors.keys())[2]: [\"Dask\", \"pandas\", \"Polars\"],\n}\n\nnodes, links = [], []\nnode_index = {}\n\nnodes.append({\"label\": \"Ibis API\", \"color\": category_colors[\"Ibis API\"]})\nnode_index[\"Ibis API\"] = 0\n\nidx = 1\nfor category, backends in backend_categories.items():\n nodes.append({\"label\": category, \"color\": category_colors[category]})\n node_index[category] = idx\n links.append({\"source\": 0, \"target\": idx, \"value\": len(backends)})\n idx += 1\n\n for backend in backends:\n if backend not in node_index:\n nodes.append({\"label\": backend, \"color\": category_colors[category]})\n node_index[backend] = idx\n idx += 1\n links.append(\n {\n \"source\": node_index[category],\n \"target\": node_index[backend],\n \"value\": 1,\n }\n )\n\nfig = go.Figure(\n data=[\n go.Sankey(\n node=dict(\n pad=20,\n thickness=20,\n line=dict(color=\"grey\", width=0.5),\n label=[node[\"label\"] for node in nodes],\n color=[node[\"color\"] for node in nodes],\n ),\n link=dict(\n source=[link[\"source\"] for link in links],\n target=[link[\"target\"] for link in links],\n value=[link[\"value\"] for link in links],\n line=dict(color=\"grey\", width=0.5),\n color=[to_greyish(nodes[link[\"target\"]][\"color\"]) for link in links],\n ),\n )\n ],\n)\n\nfig.update_layout(\n title_text=\"Ibis backend types\",\n font_size=20,\n # font_family=\"Arial\",\n title_font_size=30,\n margin=dict(l=30, r=30, t=80, b=30),\n template=\"plotly_dark\",\n)\n",
"path": "docs/backends_sankey.py"
}
] | diff --git a/docs/backends_sankey.py b/docs/backends_sankey.py
index 9bc7270c988c..b04cea212faf 100644
--- a/docs/backends_sankey.py
+++ b/docs/backends_sankey.py
@@ -94,7 +94,7 @@ def to_greyish(hex_code, grey_value=128):
fig.update_layout(
title_text="Ibis backend types",
- font_size=24,
+ font_size=20,
# font_family="Arial",
title_font_size=30,
margin=dict(l=30, r=30, t=80, b=30),
diff --git a/docs/index.qmd b/docs/index.qmd
index 4f1688ebb158..5be4a9bd4dd8 100644
--- a/docs/index.qmd
+++ b/docs/index.qmd
@@ -40,513 +40,226 @@ about:
::: {#about}
:::
-## Install
+{{< pagebreak >}}
-We recommend starting with the default backend (DuckDB).
+::: {.column-page}
-```bash
-pip install 'ibis-framework[duckdb,examples]' # <1>
-```
-
-1. Install Ibis with the DuckDB backend along with examples.
-
-<div class="d-grid gap-2"><a class="btn btn-lg btn-primary" data-bs-toggle="collapse" href="#collapseBackends" role="button" aria-expanded="false" aria-controls="collapseBackends" margin="100px">Show supported backends</a></div>
-
-###
-
-::: {#collapseBackends .collapse .multi-collapse}
-
-## Backends
-
-Need to use Ibis with a backend that isn't currently supported? [Let us know!](https://github.com/ibis-project/ibis/discussions/new?category=q-a)
-
-{{< include ./_tabsets/install.qmd >}}
+### An open source dataframe library that works with any data system
-See the [backend support matrix](support_matrix.qmd) for details on operations supported. [Open a feature request](https://github.com/ibis-project/ibis/issues/new?assignees=&labels=feature&projects=&template=feature-request.yml&title=feat) if you'd like to see support for an operation in a given backend. If the backend supports it, we'll do our best to add it quickly!
+- Use the same API for 20+ backends
+- Fast local dataframes with embedded DuckDB (default), Polars, or DataFusion
+- Iterate locally and deploy remotely by changing a single line of code
+- Compose SQL and Python dataframe code, bridging the gap between data engineering and data science
-:::
-
-<div class="d-grid gap-2"><a class="btn btn-lg btn-primary" data-bs-toggle="collapse" href="#collapseQuickstart" role="button" aria-expanded="false" aria-controls="collapseQuickstart">Show quickstart</a></div>
+```{python}
+#| code-fold: true
+#| echo: false
-###
+import ibis
-::: {#collapseQuickstart .collapse .multi-collapse}
+t = ibis.examples.penguins.fetch()
+t.to_parquet("penguins.parquet")
+```
-## Quickstart
+## Ibis: the portable Python dataframe library
-See [the getting started tutorial](tutorials/getting_started.qmd) for a more in-depth introduction to Ibis. Below is a quick overview.
+Ibis offers a familiar local dataframe experience with outstanding performance,
+using [DuckDB](https://duckdb.org) by default.
```{python}
import ibis # <1>
-import ibis.selectors as s # <1>
ibis.options.interactive = True # <2>
-t = ibis.examples.penguins.fetch() # <3>
+t = ibis.read_parquet("penguins.parquet", table_name="penguins") # <3>
t.head(3) # <4>
```
-1. Ensure you install Ibis first.
-2. Use interactive mode for exploratory data analysis (EDA) or demos.
-3. Load a dataset from the built-in examples.
-4. Display the table.
-
-
-Ibis is a dataframe library with familiar syntax.
-
-```{python}
-t[10:15] # <1>
-```
-
-1. Display a slice of the table.
-
-<div class="d-grid gap-2"><a class="btn btn-lg btn-primary" data-bs-toggle="collapse" href="#collapseAnalytics" role="button" aria-expanded="false" aria-controls="collapseAnalytics">Show analytics</a></div>
-
-###
+1. Import Ibis.
+2. Enable interactive mode for exploratory data analysis (EDA) or demos.
+3. Read a Parquet file and specify a table name (optional).
+4. Display the first few rows of the table.
-::: {#collapseAnalytics .collapse .multi-collapse}
-
-### Analytics
-
-Ibis is built for easy analytics at scale in Python.
+Iterate and explore data locally:
```{python}
-( # <1>
- t.filter(ibis._["body_mass_g"] != None) # <1>
- .group_by(["species", "island"]) # <1>
- .aggregate(count=ibis._.count()) # <1>
- .order_by(ibis.desc("count")) # <1>
-) # <1>
+grouped = t.group_by("species", "island").agg(count=t.count()).order_by("count") # <1>
+grouped # <2>
```
-1. Group by species and island, and compute the number of rows in each group.
-
-:::
-
-<div class="d-grid gap-2"><a class="btn btn-lg btn-primary" data-bs-toggle="collapse" href="#collapseVisualization" role="button" aria-expanded="false" aria-controls="collapseVisualization">Show EDA + visualization</a></div>
-
-###
-
-::: {#collapseVisualization .collapse .multi-collapse}
+1. Transform the table.
+2. Display the transformed table.
-### Exploratory data analysis (EDA) and visualization
+### One API for 20+ backends
-#### Exploratory data analysis
-
-Ibis has built-in methods for exploration and [visualization](#visualization).
+Use the same dataframe API for 20+ backends:
```{python}
-num_species = int(t.select("species").nunique().to_pandas()) # <1>
-t["species"].topk(num_species) # <2>
-```
-
-1. Compute the number of species in the dataset.
-2. Display the top species by count.
-
-#### Visualization
+#| code-fold: true
+#| echo: false
-Ibis works with any Python plotting library that supports the [dataframe interchange protocol](https://data-apis.org/dataframe-protocol/latest/index.html).
-
-```{python}
-grouped = ( # <1>
- t.group_by("species") # <1>
- .aggregate(count=ibis._.count()) # <1>
- .order_by(ibis.desc("count")) # <1>
-) # <1>
-grouped # <2>
+from backends_sankey import fig
+fig.show()
```
-1. Setup data to plot.
-2. Display the table.
+For example:
::: {.panel-tabset}
-## Altair
-
-```{.bash}
-pip install altair
-```
+## DuckDB
```{python}
-import altair as alt
-
-chart = (
- alt.Chart(grouped.to_pandas())
- .mark_bar()
- .encode(
- x="species",
- y="count",
- tooltip=["species", "count"],
- )
- .properties(width=600, height=400)
- .interactive()
-)
-chart
-```
-
-## matplotlib
-
-```{.bash}
-pip install matplotlib
+con = ibis.connect("duckdb://")
```
```{python}
-import matplotlib.pyplot as plt
-
-chart = grouped.to_pandas().plot.bar(
- x="species",
- y="count",
- figsize=(600 / 100, 400 / 100),
-)
-plt.show()
-```
-
-## Plotly
-
-```{.bash}
-pip install plotly
+t = con.read_parquet("penguins.parquet")
+t.head(3)
```
```{python}
-import plotly.express as px
-
-chart = px.bar(
- grouped.to_pandas(),
- x="species",
- y="count",
- width=600,
- height=400,
-)
-chart
+t.group_by("species", "island").agg(count=t.count()).order_by("count")
```
-## plotnine
+## Polars
-```{.bash}
-pip install plotnine
-```
```{python}
-from plotnine import ggplot, aes, geom_bar, theme
-
-chart = (
- ggplot(
- grouped,
- aes(x="species", y="count"),
- )
- + geom_bar(stat="identity")
- + theme(figure_size=(600 / 100, 400 / 100))
-)
-chart
-```
-
-## seaborn
-
-```{.bash}
-pip install seaborn
+con = ibis.connect("polars://")
```
```{python}
-import seaborn as sns
-
-chart = sns.barplot(
- data=grouped.to_pandas(),
- x="species",
- y="count",
-)
-chart.figure.set_size_inches(600 / 100, 400 / 100)
+t = con.read_parquet("penguins.parquet")
+t.head(3)
```
-:::
-
-:::
-
-<div class="d-grid gap-2"><a class="btn btn-lg btn-primary" data-bs-toggle="collapse" href="#collapseDataScience" role="button" aria-expanded="false" aria-controls="collapseDataScience">Show data science</a></div>
-
-###
-
-::: {#collapseDataScience .collapse .multi-collapse}
-
-### Data science
-
-Use Ibis with your favorite data science libraries for concise and efficient workflows.
-
```{python}
-import ibis.selectors as s # <1>
-
-
-def transform(t): # <2>
- t = t.mutate( # <2>
- s.across(s.numeric(), {"zscore": lambda x: (x - x.mean()) / x.std()}) # <2>
- ).dropna() # <2>
- return t # <2>
-
-
-f = transform(t.drop("year")) # <3>
-f.select("species", "island", s.contains("zscore")) # <4>
+t.group_by("species", "island").agg(count=t.count()).order_by("count")
```
-1. Import the selectors module.
-2. Define a function to transform the table for code reuse (compute z-scores on numeric columns).
-3. Apply the function to the table and assign it to a new variable.
-4. Display the transformed table.
-
-```bash
-pip install scikit-learn
-```
+## DataFusion
```{python}
-import plotly.express as px # <1>
-from sklearn.decomposition import PCA # <1>
-
-X = f.select(s.contains("zscore")) # <2>
-
-n_components = 3 # <3>
-pca = PCA(n_components=n_components).fit(X) # <3>
-
-t_pca = ibis.memtable(pca.transform(X)).rename( # <4>
- {"pc1": "col0", "pc2": "col1", "pc3": "col2"} # <4>
-) # <4>
-
-f = f.mutate(row_number=ibis.row_number().over()).join( # <5>
- t_pca.mutate(row_number=ibis.row_number().over()), # <5>
- "row_number", # <5>
-) # <5>
-
-px.scatter_3d( # <6>
- f.to_pandas(), # <6>
- x="pc1", # <6>
- y="pc2", # <6>
- z="pc3", # <6>
- color="species", # <6>
- symbol="island", # <6>
-) # <6>
+con = ibis.connect("datafusion://")
```
-1. Import data science libraries
-2. Select "features" (numeric columns) as X
-3. Compute PCA
-4. Create a table from the PCA results
-5. Join the PCA results to the original table
-6. Plot the results
-
-:::
-
-###
-
-<div class="d-grid gap-2"><a class="btn btn-lg btn-primary" data-bs-toggle="collapse" href="#collapseInputOutput" role="button" aria-expanded="false" aria-controls="collapseInputOutput">Show input and output</a></div>
-
-###
-
-::: {#collapseInputOutput .collapse .multi-collapse}
-
-### Input and output
-
-Ibis supports a variety of input and output options.
-
-{{< include /_code/input_output_penguins.qmd >}}
-
-:::
-
-<div class="d-grid gap-2"><a class="btn btn-lg btn-primary" data-bs-toggle="collapse" href="#collapseSQLPython" role="button" aria-expanded="false" aria-controls="collapseSQLPython">Show SQL + Python</a></div>
-
-::: {#collapseSQLPython .collapse .multi-collapse}
-
-### SQL + Python
-
-Ibis has the `ibis.to_sql` to generate SQL strings.
-
-In a Jupyter notebook or IPython shell session, the output of `ibis.to_sql` will be syntax highlighted.
-
-In a plain Python REPL use `print(ibis.to_sql(...))` to pretty print SQL.
-
-Ibis uses [SQLGlot](https://sqlglot.com) under the hood to allow passing a `dialect` parameter to SQL methods.
-
-::: {.panel-tabset}
-
-## BigQuery
-
-```{python}
-dialect = "bigquery" # <1>
-sql = ibis.to_sql( # <2>
- grouped, # <2>
- dialect=dialect, # <2>
-) # <2>
-sql # <3>
-```
-
-1. Set the dialect.
-2. Convert the table to a SQL string.
-3. Display the SQL string.
-
-You can chain Ibis expressions and `.sql` together.
-
```{python}
-con.sql(sql, dialect=dialect).filter(ibis._["species"] == "Adelie") # <1>
+t = con.read_parquet("penguins.parquet")
+t.head(3)
```
-1. Chain `.sql` calls and Ibis expressions together.
-
-## Snowflake
-
```{python}
-dialect = "snowflake" # <1>
-sql = ibis.to_sql( # <2>
- grouped, # <2>
- dialect=dialect, # <2>
-) # <2>
-sql # <3>
+t.group_by("species", "island").agg(count=t.count()).order_by("count")
```
-1. Set the dialect.
-2. Convert the table to a SQL string.
-3. Display the SQL string.
-
-You can chain Ibis expressions and `.sql` together.
+## PySpark
```{python}
-con.sql(sql, dialect=dialect).filter(ibis._["species"] == "Adelie") # <1>
+con = ibis.connect("pyspark://")
```
-1. Chain `.sql` calls and Ibis expressions together.
-
-## Oracle
-
```{python}
-dialect = "oracle" # <1>
-sql = ibis.to_sql( # <2>
- grouped, # <2>
- dialect=dialect, # <2>
-) # <2>
-sql # <3>
+t = con.read_parquet("penguins.parquet")
+t.head(3)
```
-1. Set the dialect.
-2. Convert the table to a SQL string.
-3. Display the SQL string.
-
-You can chain Ibis expressions and `.sql` together.
-
```{python}
-con.sql(sql, dialect=dialect).filter(ibis._["species"] == "Adelie") # <1>
+t.group_by("species", "island").agg(count=t.count()).order_by("count")
```
-1. Chain `.sql` calls and Ibis expressions together.
+:::
-## MySQL
+This allows you to iterate locally and deploy remotely by changing a single line
+of code. For instance, develop locally with DuckDB and deploy remotely to
+BigQuery. Or, using any combination of backends that meet your requirements.
-```{python}
-dialect = "mysql" # <1>
-sql = ibis.to_sql( # <2>
- grouped, # <2>
- dialect=dialect, # <2>
-) # <2>
-sql # <3>
-```
+### Python + SQL: better together
-1. Set the dialect.
-2. Convert the table to a SQL string.
-3. Display the SQL string.
+Ibis works by decoupling the dataframe API from the backend execution. Most
+backends support a SQL dialect, which Ibis compiles its expressions into using
+[SQLGlot](https://github.com/tobymao/sqlglot). You can inspect the SQL that Ibis
+generates for any SQL backend:
-You can chain Ibis expressions and `.sql` together.
```{python}
-con.sql(sql, dialect=dialect).filter(ibis._["species"] == "Adelie") # <1>
+ibis.to_sql(grouped) # <1>
```
-1. Chain `.sql` calls and Ibis expressions together.
+1. Display the SQL generated from the table expression.
-## MSSQL
+And use SQL strings directly, mixing and matching with Python dataframe code:
```{python}
-dialect = "mssql" # <1>
-sql = ibis.to_sql( # <2>
- grouped, # <2>
- dialect=dialect, # <2>
-) # <2>
-sql # <3>
-```
+#| code-fold: true
+#| echo: false
-1. Set the dialect.
-2. Convert the table to a SQL string.
-3. Display the SQL string.
-
-You can chain Ibis expressions and `.sql` together.
-
-```{python}
-con.sql(sql, dialect=dialect).filter(ibis._["species"] == "Adelie") # <1>
+t = ibis.read_parquet("penguins.parquet", table_name="penguins")
```
-1. Chain `.sql` calls and Ibis expressions together.
-
-## PostgreSQL
-
```{python}
-dialect = "postgres" # <1>
-sql = ibis.to_sql( # <2>
- grouped, # <2>
- dialect=dialect, # <2>
-) # <2>
-sql # <3>
+t.sql( # <1>
+ "SELECT species, island, COUNT(*) AS count FROM penguins GROUP BY species, island" # <1>
+).order_by("count") # <2>
```
-1. Set the dialect.
-2. Convert the table to a SQL string.
-3. Display the SQL string.
-
-You can chain Ibis expressions and `.sql` together.
+1. Transform the table using SQL.
+2. Then, transform the table using Python dataframe code.
-```{python}
-con.sql(sql, dialect=dialect).filter(ibis._["species"] == "Adelie") # <1>
-```
+This allows you to combine the flexibility of Python with the scale and
+performance of modern SQL.
-1. Chain `.sql` calls and Ibis expressions together.
+::: {.text-center}
+## Users say...
+:::
-## SQLite
+::: {.index-grid}
-```{python}
-dialect = "sqlite" # <1>
-sql = ibis.to_sql( # <2>
- grouped, # <2>
- dialect=dialect, # <2>
-) # <2>
-sql # <3>
-```
+::: {.index-g-col-4 .card .border-light .mb-3 .text-center}
+::: {.card-body}
+["Ibis is amazing, there is so much bikeshedding out there that this library
+improves upon. I love that now we can empower any visualization with nearly
+any dataset! Big thanks to those who have contributed!"]{.card-text}
-1. Set the dialect.
-2. Convert the table to a SQL string.
-3. Display the SQL string.
+[Nick Shook]{.blockquote-footer}
+:::
+:::
-You can chain Ibis expressions and `.sql` together.
+::: {.index-g-col-4 .card .border-light .mb-3 .text-center}
+::: {.card-body}
+"I now have Ibis code that runs PySpark in my Databricks environment and Polars
+on my laptop which is pretty slick 🔥"
-```{python}
-con.sql(sql, dialect=dialect).filter(ibis._["species"] == "Adelie") # <1>
-```
+[Mark Druffel]{.blockquote-footer}
+:::
+:::
-1. Chain `.sql` calls and Ibis expressions together.
+::: {.index-g-col-4 .card .border-light .mb-3 .text-center}
+::: {.card-body}
+"I love that with Ibis, I can use SQL for the heavy lifting or aggregations and
+then switch to a dataframe-like API for the type of dynamic transformations that
+would otherwise be tedious to do in pure SQL."
-## Trino
+[Daniel Kim]{.blockquote-footer}
+:::
+:::
-```{python}
-dialect = "trino" # <1>
-sql = ibis.to_sql( # <2>
- grouped, # <2>
- dialect=dialect, # <2>
-) # <2>
-sql # <3>
-```
+:::
-1. Set the dialect.
-2. Convert the table to a SQL string.
-3. Display the SQL string.
+::: {.text-center}
+## Get started with Ibis
+:::
-You can chain Ibis expressions and `.sql` together.
+::: {.index-grid .text-center}
-```{python}
-con.sql(sql, dialect=dialect).filter(ibis._["species"] == "Adelie") # <1>
-```
+::: {.index-g-col-4}
+[Why Ibis?](why.qmd){.btn .btn-primary .w-100}
+:::
-1. Chain `.sql` calls and Ibis expressions together.
+::: {.index-g-col-4}
+[Tutorial: getting started](tutorials/getting_started.qmd){.btn .btn-primary .w-100}
+:::
+::: {.index-g-col-4}
+[API reference](/reference){.btn .btn-primary .w-100}
:::
:::
diff --git a/docs/styles.css b/docs/styles.css
index a4608cdf0d1a..01ed4c84633b 100644
--- a/docs/styles.css
+++ b/docs/styles.css
@@ -21,3 +21,17 @@ section[id^="parameters-"] {
margin: auto;
display: block;
}
+
+.index-grid {
+ @extend .grid;
+ display: flex;
+ justify-content: space-between;
+}
+
+.index-g-col-4 {
+ @extend .g-col-4;
+ flex: 1;
+ /* Ensures all columns grow to fill the same space */
+ margin: 0 5px;
+ /* Adds a small margin between columns */
+}
diff --git a/docs/tutorials/getting_started.qmd b/docs/tutorials/getting_started.qmd
index 049a72ec495f..f55c79d3a59e 100644
--- a/docs/tutorials/getting_started.qmd
+++ b/docs/tutorials/getting_started.qmd
@@ -2,6 +2,12 @@
This is a quick tour of some basic commands and usage patterns, just to get your flippers wet.
+::: {.callout-tip}
+You can run this tutorial in a GitHub Codespace with everything setup for you:
+
+[](https://codespaces.new/ibis-project/ibis)
+:::
+
## Install Ibis
{{< include ../_tabsets/install_default.qmd >}}
|
readthedocs__readthedocs.org-2712 | Document that RTD uses `rel` branch for production
Hi, I'd like to add a new builder for Doxygen documentation (but native, not with Breathe). Since there are a lot of branches like rel/relcorp which are far ahead of master, I'd like to know which branch to choose for development.
Thanks in advance!
Oli
| [
{
"content": "# -*- coding: utf-8 -*-\n#\nimport os\nimport sys\n\nfrom recommonmark.parser import CommonMarkParser\n\nsys.path.insert(0, os.path.abspath('..'))\nsys.path.append(os.path.dirname(__file__))\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"readthedocs.settings.dev\")\n\nfrom django.conf import settings\n\nimport django\ndjango.setup()\n\n\nsys.path.append(os.path.abspath('_ext'))\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.intersphinx',\n 'sphinxcontrib.httpdomain',\n 'djangodocs',\n 'doc_extensions',\n]\ntemplates_path = ['_templates']\n\nsource_suffix = ['.rst', '.md']\nsource_parsers = {\n '.md': CommonMarkParser,\n}\n\nmaster_doc = 'index'\nproject = u'Read The Docs'\ncopyright = u'2010, Eric Holscher, Charlie Leifer, Bobby Grace'\nversion = '1.0'\nrelease = '1.0'\nexclude_patterns = ['_build']\ndefault_role = 'obj'\npygments_style = 'sphinx'\nintersphinx_mapping = {\n 'python': ('http://python.readthedocs.io/en/latest/', None),\n 'django': ('http://django.readthedocs.io/en/1.8.x/', None),\n 'sphinx': ('http://sphinx.readthedocs.io/en/latest/', None),\n}\n# This doesn't exist since we aren't shipping any static files ourselves.\n#html_static_path = ['_static']\nhtmlhelp_basename = 'ReadTheDocsdoc'\nlatex_documents = [\n ('index', 'ReadTheDocs.tex', u'Read The Docs Documentation',\n u'Eric Holscher, Charlie Leifer, Bobby Grace', 'manual'),\n]\nman_pages = [\n ('index', 'read-the-docs', u'Read The Docs Documentation',\n [u'Eric Holscher, Charlie Leifer, Bobby Grace'], 1)\n]\n\nexclude_patterns = [\n # 'api' # needed for ``make gettext`` to not die.\n]\n\nlanguage = 'en'\n\nlocale_dirs = [\n 'locale/',\n]\ngettext_compact = False\n\n\non_rtd = os.environ.get('READTHEDOCS', None) == 'True'\nif not on_rtd: # only import and set the theme if we're building docs locally\n import sphinx_rtd_theme\n html_theme = 'sphinx_rtd_theme'\n html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n",
"path": "docs/conf.py"
}
] | [
{
"content": "# -*- coding: utf-8 -*-\n#\nimport os\nimport sys\n\nfrom recommonmark.parser import CommonMarkParser\n\nsys.path.insert(0, os.path.abspath('..'))\nsys.path.append(os.path.dirname(__file__))\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"readthedocs.settings.dev\")\n\nfrom django.conf import settings\n\nimport django\ndjango.setup()\n\n\nsys.path.append(os.path.abspath('_ext'))\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.intersphinx',\n 'sphinxcontrib.httpdomain',\n 'djangodocs',\n 'doc_extensions',\n]\ntemplates_path = ['_templates']\n\nsource_suffix = ['.rst', '.md']\nsource_parsers = {\n '.md': CommonMarkParser,\n}\n\nmaster_doc = 'index'\nproject = u'Read The Docs'\ncopyright = u'2010-2017, Read the Docs, Inc & contributors'\nversion = '1.0'\nrelease = '1.0'\nexclude_patterns = ['_build']\ndefault_role = 'obj'\npygments_style = 'sphinx'\nintersphinx_mapping = {\n 'python': ('http://python.readthedocs.io/en/latest/', None),\n 'django': ('http://django.readthedocs.io/en/1.8.x/', None),\n 'sphinx': ('http://sphinx.readthedocs.io/en/latest/', None),\n}\n# This doesn't exist since we aren't shipping any static files ourselves.\n#html_static_path = ['_static']\nhtmlhelp_basename = 'ReadTheDocsdoc'\nlatex_documents = [\n ('index', 'ReadTheDocs.tex', u'Read The Docs Documentation',\n u'Eric Holscher, Charlie Leifer, Bobby Grace', 'manual'),\n]\nman_pages = [\n ('index', 'read-the-docs', u'Read The Docs Documentation',\n [u'Eric Holscher, Charlie Leifer, Bobby Grace'], 1)\n]\n\nexclude_patterns = [\n # 'api' # needed for ``make gettext`` to not die.\n]\n\nlanguage = 'en'\n\nlocale_dirs = [\n 'locale/',\n]\ngettext_compact = False\n\n\non_rtd = os.environ.get('READTHEDOCS', None) == 'True'\nif not on_rtd: # only import and set the theme if we're building docs locally\n import sphinx_rtd_theme\n html_theme = 'sphinx_rtd_theme'\n html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n",
"path": "docs/conf.py"
}
] | diff --git a/LICENSE.mit b/LICENSE.mit
index fcb099e132c..2447f29c36a 100644
--- a/LICENSE.mit
+++ b/LICENSE.mit
@@ -1,4 +1,4 @@
-Copyright (c) 2011 Charles Leifer, Eric Holscher, Bobby Grace
+Copyright (c) 2010-2017 Read the Docs, Inc & contributors
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
diff --git a/docs/alternate_domains.rst b/docs/alternate_domains.rst
index 6593c3ee0fa..9c84fef99a0 100644
--- a/docs/alternate_domains.rst
+++ b/docs/alternate_domains.rst
@@ -20,9 +20,12 @@ This requires two steps:
* Add a CNAME record in your DNS that point to our servers `readthedocs.io`
* Add a Domain object in the **Project Admin > Domains** page for your project.
+.. note:: The ``Domain`` that should be used is the actual subdomain that you want your docs served on.
+ Generally it will be `docs.projectname.org`.
+
Using pip as an example, http://www.pip-installer.org resolves, but is hosted on our infrastructure.
-As an example, fabric's dig record looks like this::
+As another example, fabric's dig record looks like this::
-> dig docs.fabfile.org
...
diff --git a/docs/api.rst b/docs/api.rst
index a7e252e78aa..4b51fd85614 100644
--- a/docs/api.rst
+++ b/docs/api.rst
@@ -4,6 +4,9 @@ Read the Docs Public API
We have a limited public API that is available for you to get data out of the site.
This document covers only part of the API provided. We have plans to create a read/write API, so that you can easily automate interactions with your project.
+.. warning:: This API is out of date and not currently maintained.
+ We have a v2 API that is currently supported at http://readthedocs.org/api/v2/.
+
A basic API client using slumber
--------------------------------
diff --git a/docs/builds.rst b/docs/builds.rst
index 2d8dfacc202..ec36a3e32a4 100644
--- a/docs/builds.rst
+++ b/docs/builds.rst
@@ -15,6 +15,10 @@ Our current build limits are:
We can increase build limits on a per-project basis,
if you provide a good reason your documentation needs more resources.
+You can see the current Docker build images that we use in our `docker repository <https://github.com/rtfd/readthedocs-docker-images>`_. `Docker Hub <https://hub.docker.com/r/readthedocs/build/>`_ also shows the latest set of images that have been built.
+
+Currently in production we're using the ``readthedocs/build:2.0`` docker image as our default image.
+
How we build documentation
--------------------------
diff --git a/docs/conf.py b/docs/conf.py
index 072c96a5bce..5a3b27dc95a 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -32,7 +32,7 @@
master_doc = 'index'
project = u'Read The Docs'
-copyright = u'2010, Eric Holscher, Charlie Leifer, Bobby Grace'
+copyright = u'2010-2017, Read the Docs, Inc & contributors'
version = '1.0'
release = '1.0'
exclude_patterns = ['_build']
diff --git a/docs/contribute.rst b/docs/contribute.rst
index 6b7b629b234..95e288981a0 100644
--- a/docs/contribute.rst
+++ b/docs/contribute.rst
@@ -45,6 +45,9 @@ Triaging tickets
Here is a brief explanation on how we triage incoming tickets to get a better
sense of what needs to be done on what end.
+.. note:: You will need Triage permission on the project in order to do this.
+ You can ask one of the members of the :doc:`team` to give you access.
+
Initial triage
~~~~~~~~~~~~~~
diff --git a/docs/faq.rst b/docs/faq.rst
index 8e6540bd310..c6ad95f4690 100644
--- a/docs/faq.rst
+++ b/docs/faq.rst
@@ -96,8 +96,6 @@ Deleting a stale or broken build environment
If you're having trouble getting your version to build, try wiping out the existing build/environment files. On your version list page ``/projects/[project]/versions`` there is a "Wipe" button that will remove all of the files associated with your documentation build, but not the documentation itself.
-
-
How do I host multiple projects on one CNAME?
---------------------------------------------
@@ -151,11 +149,6 @@ How do I support multiple languages of documentation?
See the section on :ref:`Localization of Documentation`.
-Do I need to be whitelisted?
-----------------------------
-
-No. Whitelisting has been removed as a concept in Read the Docs. You should have access to all of the features already.
-
Does Read The Docs work well with "legible" docstrings?
-------------------------------------------------------
@@ -207,3 +200,8 @@ file* field.
.. _Sphinx's autoapi: http://sphinx-doc.org/ext/autodoc.html
.. _pip requirements file: https://pip.pypa.io/en/stable/user_guide.html#requirements-files
+
+What commit of Read the Docs is in production?
+----------------------------------------------
+
+We deploy readthedocs.org from the `rel` branch in our GitHub repository. You can see the latest commits that have been deployed by looking on GitHub: https://github.com/rtfd/readthedocs.org/commits/rel
\ No newline at end of file
|
adamchainz__django-cors-headers-851 | Listing Origin, DNT, or Accept-Encoding as allowed request headers is never necessary
### Understanding CORS
- [X] I have read the resources.
### Python Version
_No response_
### Django Version
_No response_
### Package Version
_No response_
### Description
The [README](https://github.com/adamchainz/django-cors-headers#cors_allow_headers-sequencestr) explicitly lists `"accept-encoding"`, `"dnt"`, and `"origin"` in the `CORS_ALLOW_HEADERS` list:
```python
CORS_ALLOW_HEADERS = [
# omitted
"accept-encoding",
# omitted
"dnt",
"origin",
# omitted
]
```
However, contrary to popular belief and according to the Fetch standard, allowing those request headers is never necessary. As so-called [_forbidden request headers_](https://fetch.spec.whatwg.org/#forbidden-request-header), they're indeed handled by the browser, not by the client.
You can safely drop those three elements from that list.
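
For illustration, a minimal sketch of the trimmed setting in a Django `settings.py` might look like the following (the remaining header names are taken from the README excerpt above; whether they match the package's shipped default is an assumption):

```python
# Hypothetical settings.py sketch: the three forbidden request headers are
# omitted, since browsers set Origin, DNT and Accept-Encoding themselves and
# allowing them via CORS has no effect.
CORS_ALLOW_HEADERS = [
    "accept",
    "authorization",
    "content-type",
    "user-agent",
    "x-csrftoken",
    "x-requested-with",
]
```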
| [
{
"content": "from __future__ import annotations\n\ndefault_headers = (\n \"accept\",\n \"accept-encoding\",\n \"authorization\",\n \"content-type\",\n \"dnt\",\n \"origin\",\n \"user-agent\",\n \"x-csrftoken\",\n \"x-requested-with\",\n)\n\ndefault_methods = (\"DELETE\", \"GET\", \"OPTIONS\", \"PATCH\", \"POST\", \"PUT\")\n",
"path": "src/corsheaders/defaults.py"
}
] | [
{
"content": "from __future__ import annotations\n\ndefault_headers = (\n \"accept\",\n \"authorization\",\n \"content-type\",\n \"user-agent\",\n \"x-csrftoken\",\n \"x-requested-with\",\n)\n\ndefault_methods = (\"DELETE\", \"GET\", \"OPTIONS\", \"PATCH\", \"POST\", \"PUT\")\n",
"path": "src/corsheaders/defaults.py"
}
] | diff --git a/CHANGELOG.rst b/CHANGELOG.rst
index c66c08e5..d3df4261 100644
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -2,6 +2,12 @@
Changelog
=========
+* Remove three headers from the default "accept list": ``accept-encoding``, ``dnt``, and ``origin``.
+ These are `Forbidden header names <https://developer.mozilla.org/en-US/docs/Glossary/Forbidden_header_name>`__, which means requests JavaScript can never set them.
+ Consequently, allowing them via CORS has no effect.
+
+ Thanks to jub0bs for the report in `Issue #842 <https://github.com/adamchainz/django-cors-headers/issues/842>`__.
+
* Drop the ``CORS_REPLACE_HTTPS_REFERER`` setting and ``CorsPostCsrfMiddleware``.
Since Django 1.9, the ``CSRF_TRUSTED_ORIGINS`` setting has been the preferred solution to making CSRF checks pass for CORS requests.
The removed setting and middleware only existed as a workaround for Django versions before 1.9.
diff --git a/README.rst b/README.rst
index 0822b081..b9fc9f87 100644
--- a/README.rst
+++ b/README.rst
@@ -237,17 +237,14 @@ __ https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Allo
.. code-block:: python
- CORS_ALLOW_HEADERS = [
+ CORS_ALLOW_HEADERS = (
"accept",
- "accept-encoding",
"authorization",
"content-type",
- "dnt",
- "origin",
"user-agent",
"x-csrftoken",
"x-requested-with",
- ]
+ )
The default can be imported as ``corsheaders.defaults.default_headers`` so you can extend it with your custom headers.
This allows you to keep up to date with any future changes.
@@ -257,9 +254,10 @@ For example:
from corsheaders.defaults import default_headers
- CORS_ALLOW_HEADERS = list(default_headers) + [
+ CORS_ALLOW_HEADERS = (
+ *default_headers,
"my-custom-header",
- ]
+ )
``CORS_EXPOSE_HEADERS: Sequence[str]``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/src/corsheaders/defaults.py b/src/corsheaders/defaults.py
index 0897582b..35ec7a4b 100644
--- a/src/corsheaders/defaults.py
+++ b/src/corsheaders/defaults.py
@@ -2,11 +2,8 @@
default_headers = (
"accept",
- "accept-encoding",
"authorization",
"content-type",
- "dnt",
- "origin",
"user-agent",
"x-csrftoken",
"x-requested-with",
|
microsoft__botbuilder-python-1907 | German language is not appropriately used when using ConfirmPrompt
### The Issue
I am building a chatbot for German users. I am sending the locale "de-de" as a user, and can confirm this actually arrives at the bot. When I want to use ConfirmPrompt, the bot returns "Yes" and "No" instead of "Ja" and "Nein".
### The Solution
After a lot of digging, I found the underlying cause and a fix. The culture model does not actually recognize German (de-de) as a supported language, and thus switches to the default (English). But German actually exists in prompt_culture_models.py, and there is a TODO: "# TODO: Replace with Culture.German after Recognizers-Text package updates." I looked that up, and the Recognizers-Text package is already updated :) . Still, this is not the real issue.
The reason is that German is not listed in the supported cultures function. I simply added it, and everything works fine:
```python
    @classmethod
    def get_supported_cultures(cls) -> List[PromptCultureModel]:
        """
        Gets a list of the supported culture models.
        """
        return [
            cls.Chinese,
            cls.German,
            cls.Dutch,
            cls.English,
            cls.French,
            cls.Italian,
            cls.Japanese,
            cls.Korean,
            cls.Portuguese,
            cls.Spanish,
            cls.Turkish,
        ]
```
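
To illustrate why the missing entry matters: `map_to_nearest_language` only recognises locales returned by `get_supported_cultures`, so once German is included, "de-DE" resolves to the German model and its "Ja"/"Nein" strings. A small hedged check (assuming the patched method above and that the module is importable from the installed package):

```python
# Hypothetical verification against the patched PromptCultureModels.
from botbuilder.dialogs.prompts.prompt_culture_models import PromptCultureModels

# "de-DE" is lower-cased and is now found among the supported locales.
assert PromptCultureModels.map_to_nearest_language("de-DE") == "de-de"
assert PromptCultureModels.German.yes_in_language == "Ja"
assert PromptCultureModels.German.no_in_language == "Nein"
```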
| [
{
"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\nfrom typing import List\n\nfrom recognizers_text import Culture\n\n\nclass PromptCultureModel:\n \"\"\"\n Culture model used in Choice and Confirm Prompts.\n \"\"\"\n\n def __init__(\n self,\n locale: str,\n separator: str,\n inline_or: str,\n inline_or_more: str,\n yes_in_language: str,\n no_in_language: str,\n ):\n \"\"\"\n\n :param locale: Culture Model's Locale. Example: \"en-US\".\n :param separator: Culture Model's Inline Separator. Example: \", \".\n :param inline_or: Culture Model's Inline Or. Example: \" or \".\n :param inline_or_more Culture Model's Inline Or More. Example: \", or \".\n :param yes_in_language: Equivalent of \"Yes\" in Culture Model's Language. Example: \"Yes\".\n :param no_in_language: Equivalent of \"No\" in Culture Model's Language. Example: \"No\".\n \"\"\"\n self.locale = locale\n self.separator = separator\n self.inline_or = inline_or\n self.inline_or_more = inline_or_more\n self.yes_in_language = yes_in_language\n self.no_in_language = no_in_language\n\n\nclass PromptCultureModels:\n \"\"\"\n Class container for currently-supported Culture Models in Confirm and Choice Prompt.\n \"\"\"\n\n Chinese = PromptCultureModel(\n locale=Culture.Chinese,\n inline_or=\" 要么 \",\n inline_or_more=\", 要么 \",\n separator=\", \",\n no_in_language=\"不\",\n yes_in_language=\"是的\",\n )\n\n Dutch = PromptCultureModel(\n locale=Culture.Dutch,\n inline_or=\" of \",\n inline_or_more=\", of \",\n separator=\", \",\n no_in_language=\"Nee\",\n yes_in_language=\"Ja\",\n )\n\n English = PromptCultureModel(\n locale=Culture.English,\n inline_or=\" or \",\n inline_or_more=\", or \",\n separator=\", \",\n no_in_language=\"No\",\n yes_in_language=\"Yes\",\n )\n\n French = PromptCultureModel(\n locale=Culture.French,\n inline_or=\" ou \",\n inline_or_more=\", ou \",\n separator=\", \",\n no_in_language=\"Non\",\n yes_in_language=\"Oui\",\n )\n\n German = PromptCultureModel(\n # TODO: Replace with Culture.German after Recognizers-Text package updates.\n locale=\"de-de\",\n inline_or=\" oder \",\n inline_or_more=\", oder \",\n separator=\", \",\n no_in_language=\"Nein\",\n yes_in_language=\"Ja\",\n )\n\n Italian = PromptCultureModel(\n locale=Culture.Italian,\n inline_or=\" o \",\n inline_or_more=\" o \",\n separator=\", \",\n no_in_language=\"No\",\n yes_in_language=\"Si\",\n )\n\n Japanese = PromptCultureModel(\n locale=Culture.Japanese,\n inline_or=\" または \",\n inline_or_more=\"、 または \",\n separator=\"、 \",\n no_in_language=\"いいえ\",\n yes_in_language=\"はい\",\n )\n\n Korean = PromptCultureModel(\n locale=Culture.Korean,\n inline_or=\" 또는 \",\n inline_or_more=\" 또는 \",\n separator=\", \",\n no_in_language=\"아니\",\n yes_in_language=\"예\",\n )\n\n Portuguese = PromptCultureModel(\n locale=Culture.Portuguese,\n inline_or=\" ou \",\n inline_or_more=\", ou \",\n separator=\", \",\n no_in_language=\"Não\",\n yes_in_language=\"Sim\",\n )\n\n Spanish = PromptCultureModel(\n locale=Culture.Spanish,\n inline_or=\" o \",\n inline_or_more=\", o \",\n separator=\", \",\n no_in_language=\"No\",\n yes_in_language=\"Sí\",\n )\n\n Turkish = PromptCultureModel(\n locale=Culture.Turkish,\n inline_or=\" veya \",\n inline_or_more=\" veya \",\n separator=\", \",\n no_in_language=\"Hayır\",\n yes_in_language=\"Evet\",\n )\n\n @classmethod\n def map_to_nearest_language(cls, culture_code: str) -> str:\n \"\"\"\n Normalize various potential locale strings to a standard.\n :param culture_code: Represents 
locale. Examples: \"en-US, en-us, EN\".\n :return: Normalized locale.\n :rtype: str\n\n .. remarks::\n In our other SDKs, this method is a copy/paste of the ones from the Recognizers-Text library.\n However, that doesn't exist in Python.\n \"\"\"\n if culture_code:\n culture_code = culture_code.lower()\n supported_culture_codes = cls._get_supported_locales()\n\n if culture_code not in supported_culture_codes:\n culture_prefix = culture_code.split(\"-\")[0]\n\n for supported_culture_code in supported_culture_codes:\n if supported_culture_code.startswith(culture_prefix):\n culture_code = supported_culture_code\n\n return culture_code\n\n @classmethod\n def get_supported_cultures(cls) -> List[PromptCultureModel]:\n \"\"\"\n Gets a list of the supported culture models.\n \"\"\"\n return [\n cls.Chinese,\n cls.Dutch,\n cls.English,\n cls.French,\n cls.Italian,\n cls.Japanese,\n cls.Korean,\n cls.Portuguese,\n cls.Spanish,\n cls.Turkish,\n ]\n\n @classmethod\n def _get_supported_locales(cls) -> List[str]:\n return [c.locale for c in cls.get_supported_cultures()]\n",
"path": "libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/prompt_culture_models.py"
}
] | [
{
"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\nfrom typing import List\n\nfrom recognizers_text import Culture\n\n\nclass PromptCultureModel:\n \"\"\"\n Culture model used in Choice and Confirm Prompts.\n \"\"\"\n\n def __init__(\n self,\n locale: str,\n separator: str,\n inline_or: str,\n inline_or_more: str,\n yes_in_language: str,\n no_in_language: str,\n ):\n \"\"\"\n\n :param locale: Culture Model's Locale. Example: \"en-US\".\n :param separator: Culture Model's Inline Separator. Example: \", \".\n :param inline_or: Culture Model's Inline Or. Example: \" or \".\n :param inline_or_more Culture Model's Inline Or More. Example: \", or \".\n :param yes_in_language: Equivalent of \"Yes\" in Culture Model's Language. Example: \"Yes\".\n :param no_in_language: Equivalent of \"No\" in Culture Model's Language. Example: \"No\".\n \"\"\"\n self.locale = locale\n self.separator = separator\n self.inline_or = inline_or\n self.inline_or_more = inline_or_more\n self.yes_in_language = yes_in_language\n self.no_in_language = no_in_language\n\n\nclass PromptCultureModels:\n \"\"\"\n Class container for currently-supported Culture Models in Confirm and Choice Prompt.\n \"\"\"\n\n Chinese = PromptCultureModel(\n locale=Culture.Chinese,\n inline_or=\" 要么 \",\n inline_or_more=\", 要么 \",\n separator=\", \",\n no_in_language=\"不\",\n yes_in_language=\"是的\",\n )\n\n Dutch = PromptCultureModel(\n locale=Culture.Dutch,\n inline_or=\" of \",\n inline_or_more=\", of \",\n separator=\", \",\n no_in_language=\"Nee\",\n yes_in_language=\"Ja\",\n )\n\n English = PromptCultureModel(\n locale=Culture.English,\n inline_or=\" or \",\n inline_or_more=\", or \",\n separator=\", \",\n no_in_language=\"No\",\n yes_in_language=\"Yes\",\n )\n\n French = PromptCultureModel(\n locale=Culture.French,\n inline_or=\" ou \",\n inline_or_more=\", ou \",\n separator=\", \",\n no_in_language=\"Non\",\n yes_in_language=\"Oui\",\n )\n\n German = PromptCultureModel(\n # TODO: Replace with Culture.German after Recognizers-Text package updates.\n locale=\"de-de\",\n inline_or=\" oder \",\n inline_or_more=\", oder \",\n separator=\", \",\n no_in_language=\"Nein\",\n yes_in_language=\"Ja\",\n )\n\n Italian = PromptCultureModel(\n locale=Culture.Italian,\n inline_or=\" o \",\n inline_or_more=\" o \",\n separator=\", \",\n no_in_language=\"No\",\n yes_in_language=\"Si\",\n )\n\n Japanese = PromptCultureModel(\n locale=Culture.Japanese,\n inline_or=\" または \",\n inline_or_more=\"、 または \",\n separator=\"、 \",\n no_in_language=\"いいえ\",\n yes_in_language=\"はい\",\n )\n\n Korean = PromptCultureModel(\n locale=Culture.Korean,\n inline_or=\" 또는 \",\n inline_or_more=\" 또는 \",\n separator=\", \",\n no_in_language=\"아니\",\n yes_in_language=\"예\",\n )\n\n Portuguese = PromptCultureModel(\n locale=Culture.Portuguese,\n inline_or=\" ou \",\n inline_or_more=\", ou \",\n separator=\", \",\n no_in_language=\"Não\",\n yes_in_language=\"Sim\",\n )\n\n Spanish = PromptCultureModel(\n locale=Culture.Spanish,\n inline_or=\" o \",\n inline_or_more=\", o \",\n separator=\", \",\n no_in_language=\"No\",\n yes_in_language=\"Sí\",\n )\n\n Turkish = PromptCultureModel(\n locale=Culture.Turkish,\n inline_or=\" veya \",\n inline_or_more=\" veya \",\n separator=\", \",\n no_in_language=\"Hayır\",\n yes_in_language=\"Evet\",\n )\n\n @classmethod\n def map_to_nearest_language(cls, culture_code: str) -> str:\n \"\"\"\n Normalize various potential locale strings to a standard.\n :param culture_code: Represents 
locale. Examples: \"en-US, en-us, EN\".\n :return: Normalized locale.\n :rtype: str\n\n .. remarks::\n In our other SDKs, this method is a copy/paste of the ones from the Recognizers-Text library.\n However, that doesn't exist in Python.\n \"\"\"\n if culture_code:\n culture_code = culture_code.lower()\n supported_culture_codes = cls._get_supported_locales()\n\n if culture_code not in supported_culture_codes:\n culture_prefix = culture_code.split(\"-\")[0]\n\n for supported_culture_code in supported_culture_codes:\n if supported_culture_code.startswith(culture_prefix):\n culture_code = supported_culture_code\n\n return culture_code\n\n @classmethod\n def get_supported_cultures(cls) -> List[PromptCultureModel]:\n \"\"\"\n Gets a list of the supported culture models.\n \"\"\"\n return [\n cls.Chinese,\n cls.German,\n cls.Dutch,\n cls.English,\n cls.French,\n cls.Italian,\n cls.Japanese,\n cls.Korean,\n cls.Portuguese,\n cls.Spanish,\n cls.Turkish,\n ]\n\n @classmethod\n def _get_supported_locales(cls) -> List[str]:\n return [c.locale for c in cls.get_supported_cultures()]\n",
"path": "libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/prompt_culture_models.py"
}
] | diff --git a/libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/prompt_culture_models.py b/libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/prompt_culture_models.py
index 1572ac688..abb527e21 100644
--- a/libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/prompt_culture_models.py
+++ b/libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/prompt_culture_models.py
@@ -174,6 +174,7 @@ def get_supported_cultures(cls) -> List[PromptCultureModel]:
"""
return [
cls.Chinese,
+ cls.German,
cls.Dutch,
cls.English,
cls.French,
|
AnalogJ__lexicon-1356 | Bug in create action for glesys provider
When creating an A record with the glesys provider, the full name is added instead of the host name.
```
lexicon_config = {
"provider_name" : "glesys",
"action": "create",
"domain": "somedomain.com",
"type": "A",
"name": "lexicon",
"content": "1.2.3.4",
"glesys": {
}
}
```
Results in the A-record:
`{'id': 2723410, 'type': 'A', 'name': 'lexicon.somedomain.com', 'ttl': 3600, 'content': '1.2.3.4'}`
While the expected result is:
`{'id': 2723410, 'type': 'A', 'name': 'lexicon', 'ttl': 3600, 'content': '1.2.3.4'}`
The request data sent to `domain/addrecord`:
`{'domainname': 'somedomain.com', 'host': 'lexicon.somedomain.com', 'type': 'A', 'data': '1.2.3.4', 'ttl': 3600}`
Expected request data to `domain/addrecord`:
`{'domainname': 'somedomain.com', 'host': 'lexicon', 'type': 'A', 'data': '1.2.3.4', 'ttl': 3600}`
Glesys API documentation:
```
domain/addrecord
Url: https://api.glesys.com/domain/addrecord
Method: Only Https POST
Required arguments: domainname , host , type , data
Optional arguments: ttl
Description: Adds a dns record to a domain
```
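
For illustration, a hedged sketch of how the provider's `_create_record` could build the `addrecord` payload so that only the relative host name is sent (variable names mirror the provider's `_create_record` arguments and are assumptions here, not a definitive implementation):

```python
# Sketch only: send the relative host name ("lexicon") rather than the
# fully-qualified one ("lexicon.somedomain.com"), matching what the
# GleSYS addrecord endpoint expects.
request_data = {
    "domainname": self.domain,  # "somedomain.com"
    "host": name,               # previously self._full_name(name)
    "type": rtype,              # "A"
    "data": content,            # "1.2.3.4"
}
self._post("/domain/addrecord", data=request_data)
```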
| [
{
"content": "\"\"\"Module provider for Glesys\"\"\"\nimport json\n\nimport requests\n\nfrom lexicon.exceptions import AuthenticationError\nfrom lexicon.providers.base import Provider as BaseProvider\n\nNAMESERVER_DOMAINS = [\"glesys.com\"]\n\n\ndef provider_parser(subparser):\n \"\"\"Generate a subparser for Glesys\"\"\"\n subparser.add_argument(\"--auth-username\", help=\"specify username (CL12345)\")\n subparser.add_argument(\"--auth-token\", help=\"specify API key\")\n\n\nclass Provider(BaseProvider):\n \"\"\"Provider class for Glesys\"\"\"\n\n def __init__(self, config):\n super(Provider, self).__init__(config)\n self.domain_id = None\n self.api_endpoint = \"https://api.glesys.com\"\n\n def _authenticate(self):\n payload = self._get(\"/domain/list\")\n domains = payload[\"response\"][\"domains\"]\n for record in domains:\n if record[\"domainname\"] == self.domain:\n # Domain records do not have any id.\n # Since domain_id cannot be None, use domain name as id instead.\n self.domain_id = record[\"domainname\"]\n break\n else:\n raise AuthenticationError(\"No domain found\")\n\n # Create record. If record already exists with the same content, do nothing.\n def _create_record(self, rtype, name, content):\n existing = self.list_records(rtype, name, content)\n if existing:\n # Already exists, do nothing.\n return True\n\n request_data = {\n \"domainname\": self.domain,\n \"host\": self._full_name(name),\n \"type\": rtype,\n \"data\": content,\n }\n self._addttl(request_data)\n\n self._post(\"/domain/addrecord\", data=request_data)\n return True\n\n # List all records. Return an empty list if no records found\n # type, name and content are used to filter records.\n # If possible filter during the query, otherwise filter after response is received.\n def _list_records(self, rtype=None, name=None, content=None):\n request_data = {\"domainname\": self.domain}\n payload = self._post(\"/domain/listrecords\", data=request_data)\n\n # Convert from Glesys record structure to Lexicon structure.\n processed_records = [\n self._glesysrecord2lexiconrecord(r) for r in payload[\"response\"][\"records\"]\n ]\n\n if rtype:\n processed_records = [\n record for record in processed_records if record[\"type\"] == rtype\n ]\n if name:\n processed_records = [\n record\n for record in processed_records\n if record[\"name\"] == self._full_name(name)\n ]\n if content:\n processed_records = [\n record\n for record in processed_records\n if record[\"content\"].lower() == content.lower()\n ]\n\n return processed_records\n\n # Update a record. 
Identifier must be specified.\n def _update_record(self, identifier, rtype=None, name=None, content=None):\n request_data = {\"recordid\": identifier}\n if name:\n request_data[\"host\"] = name\n if rtype:\n request_data[\"type\"] = rtype\n if content:\n request_data[\"data\"] = content\n\n self._addttl(request_data)\n self._post(\"/domain/updaterecord\", data=request_data)\n return True\n\n # Delete an existing record.\n # If record does not exist, do nothing.\n # If an identifier is specified, use it, otherwise do a lookup using type, name and content.\n def _delete_record(self, identifier=None, rtype=None, name=None, content=None):\n delete_record_id = []\n if not identifier:\n records = self._list_records(rtype, name, content)\n delete_record_id = [record[\"id\"] for record in records]\n else:\n delete_record_id.append(identifier)\n\n for record_id in delete_record_id:\n request_data = {\"recordid\": record_id}\n self._post(\"/domain/deleterecord\", data=request_data)\n\n return True\n\n # Helpers.\n def _request(self, action=\"GET\", url=\"/\", data=None, query_params=None):\n if data is None:\n data = {}\n if query_params is None:\n query_params = {}\n\n query_params[\"format\"] = \"json\"\n default_headers = {\n \"Accept\": \"application/json\",\n \"Content-Type\": \"application/json\",\n }\n\n credentials = (\n self._get_provider_option(\"auth_username\"),\n self._get_provider_option(\"auth_token\"),\n )\n response = requests.request(\n action,\n self.api_endpoint + url,\n params=query_params,\n data=json.dumps(data),\n headers=default_headers,\n auth=credentials,\n )\n\n # if the request fails for any reason, throw an error.\n response.raise_for_status()\n return response.json()\n\n # Adds TTL parameter if passed as argument to lexicon.\n def _addttl(self, request_data):\n if self._get_lexicon_option(\"ttl\"):\n request_data[\"ttl\"] = self._get_lexicon_option(\"ttl\")\n\n # From Glesys record structure: [u'domainname', u'recordid', u'type', u'host', u'ttl', u'data']\n def _glesysrecord2lexiconrecord(self, glesys_record):\n return {\n \"id\": glesys_record[\"recordid\"],\n \"type\": glesys_record[\"type\"],\n \"name\": glesys_record[\"host\"],\n \"ttl\": glesys_record[\"ttl\"],\n \"content\": glesys_record[\"data\"],\n }\n",
"path": "lexicon/providers/glesys.py"
}
] | [
{
"content": "\"\"\"Module provider for Glesys\"\"\"\nimport json\n\nimport requests\n\nfrom lexicon.exceptions import AuthenticationError\nfrom lexicon.providers.base import Provider as BaseProvider\n\nNAMESERVER_DOMAINS = [\"glesys.com\"]\n\n\ndef provider_parser(subparser):\n \"\"\"Generate a subparser for Glesys\"\"\"\n subparser.add_argument(\"--auth-username\", help=\"specify username (CL12345)\")\n subparser.add_argument(\"--auth-token\", help=\"specify API key\")\n\n\nclass Provider(BaseProvider):\n \"\"\"Provider class for Glesys\"\"\"\n\n def __init__(self, config):\n super(Provider, self).__init__(config)\n self.domain_id = None\n self.api_endpoint = \"https://api.glesys.com\"\n\n def _authenticate(self):\n payload = self._get(\"/domain/list\")\n domains = payload[\"response\"][\"domains\"]\n for record in domains:\n if record[\"domainname\"] == self.domain:\n # Domain records do not have any id.\n # Since domain_id cannot be None, use domain name as id instead.\n self.domain_id = record[\"domainname\"]\n break\n else:\n raise AuthenticationError(\"No domain found\")\n\n # Create record. If record already exists with the same content, do nothing.\n def _create_record(self, rtype, name, content):\n existing = self.list_records(rtype, name, content)\n if existing:\n # Already exists, do nothing.\n return True\n\n request_data = {\n \"domainname\": self.domain,\n \"host\": name,\n \"type\": rtype,\n \"data\": content,\n }\n self._addttl(request_data)\n\n self._post(\"/domain/addrecord\", data=request_data)\n return True\n\n # List all records. Return an empty list if no records found\n # type, name and content are used to filter records.\n # If possible filter during the query, otherwise filter after response is received.\n def _list_records(self, rtype=None, name=None, content=None):\n request_data = {\"domainname\": self.domain}\n payload = self._post(\"/domain/listrecords\", data=request_data)\n\n # Convert from Glesys record structure to Lexicon structure.\n processed_records = [\n self._glesysrecord2lexiconrecord(r) for r in payload[\"response\"][\"records\"]\n ]\n\n if rtype:\n processed_records = [\n record for record in processed_records if record[\"type\"] == rtype\n ]\n if name:\n processed_records = [\n record\n for record in processed_records\n if record[\"name\"] == self._full_name(name)\n ]\n if content:\n processed_records = [\n record\n for record in processed_records\n if record[\"content\"].lower() == content.lower()\n ]\n\n return processed_records\n\n # Update a record. 
Identifier must be specified.\n def _update_record(self, identifier, rtype=None, name=None, content=None):\n request_data = {\"recordid\": identifier}\n if name:\n request_data[\"host\"] = name\n if rtype:\n request_data[\"type\"] = rtype\n if content:\n request_data[\"data\"] = content\n\n self._addttl(request_data)\n self._post(\"/domain/updaterecord\", data=request_data)\n return True\n\n # Delete an existing record.\n # If record does not exist, do nothing.\n # If an identifier is specified, use it, otherwise do a lookup using type, name and content.\n def _delete_record(self, identifier=None, rtype=None, name=None, content=None):\n delete_record_id = []\n if not identifier:\n records = self._list_records(rtype, name, content)\n delete_record_id = [record[\"id\"] for record in records]\n else:\n delete_record_id.append(identifier)\n\n for record_id in delete_record_id:\n request_data = {\"recordid\": record_id}\n self._post(\"/domain/deleterecord\", data=request_data)\n\n return True\n\n # Helpers.\n def _request(self, action=\"GET\", url=\"/\", data=None, query_params=None):\n if data is None:\n data = {}\n if query_params is None:\n query_params = {}\n\n query_params[\"format\"] = \"json\"\n default_headers = {\n \"Accept\": \"application/json\",\n \"Content-Type\": \"application/json\",\n }\n\n credentials = (\n self._get_provider_option(\"auth_username\"),\n self._get_provider_option(\"auth_token\"),\n )\n response = requests.request(\n action,\n self.api_endpoint + url,\n params=query_params,\n data=json.dumps(data),\n headers=default_headers,\n auth=credentials,\n )\n\n # if the request fails for any reason, throw an error.\n response.raise_for_status()\n return response.json()\n\n # Adds TTL parameter if passed as argument to lexicon.\n def _addttl(self, request_data):\n if self._get_lexicon_option(\"ttl\"):\n request_data[\"ttl\"] = self._get_lexicon_option(\"ttl\")\n\n # From Glesys record structure: [u'domainname', u'recordid', u'type', u'host', u'ttl', u'data']\n def _glesysrecord2lexiconrecord(self, glesys_record):\n return {\n \"id\": glesys_record[\"recordid\"],\n \"type\": glesys_record[\"type\"],\n \"name\": glesys_record[\"host\"],\n \"ttl\": glesys_record[\"ttl\"],\n \"content\": glesys_record[\"data\"],\n }\n",
"path": "lexicon/providers/glesys.py"
}
] | diff --git a/lexicon/providers/glesys.py b/lexicon/providers/glesys.py
index 2b30919b9..4bbaffd20 100644
--- a/lexicon/providers/glesys.py
+++ b/lexicon/providers/glesys.py
@@ -44,7 +44,7 @@ def _create_record(self, rtype, name, content):
request_data = {
"domainname": self.domain,
- "host": self._full_name(name),
+ "host": name,
"type": rtype,
"data": content,
}
|
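The one-line change in the diff above makes `_create_record` submit the relative host name rather than the fully qualified one. Below is a minimal illustration of how the `/domain/addrecord` payload differs, using a hypothetical domain `example.com` and record name `www`; the `full_name` helper only approximates what lexicon's `_full_name()` does and is not the real implementation.

```python
# Illustration only: payload built by _create_record before and after the patch above.
domain = "example.com"
name = "www"

def full_name(name: str, domain: str) -> str:
    # Rough stand-in for lexicon's _full_name(): qualify the record name with the domain.
    return name if name.endswith(domain) else f"{name.rstrip('.')}.{domain}"

# Before the patch: the fully qualified name was sent as "host".
request_before = {"domainname": domain, "host": full_name(name, domain), "type": "A", "data": "127.0.0.1"}

# After the patch: the relative host name is sent instead.
request_after = {"domainname": domain, "host": name, "type": "A", "data": "127.0.0.1"}

print(request_before["host"])  # www.example.com
print(request_after["host"])   # www
```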
imAsparky__django-cookiecutter-59 | [FEAT]: Add Pyup to the Django project.
**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
**Describe the solution you'd like**
A clear and concise description of what you want to happen.
**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.
**Additional context**
Add any other context or screenshots about the feature request here.
| [
{
"content": "#!/usr/bin/env python\n\"\"\"django-cookiecutter post project generation jobs.\"\"\"\nimport os\nimport subprocess # nosec\n\nPROJECT_DIRECTORY = os.path.realpath(os.path.curdir)\n\nREMOTE_REPO = \"[email protected]:{{cookiecutter.github_username}}/\\\n{{cookiecutter.git_project_name}}.git\"\n\n\nGIT_USER = \"{{cookiecutter.author_name}}\"\nGIT_EMAIL = \"{{cookiecutter.github_user_email}}\"\n\n# Helper functions\n\n\ndef post_gen_setup(*args, supress_exception=False, cwd=None):\n \"\"\"Helper to set up the Django project with the chosen options.\"\"\"\n cur_dir = os.getcwd()\n\n try:\n if cwd:\n os.chdir(cwd)\n\n with subprocess.Popen( # nosec\n args, stdout=subprocess.PIPE, stderr=subprocess.PIPE\n ) as proc:\n\n out, err = proc.communicate()\n out = out.decode(\"utf-8\")\n err = err.decode(\"utf-8\")\n if err and not supress_exception:\n raise Exception(err)\n if err and supress_exception:\n return out\n\n return out\n\n finally:\n os.chdir(cur_dir)\n\n\ndef recursive_force_delete_a_folder(folder_path):\n \"\"\"Recursively force delete a folder. USE WITH CAUTION.\"\"\"\n post_gen_setup(\n \"rm\",\n \"-rf\",\n folder_path,\n cwd=PROJECT_DIRECTORY,\n )\n\n\ndef remove_file(filepath):\n \"\"\"Remove files not required for this generated Django project.\"\"\"\n if os.path.exists(os.path.join(PROJECT_DIRECTORY, filepath)):\n os.remove(os.path.join(PROJECT_DIRECTORY, filepath))\n\n\n# Git functions\n\n\ndef init_git():\n \"\"\"Initialise git repository and set the remote.\"\"\"\n if not os.path.exists(os.path.join(PROJECT_DIRECTORY, \".git\")):\n post_gen_setup(\n \"git\",\n \"init\",\n \"--initial-branch=main\",\n cwd=PROJECT_DIRECTORY,\n )\n\n post_gen_setup(\n \"git\",\n \"remote\",\n \"add\",\n \"origin\",\n REMOTE_REPO,\n cwd=PROJECT_DIRECTORY,\n )\n post_gen_setup(\n \"git\",\n \"config\",\n \"user.name\",\n GIT_USER,\n cwd=PROJECT_DIRECTORY,\n )\n post_gen_setup(\n \"git\",\n \"config\",\n \"user.email\",\n GIT_EMAIL,\n cwd=PROJECT_DIRECTORY,\n )\n\n\ndef git_add_and_commit_initial():\n \"\"\"Add the local files and commit to the git repository.\"\"\"\n post_gen_setup(\n \"git\",\n \"add\",\n \"-A\",\n cwd=PROJECT_DIRECTORY,\n )\n\n post_gen_setup(\n \"git\",\n \"commit\",\n \"-m\",\n '\"chore(git): Initial Commit\"',\n cwd=PROJECT_DIRECTORY,\n )\n\n\ndef git_configure_custom_commit_message():\n \"\"\"Configure git to use the custom commit message template.\"\"\"\n if os.path.exists(os.path.join(PROJECT_DIRECTORY, \".git\")):\n post_gen_setup(\n \"git\",\n \"config\",\n \"--local\",\n \"commit.template\",\n \".github/.git-commit-template.txt\",\n cwd=PROJECT_DIRECTORY,\n )\n\n\nif __name__ == \"__main__\":\n\n # Documentation options\n\n if \"{{ cookiecutter.include_sphinx_docs }}\" == \"n\":\n recursive_force_delete_a_folder(\"docs\")\n\n if \"{{ cookiecutter.use_readthedocs }}\" == \"n\":\n remove_file(\".readthedocs.yaml\")\n\n if (\n \"{{ cookiecutter.include_contributor_covenant_code_of_conduct }}\"\n == \"n\"\n ):\n remove_file(\"docs/source/code-of-conduct.rst\")\n\n if \"{{ cookiecutter.include_documentation_templates }}\" == \"n\":\n recursive_force_delete_a_folder(\"docs/source/doc-templates\")\n\n if \"{{ cookiecutter.include_how_to_contribute_template }}\" == \"n\":\n remove_file(\"docs/source/how-tos/how-to-contribute.rst\")\n\n if \"{{ cookiecutter.open_source_license }}\" == \"Not open source\":\n remove_file(\"LICENSE.rst\")\n\n # Git options\n\n if \"{{ cookiecutter.create_conventional_commits_edit_message }}\" == \"n\":\n 
remove_file(\".github/.git-commit-template.txt\")\n\n if \"{{ cookiecutter.automatic_set_up_git_and_initial_commit }}\" == \"y\":\n init_git()\n git_add_and_commit_initial()\n\n if \"{{ cookiecutter.create_conventional_commits_edit_message}}\" == \"y\":\n git_configure_custom_commit_message()\n\n if \"{{ cookiecutter.use_GH_custom_issue_templates }}\" == \"y\":\n remove_file(\".github/ISSUE_TEMPLATE.md\")\n else:\n recursive_force_delete_a_folder(\".github/ISSUE_TEMPLATE\")\n\n # Workflow options\n\n if \"{{ cookiecutter.use_pre_commit }}\" == \"n\":\n remove_file(\".pre-commit-config.yaml\")\n\n if \"{{ cookiecutter.use_GH_action_semantic_version }}\" != \"y\":\n remove_file(\"CHANGELOG.md\")\n remove_file(\".github/semantic.yaml\")\n remove_file(\".github/workflows/semantic_release.yaml\")\n\n if \"{{ cookiecutter.create_repo_auto_test_workflow }}\" == \"n\":\n remove_file(\".github/workflows/test_contribution.yaml\")\n",
"path": "hooks/post_gen_project.py"
}
] | [
{
"content": "#!/usr/bin/env python\n\"\"\"django-cookiecutter post project generation jobs.\"\"\"\nimport os\nimport subprocess # nosec\n\nPROJECT_DIRECTORY = os.path.realpath(os.path.curdir)\n\nREMOTE_REPO = \"[email protected]:{{cookiecutter.github_username}}/\\\n{{cookiecutter.git_project_name}}.git\"\n\n\nGIT_USER = \"{{cookiecutter.author_name}}\"\nGIT_EMAIL = \"{{cookiecutter.github_user_email}}\"\n\n# Helper functions\n\n\ndef post_gen_setup(*args, supress_exception=False, cwd=None):\n \"\"\"Helper to set up the Django project with the chosen options.\"\"\"\n cur_dir = os.getcwd()\n\n try:\n if cwd:\n os.chdir(cwd)\n\n with subprocess.Popen( # nosec\n args, stdout=subprocess.PIPE, stderr=subprocess.PIPE\n ) as proc:\n\n out, err = proc.communicate()\n out = out.decode(\"utf-8\")\n err = err.decode(\"utf-8\")\n if err and not supress_exception:\n raise Exception(err)\n if err and supress_exception:\n return out\n\n return out\n\n finally:\n os.chdir(cur_dir)\n\n\ndef recursive_force_delete_a_folder(folder_path):\n \"\"\"Recursively force delete a folder. USE WITH CAUTION.\"\"\"\n post_gen_setup(\n \"rm\",\n \"-rf\",\n folder_path,\n cwd=PROJECT_DIRECTORY,\n )\n\n\ndef remove_file(filepath):\n \"\"\"Remove files not required for this generated Django project.\"\"\"\n if os.path.exists(os.path.join(PROJECT_DIRECTORY, filepath)):\n os.remove(os.path.join(PROJECT_DIRECTORY, filepath))\n\n\n# Git functions\n\n\ndef init_git():\n \"\"\"Initialise git repository and set the remote.\"\"\"\n if not os.path.exists(os.path.join(PROJECT_DIRECTORY, \".git\")):\n post_gen_setup(\n \"git\",\n \"init\",\n \"--initial-branch=main\",\n cwd=PROJECT_DIRECTORY,\n )\n\n post_gen_setup(\n \"git\",\n \"remote\",\n \"add\",\n \"origin\",\n REMOTE_REPO,\n cwd=PROJECT_DIRECTORY,\n )\n post_gen_setup(\n \"git\",\n \"config\",\n \"user.name\",\n GIT_USER,\n cwd=PROJECT_DIRECTORY,\n )\n post_gen_setup(\n \"git\",\n \"config\",\n \"user.email\",\n GIT_EMAIL,\n cwd=PROJECT_DIRECTORY,\n )\n\n\ndef git_add_and_commit_initial():\n \"\"\"Add the local files and commit to the git repository.\"\"\"\n post_gen_setup(\n \"git\",\n \"add\",\n \"-A\",\n cwd=PROJECT_DIRECTORY,\n )\n\n post_gen_setup(\n \"git\",\n \"commit\",\n \"-m\",\n '\"chore(git): Initial Commit\"',\n cwd=PROJECT_DIRECTORY,\n )\n\n\ndef git_configure_custom_commit_message():\n \"\"\"Configure git to use the custom commit message template.\"\"\"\n if os.path.exists(os.path.join(PROJECT_DIRECTORY, \".git\")):\n post_gen_setup(\n \"git\",\n \"config\",\n \"--local\",\n \"commit.template\",\n \".github/.git-commit-template.txt\",\n cwd=PROJECT_DIRECTORY,\n )\n\n\nif __name__ == \"__main__\":\n\n # Documentation options\n\n if \"{{ cookiecutter.include_sphinx_docs }}\" == \"n\":\n recursive_force_delete_a_folder(\"docs\")\n\n if \"{{ cookiecutter.use_readthedocs }}\" == \"n\":\n remove_file(\".readthedocs.yaml\")\n\n if (\n \"{{ cookiecutter.include_contributor_covenant_code_of_conduct }}\"\n == \"n\"\n ):\n remove_file(\"docs/source/code-of-conduct.rst\")\n\n if \"{{ cookiecutter.include_documentation_templates }}\" == \"n\":\n recursive_force_delete_a_folder(\"docs/source/doc-templates\")\n\n if \"{{ cookiecutter.include_how_to_contribute_template }}\" == \"n\":\n remove_file(\"docs/source/how-tos/how-to-contribute.rst\")\n\n if \"{{ cookiecutter.open_source_license }}\" == \"Not open source\":\n remove_file(\"LICENSE.rst\")\n\n # Git options\n\n if \"{{ cookiecutter.create_conventional_commits_edit_message }}\" == \"n\":\n 
remove_file(\".github/.git-commit-template.txt\")\n\n if \"{{ cookiecutter.automatic_set_up_git_and_initial_commit }}\" == \"y\":\n init_git()\n git_add_and_commit_initial()\n\n if \"{{ cookiecutter.create_conventional_commits_edit_message}}\" == \"y\":\n git_configure_custom_commit_message()\n\n if \"{{ cookiecutter.use_GH_custom_issue_templates }}\" == \"y\":\n remove_file(\".github/ISSUE_TEMPLATE.md\")\n else:\n recursive_force_delete_a_folder(\".github/ISSUE_TEMPLATE\")\n\n # Workflow options\n\n if \"{{ cookiecutter.use_pre_commit }}\" == \"n\":\n remove_file(\".pre-commit-config.yaml\")\n\n if \"{{ cookiecutter.use_GH_action_semantic_version }}\" != \"y\":\n remove_file(\"CHANGELOG.md\")\n remove_file(\".github/semantic.yaml\")\n remove_file(\".github/workflows/semantic_release.yaml\")\n\n if \"{{ cookiecutter.create_repo_auto_test_workflow }}\" == \"n\":\n remove_file(\".github/workflows/test_contribution.yaml\")\n\n if \"{{ cookiecutter.use_pyup_io }}\" == \"n\":\n remove_file(\".pyup.yaml\")\n",
"path": "hooks/post_gen_project.py"
}
] | diff --git a/cookiecutter.json b/cookiecutter.json
index a664ffac..4144ada1 100644
--- a/cookiecutter.json
+++ b/cookiecutter.json
@@ -7,10 +7,11 @@
"git_project_name": "{{ cookiecutter.project_name.lower().replace(' ', '-').replace('_', '-') }}",
"project_slug": "{{ cookiecutter.project_name.lower()|replace(' ', '_')|replace('-', '_')|replace('.', '_')|trim() }}",
"project_short_description": "A Django project with all the boilerplate",
- "add_contributors_list": "n",
+ "add_contributors_list": ["n", "y"],
"version": "0.1.0",
"use_repo_status_badge": ["no", "concept", "wip", "active"],
"use_pre_commit": ["y", "n"],
+ "use_pyup_io": ["y", "n"],
"include_sphinx_docs": ["y", "n"],
"use_readthedocs": ["y", "n"],
@@ -38,7 +39,6 @@
"_copy_without_render": [
".pre-commit-config.yaml",
".github/workflows/semantic_release.yaml",
- ".github/workflows/semantic_release_test_pypi.yaml",
".github/workflows/test_contribution.yaml"
]
diff --git a/hooks/post_gen_project.py b/hooks/post_gen_project.py
index e0212c0a..e8273d7c 100644
--- a/hooks/post_gen_project.py
+++ b/hooks/post_gen_project.py
@@ -179,3 +179,6 @@ def git_configure_custom_commit_message():
if "{{ cookiecutter.create_repo_auto_test_workflow }}" == "n":
remove_file(".github/workflows/test_contribution.yaml")
+
+ if "{{ cookiecutter.use_pyup_io }}" == "n":
+ remove_file(".pyup.yaml")
diff --git a/tests/test_bake_django.py b/tests/test_bake_django.py
index acebd19f..cc9162c9 100644
--- a/tests/test_bake_django.py
+++ b/tests/test_bake_django.py
@@ -565,6 +565,51 @@ def test_baked_django_readme_without_precommit_badge(cookies):
assert " :alt: pre-commit" not in readme_file
+def test_baked_django_with_pyup_io(cookies):
+ """Test Django pyup.io file has been generated correctly."""
+ default_django = cookies.bake()
+
+ assert ".pyup.yaml" in os.listdir(default_django.project_path)
+
+ pyup_path = default_django.project_path / ".pyup.yaml"
+ pyup_file = pyup_path.read_text().splitlines()
+
+ assert ' - "imAsparky""' in pyup_file
+
+ readme_path = default_django.project_path / "README.rst"
+ readme_file = readme_path.read_text().splitlines()
+
+ assert (
+ ".. image:: https://pyup.io/repos/github/imAsparky/django-boilerplate/shield.svg"
+ in readme_file
+ )
+ assert (
+ " :target: https://pyup.io/repos/github/imAsparky/django-boilerplate/"
+ in readme_file
+ )
+ assert " :alt: Updates" in readme_file
+
+
+def test_baked_django_without_pyup_io(cookies):
+ """Test Django pyup.io file has not been generated."""
+ non_default_django = cookies.bake(extra_context={"use_pyup_io": "n"})
+
+ assert ".pyup.yaml" not in os.listdir(non_default_django.project_path)
+
+ readme_path = non_default_django.project_path / "README.rst"
+ readme_file = readme_path.read_text().splitlines()
+
+ assert (
+ ".. image:: https://pyup.io/repos/github/imAsparky/django-boilerplate/shield.svg"
+ not in readme_file
+ )
+ assert (
+ " :target: https://pyup.io/repos/github/imAsparky/django-boilerplate/"
+ not in readme_file
+ )
+ assert " :alt: Updates" not in readme_file
+
+
def test_baked_django_with_read_the_docs(cookies):
"""Test Django readthedocs config has been generated correctly."""
default_django = cookies.bake()
diff --git a/{{cookiecutter.git_project_name}}/.pyup.yaml b/{{cookiecutter.git_project_name}}/.pyup.yaml
new file mode 100644
index 00000000..1de1b733
--- /dev/null
+++ b/{{cookiecutter.git_project_name}}/.pyup.yaml
@@ -0,0 +1,32 @@
+# configure updates globally
+# default: all
+# allowed: all, insecure, False
+update: all
+
+# configure dependency pinning globally
+# default: True
+# allowed: True, False
+pin: True
+
+# set the default branch
+# default: empty, the default branch on GitHub
+Default Branch: main
+
+# assign users to pull requests, default is not set
+# requires private repo permissions, even on public repos
+# default: empty
+assignees:
+ - "{{cookiecutter.github_username}}""
+
+# add a label to pull requests, default is not set
+# requires private repo permissions, even on public repos
+# default: empty
+label_prs: "fix(pyup): Update dependencies"
+
+# allow to close stale PRs
+# default: True
+close_prs: False
+
+requirements:
+ - "requirements_dev.txt"
+ - "docs/requirements.txt"
diff --git a/{{cookiecutter.git_project_name}}/README.rst b/{{cookiecutter.git_project_name}}/README.rst
index 85959e5b..efdf14f1 100644
--- a/{{cookiecutter.git_project_name}}/README.rst
+++ b/{{cookiecutter.git_project_name}}/README.rst
@@ -4,12 +4,19 @@
*{{cookiecutter.project_short_description}}*
-{%- if cookiecutter.use_repo_status_badge != "no" %}
+{%- if cookiecutter.use_repo_status_badge != "n" %}
.. image:: https://www.repostatus.org/badges/latest/{{cookiecutter.use_repo_status_badge}}.svg
:target: https://www.repostatus.org/#{{cookiecutter.use_repo_status_badge}}
:alt: Project Status: {{cookiecutter.use_repo_status_badge}}
{%- endif %}
+{%- if cookiecutter.use_pyup_io == "y" %}
+.. image:: https://pyup.io/repos/github/{{cookiecutter.github_username}}/{{cookiecutter.git_project_name}}/shield.svg
+ :target: https://pyup.io/repos/github/{{cookiecutter.github_username}}/{{cookiecutter.git_project_name}}/
+ :alt: Updates
+{%- endif %}
+
+
{%- if cookiecutter.use_pre_commit == "y" %}
.. image:: https://img.shields.io/badge/pre--commit-enabled-brightgreen?logo=pre-commit&logoColor=white
:target: https://github.com/pre-commit/pre-commit
|
flairNLP__flair-2711 | SciSpacyTokenizer.tokenize() function is broken
**Describe the bug**
Two minor bugs were introduced to Flair's `SciSpacyTokenizer` as part of #2645
The bugs prevent usage of `SciSpacyTokenizer` and tokenizers that leverage it.
**To Reproduce**
Any use of classes that leverage `SciSpacyTokenizer` will raise errors. For example:
```python
from flair.tokenization import SciSpacySentenceSplitter
splitter = SciSpacySentenceSplitter()
sentences = splitter.split("abcd")
```
Causes
```
[/usr/local/lib/python3.7/dist-packages/flair/tokenization.py](https://localhost:8080/#) in tokenize(self, text)
257 words: List[str] = []
258 for word in sentence:
--> 259 word.append(word)
260 return words
261
AttributeError: 'spacy.tokens.token.Token' object has no attribute 'append'
```
**To Fix**
***First issue***
The first problem is that there is a typo here: https://github.com/flairNLP/flair/blob/480d2c9afd66ab8d3bf40a676917e84dba3c4cee/flair/tokenization.py#L259
It should be `words.append`, not `word.append`.
***Second issue***
The `SciSpacyTokenizer.tokenize()` method is supposed to return a list of `str`, but instead it returns a list of Spacy `Token` objects.
Happy to open a PR shortly
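For reference, here is a minimal standalone sketch of the corrected logic; it mirrors the fix in the PR diff further below (the function name and usage lines are illustrative, not part of flair):
```python
from typing import List

def tokenize_with_scispacy(model, text: str) -> List[str]:
    # Same logic as the fixed SciSpacyTokenizer.tokenize():
    # append each token's text to `words` (not `word.append(word)`),
    # so the caller gets a List[str] instead of spacy Token objects.
    sentence = model(text)
    words: List[str] = []
    for word in sentence:
        words.append(word.text)
    return words

# Usage sketch (assumes the en_core_sci_sm model is installed):
# import spacy
# nlp = spacy.load("en_core_sci_sm")
# print(tokenize_with_scispacy(nlp, "Fluoxetine is a selective serotonin reuptake inhibitor."))
```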
| [
{
"content": "import logging\nfrom abc import ABC, abstractmethod\nfrom typing import Any, Callable, List, Union\n\nfrom segtok.segmenter import split_multi, split_single\nfrom segtok.tokenizer import split_contractions, word_tokenizer\n\nfrom flair.data import Sentence, Tokenizer\n\nlog = logging.getLogger(\"flair\")\n\n\nclass SpacyTokenizer(Tokenizer):\n \"\"\"\n Implementation of :class:`Tokenizer`, using models from Spacy.\n\n :param model a Spacy V2 model or the name of the model to load.\n \"\"\"\n\n def __init__(self, model):\n super(SpacyTokenizer, self).__init__()\n\n try:\n import spacy\n from spacy.language import Language\n except ImportError:\n raise ImportError(\n \"Please install Spacy v2.0 or better before using the Spacy tokenizer, \"\n \"otherwise you can use SegtokTokenizer as advanced tokenizer.\"\n )\n\n if isinstance(model, Language):\n self.model: Language = model\n elif isinstance(model, str):\n self.model: Language = spacy.load(model)\n else:\n raise AssertionError(\n \"Unexpected type of parameter model. Please provide a loaded \"\n \"spacy model or the name of the model to load.\"\n )\n\n def tokenize(self, text: str) -> List[str]:\n from spacy.tokens.doc import Doc\n\n doc: Doc = self.model.make_doc(text)\n words: List[str] = []\n for word in doc:\n if len(word.text.strip()) == 0:\n continue\n words.append(word.text)\n return words\n\n @property\n def name(self) -> str:\n return self.__class__.__name__ + \"_\" + self.model.meta[\"name\"] + \"_\" + self.model.meta[\"version\"]\n\n\nclass SegtokTokenizer(Tokenizer):\n \"\"\"\n Tokenizer using segtok, a third party library dedicated to rules-based Indo-European languages.\n\n For further details see: https://github.com/fnl/segtok\n \"\"\"\n\n def __init__(self):\n super(SegtokTokenizer, self).__init__()\n\n def tokenize(self, text: str) -> List[str]:\n return SegtokTokenizer.run_tokenize(text)\n\n @staticmethod\n def run_tokenize(text: str) -> List[str]:\n words: List[str] = []\n\n sentences = split_single(text)\n for sentence in sentences:\n contractions = split_contractions(word_tokenizer(sentence))\n words.extend(contractions)\n\n words = list(filter(None, words))\n\n return words\n\n\nclass SpaceTokenizer(Tokenizer):\n \"\"\"\n Tokenizer based on space character only.\n \"\"\"\n\n def __init__(self):\n super(SpaceTokenizer, self).__init__()\n\n def tokenize(self, text: str) -> List[str]:\n return SpaceTokenizer.run_tokenize(text)\n\n @staticmethod\n def run_tokenize(text: str) -> List[str]:\n tokens: List[str] = []\n word = \"\"\n index = -1\n for index, char in enumerate(text):\n if char == \" \":\n if len(word) > 0:\n tokens.append(word)\n\n word = \"\"\n else:\n word += char\n # increment for last token in sentence if not followed by whitespace\n index += 1\n if len(word) > 0:\n tokens.append(word)\n\n return tokens\n\n\nclass JapaneseTokenizer(Tokenizer):\n \"\"\"\n Tokenizer using konoha, a third party library which supports\n multiple Japanese tokenizer such as MeCab, Janome and SudachiPy.\n\n For further details see:\n https://github.com/himkt/konoha\n \"\"\"\n\n def __init__(self, tokenizer: str, sudachi_mode: str = \"A\"):\n super(JapaneseTokenizer, self).__init__()\n\n available_tokenizers = [\"mecab\", \"janome\", \"sudachi\"]\n\n if tokenizer.lower() not in available_tokenizers:\n raise NotImplementedError(\n f\"Currently, {tokenizer} is only supported. 
Supported tokenizers: {available_tokenizers}.\"\n )\n\n try:\n import konoha\n except ModuleNotFoundError:\n log.warning(\"-\" * 100)\n log.warning('ATTENTION! The library \"konoha\" is not installed!')\n log.warning(\n '- If you want to use MeCab, install mecab with \"sudo apt install mecab libmecab-dev mecab-ipadic\".'\n )\n log.warning('- Install konoha with \"pip install konoha[{tokenizer_name}]\"')\n log.warning(' - You can choose tokenizer from [\"mecab\", \"janome\", \"sudachi\"].')\n log.warning(\"-\" * 100)\n exit()\n\n self.tokenizer = tokenizer\n self.sentence_tokenizer = konoha.SentenceTokenizer()\n self.word_tokenizer = konoha.WordTokenizer(tokenizer, mode=sudachi_mode)\n\n def tokenize(self, text: str) -> List[str]:\n words: List[str] = []\n\n sentences = self.sentence_tokenizer.tokenize(text)\n for sentence in sentences:\n konoha_tokens = self.word_tokenizer.tokenize(sentence)\n words.extend(list(map(str, konoha_tokens)))\n\n return words\n\n @property\n def name(self) -> str:\n return self.__class__.__name__ + \"_\" + self.tokenizer\n\n\nclass TokenizerWrapper(Tokenizer):\n \"\"\"\n Helper class to wrap tokenizer functions to the class-based tokenizer interface.\n \"\"\"\n\n def __init__(self, tokenizer_func: Callable[[str], List[str]]):\n super(TokenizerWrapper, self).__init__()\n self.tokenizer_func = tokenizer_func\n\n def tokenize(self, text: str) -> List[str]:\n return self.tokenizer_func(text)\n\n @property\n def name(self) -> str:\n return self.__class__.__name__ + \"_\" + self.tokenizer_func.__name__\n\n\nclass SciSpacyTokenizer(Tokenizer):\n \"\"\"\n Implementation of :class:`Tokenizer` which uses the en_core_sci_sm Spacy model\n extended by special heuristics to consider characters such as \"(\", \")\" \"-\" as\n additional token separators. The latter distinguishs this implementation from\n :class:`SpacyTokenizer`.\n\n Note, you if you want to use the \"normal\" SciSpacy tokenization just use\n :class:`SpacyTokenizer`.\n \"\"\"\n\n def __init__(self):\n super(SciSpacyTokenizer, self).__init__()\n\n try:\n import spacy\n from spacy.lang import char_classes\n except ImportError:\n raise ImportError(\n \" Please install scispacy version 0.2.5 (recommended) or higher before using the SciSpacy tokenizer, \"\n \"otherwise you can use SegtokTokenizer as alternative implementation.\\n\"\n \" You can install scispacy (version 0.2.5) by running:\\n\\n\"\n \" pip install scispacy==0.2.5\\n\\n\"\n \" By default HunFlair uses the `en_core_sci_sm` model. 
You can install the model by running:\\n\\n\"\n \" pip install https://s3-us-west-2.amazonaws.com/ai2-s2-scispacy/releases/v0.2.5/en_core_sci_sm-0.2.5.tar.gz\\n\\n\"\n \" Note that the scispacy version and the version of the model must match to work properly!\"\n )\n\n def combined_rule_prefixes() -> List[str]:\n \"\"\"Helper function that returns the prefix pattern for the tokenizer.\n It is a helper function to accommodate spacy tests that only test\n prefixes.\n \"\"\"\n prefix_punct = char_classes.PUNCT.replace(\"|\", \" \")\n\n prefixes = (\n [\"§\", \"%\", \"=\", r\"\\+\"]\n + char_classes.split_chars(prefix_punct)\n + char_classes.LIST_ELLIPSES\n + char_classes.LIST_QUOTES\n + char_classes.LIST_CURRENCY\n + char_classes.LIST_ICONS\n )\n return prefixes\n\n infixes = (\n char_classes.LIST_ELLIPSES\n + char_classes.LIST_ICONS\n + [\n r\"×\", # added this special x character to tokenize it separately\n r\"[\\(\\)\\[\\]\\{\\}]\", # want to split at every bracket\n r\"/\", # want to split at every slash\n r\"(?<=[0-9])[+\\-\\*^](?=[0-9-])\",\n r\"(?<=[{al}])\\.(?=[{au}])\".format(al=char_classes.ALPHA_LOWER, au=char_classes.ALPHA_UPPER),\n r\"(?<=[{a}]),(?=[{a}])\".format(a=char_classes.ALPHA),\n r'(?<=[{a}])[?\";:=,.]*(?:{h})(?=[{a}])'.format(a=char_classes.ALPHA, h=char_classes.HYPHENS),\n r\"(?<=[{a}0-9])[:<>=/](?=[{a}])\".format(a=char_classes.ALPHA),\n ]\n )\n\n prefix_re = spacy.util.compile_prefix_regex(combined_rule_prefixes())\n infix_re = spacy.util.compile_infix_regex(infixes)\n\n self.model = spacy.load(\n \"en_core_sci_sm\",\n disable=[\"tagger\", \"ner\", \"parser\", \"textcat\", \"lemmatizer\"],\n )\n self.model.tokenizer.prefix_search = prefix_re.search\n self.model.tokenizer.infix_finditer = infix_re.finditer\n\n def tokenize(self, text: str) -> List[str]:\n sentence = self.model(text)\n words: List[str] = []\n for word in sentence:\n word.append(word)\n return words\n\n @property\n def name(self) -> str:\n return self.__class__.__name__ + \"_\" + self.model.meta[\"name\"] + \"_\" + self.model.meta[\"version\"]\n\n\nclass SentenceSplitter(ABC):\n r\"\"\"An abstract class representing a :class:`SentenceSplitter`.\n\n Sentence splitters are used to represent algorithms and models to split plain text into\n sentences and individual tokens / words. All subclasses should overwrite :meth:`splits`,\n which splits the given plain text into a sequence of sentences (:class:`Sentence`). The\n individual sentences are in turn subdivided into tokens / words. 
In most cases, this can\n be controlled by passing custom implementation of :class:`Tokenizer`.\n\n Moreover, subclasses may overwrite :meth:`name`, returning a unique identifier representing\n the sentence splitter's configuration.\n \"\"\"\n\n @abstractmethod\n def split(self, text: str) -> List[Sentence]:\n raise NotImplementedError()\n\n @property\n def name(self) -> str:\n return self.__class__.__name__\n\n @property\n def tokenizer(self) -> Tokenizer:\n raise NotImplementedError()\n\n @tokenizer.setter\n def tokenizer(self, value: Tokenizer):\n raise NotImplementedError()\n\n\nclass SegtokSentenceSplitter(SentenceSplitter):\n \"\"\"\n Implementation of :class:`SentenceSplitter` using the SegTok library.\n\n For further details see: https://github.com/fnl/segtok\n \"\"\"\n\n def __init__(self, tokenizer: Tokenizer = SegtokTokenizer()):\n super(SegtokSentenceSplitter, self).__init__()\n self._tokenizer = tokenizer\n\n def split(self, text: str) -> List[Sentence]:\n plain_sentences: List[str] = split_multi(text)\n sentence_offset = 0\n\n sentences: List[Sentence] = []\n for sentence in plain_sentences:\n try:\n sentence_offset = text.index(sentence, sentence_offset)\n except ValueError as error:\n raise AssertionError(\n f\"Can't find the sentence offset for sentence {repr(sentence)} \"\n f\"starting from position {repr(sentence_offset)}\"\n ) from error\n sentences.append(\n Sentence(\n text=sentence,\n use_tokenizer=self._tokenizer,\n start_position=sentence_offset,\n )\n )\n\n sentence_offset += len(sentence)\n\n return sentences\n\n @property\n def name(self) -> str:\n return self.__class__.__name__\n\n @property\n def tokenizer(self) -> Tokenizer:\n return self._tokenizer\n\n @tokenizer.setter\n def tokenizer(self, value: Tokenizer):\n self._tokenizer = value\n\n\nclass SpacySentenceSplitter(SentenceSplitter):\n \"\"\"\n Implementation of :class:`SentenceSplitter`, using models from Spacy.\n\n :param model Spacy V2 model or the name of the model to load.\n :param tokenizer Custom tokenizer to use (default :class:`SpacyTokenizer`)\n \"\"\"\n\n def __init__(self, model: Union[Any, str], tokenizer: Tokenizer = None):\n super(SpacySentenceSplitter, self).__init__()\n\n try:\n import spacy\n from spacy.language import Language\n except ImportError:\n raise ImportError(\n \"Please install spacy v2.3.2 or higher before using the SpacySentenceSplitter, \"\n \"otherwise you can use SegtokSentenceSplitter as alternative implementation.\"\n )\n\n if isinstance(model, Language):\n self.model: Language = model\n else:\n assert isinstance(model, str)\n self.model = spacy.load(model)\n\n if tokenizer is None:\n self._tokenizer: Tokenizer = SpacyTokenizer(\"en_core_sci_sm\")\n else:\n self._tokenizer = tokenizer\n\n def split(self, text: str) -> List[Sentence]:\n document = self.model(text)\n\n sentences = [\n Sentence(\n text=str(spacy_sent),\n use_tokenizer=self._tokenizer,\n start_position=spacy_sent.start_char,\n )\n for spacy_sent in document.sents\n if len(str(spacy_sent)) > 0\n ]\n\n return sentences\n\n @property\n def tokenizer(self) -> Tokenizer:\n return self._tokenizer\n\n @tokenizer.setter\n def tokenizer(self, value: Tokenizer):\n self._tokenizer = value\n\n @property\n def name(self) -> str:\n return (\n self.__class__.__name__\n + \"_\"\n + self.model.meta[\"name\"]\n + \"_\"\n + self.model.meta[\"version\"]\n + \"_\"\n + self._tokenizer.name\n )\n\n\nclass SciSpacySentenceSplitter(SpacySentenceSplitter):\n \"\"\"\n Convenience class to instantiate :class:`SpacySentenceSplitter` 
with Spacy model `en_core_sci_sm`\n for sentence splitting and :class:`SciSpacyTokenizer` as tokenizer.\n \"\"\"\n\n def __init__(self):\n super(SciSpacySentenceSplitter, self).__init__(\"en_core_sci_sm\", SciSpacyTokenizer())\n\n\nclass TagSentenceSplitter(SentenceSplitter):\n \"\"\"\n Implementation of :class:`SentenceSplitter` which assumes that there is a special tag within\n the text that is used to mark sentence boundaries.\n \"\"\"\n\n def __init__(self, tag: str, tokenizer: Tokenizer = SegtokTokenizer()):\n super(TagSentenceSplitter, self).__init__()\n self._tokenizer = tokenizer\n self.tag = tag\n\n def split(self, text: str) -> List[Sentence]:\n plain_sentences = text.split(self.tag)\n\n sentences = []\n last_offset = 0\n\n for sentence in plain_sentences:\n if len(sentence.strip()) == 0:\n continue\n\n sentences += [\n Sentence(\n text=sentence,\n use_tokenizer=self._tokenizer,\n start_position=last_offset,\n )\n ]\n\n last_offset += len(sentence) + len(self.tag)\n\n return sentences\n\n @property\n def tokenizer(self) -> Tokenizer:\n return self._tokenizer\n\n @tokenizer.setter\n def tokenizer(self, value: Tokenizer):\n self._tokenizer = value\n\n @property\n def name(self) -> str:\n return self.__class__.__name__ + \"_\" + self.tag + \"_\" + self._tokenizer.name\n\n\nclass NewlineSentenceSplitter(TagSentenceSplitter):\n \"\"\"\n Convenience class to instantiate :class:`SentenceTagSplitter` with newline (\"\\n\") as\n sentence boundary marker.\n \"\"\"\n\n def __init__(self, tokenizer: Tokenizer = SegtokTokenizer()):\n super(NewlineSentenceSplitter, self).__init__(tag=\"\\n\", tokenizer=tokenizer)\n\n @property\n def name(self) -> str:\n return self.__class__.__name__ + \"_\" + self._tokenizer.name\n\n\nclass NoSentenceSplitter(SentenceSplitter):\n \"\"\"\n Implementation of :class:`SentenceSplitter` which treats the complete text as one sentence.\n \"\"\"\n\n def __init__(self, tokenizer: Tokenizer = SegtokTokenizer()):\n super(NoSentenceSplitter, self).__init__()\n self._tokenizer = tokenizer\n\n def split(self, text: str) -> List[Sentence]:\n return [Sentence(text=text, use_tokenizer=self._tokenizer, start_position=0)]\n\n @property\n def tokenizer(self) -> Tokenizer:\n return self._tokenizer\n\n @tokenizer.setter\n def tokenizer(self, value: Tokenizer):\n self._tokenizer = value\n\n @property\n def name(self) -> str:\n return self.__class__.__name__ + \"_\" + self._tokenizer.name\n",
"path": "flair/tokenization.py"
}
] | [
{
"content": "import logging\nfrom abc import ABC, abstractmethod\nfrom typing import Any, Callable, List, Union\n\nfrom segtok.segmenter import split_multi, split_single\nfrom segtok.tokenizer import split_contractions, word_tokenizer\n\nfrom flair.data import Sentence, Tokenizer\n\nlog = logging.getLogger(\"flair\")\n\n\nclass SpacyTokenizer(Tokenizer):\n \"\"\"\n Implementation of :class:`Tokenizer`, using models from Spacy.\n\n :param model a Spacy V2 model or the name of the model to load.\n \"\"\"\n\n def __init__(self, model):\n super(SpacyTokenizer, self).__init__()\n\n try:\n import spacy\n from spacy.language import Language\n except ImportError:\n raise ImportError(\n \"Please install Spacy v2.0 or better before using the Spacy tokenizer, \"\n \"otherwise you can use SegtokTokenizer as advanced tokenizer.\"\n )\n\n if isinstance(model, Language):\n self.model: Language = model\n elif isinstance(model, str):\n self.model: Language = spacy.load(model)\n else:\n raise AssertionError(\n \"Unexpected type of parameter model. Please provide a loaded \"\n \"spacy model or the name of the model to load.\"\n )\n\n def tokenize(self, text: str) -> List[str]:\n from spacy.tokens.doc import Doc\n\n doc: Doc = self.model.make_doc(text)\n words: List[str] = []\n for word in doc:\n if len(word.text.strip()) == 0:\n continue\n words.append(word.text)\n return words\n\n @property\n def name(self) -> str:\n return self.__class__.__name__ + \"_\" + self.model.meta[\"name\"] + \"_\" + self.model.meta[\"version\"]\n\n\nclass SegtokTokenizer(Tokenizer):\n \"\"\"\n Tokenizer using segtok, a third party library dedicated to rules-based Indo-European languages.\n\n For further details see: https://github.com/fnl/segtok\n \"\"\"\n\n def __init__(self):\n super(SegtokTokenizer, self).__init__()\n\n def tokenize(self, text: str) -> List[str]:\n return SegtokTokenizer.run_tokenize(text)\n\n @staticmethod\n def run_tokenize(text: str) -> List[str]:\n words: List[str] = []\n\n sentences = split_single(text)\n for sentence in sentences:\n contractions = split_contractions(word_tokenizer(sentence))\n words.extend(contractions)\n\n words = list(filter(None, words))\n\n return words\n\n\nclass SpaceTokenizer(Tokenizer):\n \"\"\"\n Tokenizer based on space character only.\n \"\"\"\n\n def __init__(self):\n super(SpaceTokenizer, self).__init__()\n\n def tokenize(self, text: str) -> List[str]:\n return SpaceTokenizer.run_tokenize(text)\n\n @staticmethod\n def run_tokenize(text: str) -> List[str]:\n tokens: List[str] = []\n word = \"\"\n index = -1\n for index, char in enumerate(text):\n if char == \" \":\n if len(word) > 0:\n tokens.append(word)\n\n word = \"\"\n else:\n word += char\n # increment for last token in sentence if not followed by whitespace\n index += 1\n if len(word) > 0:\n tokens.append(word)\n\n return tokens\n\n\nclass JapaneseTokenizer(Tokenizer):\n \"\"\"\n Tokenizer using konoha, a third party library which supports\n multiple Japanese tokenizer such as MeCab, Janome and SudachiPy.\n\n For further details see:\n https://github.com/himkt/konoha\n \"\"\"\n\n def __init__(self, tokenizer: str, sudachi_mode: str = \"A\"):\n super(JapaneseTokenizer, self).__init__()\n\n available_tokenizers = [\"mecab\", \"janome\", \"sudachi\"]\n\n if tokenizer.lower() not in available_tokenizers:\n raise NotImplementedError(\n f\"Currently, {tokenizer} is only supported. 
Supported tokenizers: {available_tokenizers}.\"\n )\n\n try:\n import konoha\n except ModuleNotFoundError:\n log.warning(\"-\" * 100)\n log.warning('ATTENTION! The library \"konoha\" is not installed!')\n log.warning(\n '- If you want to use MeCab, install mecab with \"sudo apt install mecab libmecab-dev mecab-ipadic\".'\n )\n log.warning('- Install konoha with \"pip install konoha[{tokenizer_name}]\"')\n log.warning(' - You can choose tokenizer from [\"mecab\", \"janome\", \"sudachi\"].')\n log.warning(\"-\" * 100)\n exit()\n\n self.tokenizer = tokenizer\n self.sentence_tokenizer = konoha.SentenceTokenizer()\n self.word_tokenizer = konoha.WordTokenizer(tokenizer, mode=sudachi_mode)\n\n def tokenize(self, text: str) -> List[str]:\n words: List[str] = []\n\n sentences = self.sentence_tokenizer.tokenize(text)\n for sentence in sentences:\n konoha_tokens = self.word_tokenizer.tokenize(sentence)\n words.extend(list(map(str, konoha_tokens)))\n\n return words\n\n @property\n def name(self) -> str:\n return self.__class__.__name__ + \"_\" + self.tokenizer\n\n\nclass TokenizerWrapper(Tokenizer):\n \"\"\"\n Helper class to wrap tokenizer functions to the class-based tokenizer interface.\n \"\"\"\n\n def __init__(self, tokenizer_func: Callable[[str], List[str]]):\n super(TokenizerWrapper, self).__init__()\n self.tokenizer_func = tokenizer_func\n\n def tokenize(self, text: str) -> List[str]:\n return self.tokenizer_func(text)\n\n @property\n def name(self) -> str:\n return self.__class__.__name__ + \"_\" + self.tokenizer_func.__name__\n\n\nclass SciSpacyTokenizer(Tokenizer):\n \"\"\"\n Implementation of :class:`Tokenizer` which uses the en_core_sci_sm Spacy model\n extended by special heuristics to consider characters such as \"(\", \")\" \"-\" as\n additional token separators. The latter distinguishs this implementation from\n :class:`SpacyTokenizer`.\n\n Note, you if you want to use the \"normal\" SciSpacy tokenization just use\n :class:`SpacyTokenizer`.\n \"\"\"\n\n def __init__(self):\n super(SciSpacyTokenizer, self).__init__()\n\n try:\n import spacy\n from spacy.lang import char_classes\n except ImportError:\n raise ImportError(\n \" Please install scispacy version 0.2.5 (recommended) or higher before using the SciSpacy tokenizer, \"\n \"otherwise you can use SegtokTokenizer as alternative implementation.\\n\"\n \" You can install scispacy (version 0.2.5) by running:\\n\\n\"\n \" pip install scispacy==0.2.5\\n\\n\"\n \" By default HunFlair uses the `en_core_sci_sm` model. 
You can install the model by running:\\n\\n\"\n \" pip install https://s3-us-west-2.amazonaws.com/ai2-s2-scispacy/releases/v0.2.5/en_core_sci_sm-0.2.5.tar.gz\\n\\n\"\n \" Note that the scispacy version and the version of the model must match to work properly!\"\n )\n\n def combined_rule_prefixes() -> List[str]:\n \"\"\"Helper function that returns the prefix pattern for the tokenizer.\n It is a helper function to accommodate spacy tests that only test\n prefixes.\n \"\"\"\n prefix_punct = char_classes.PUNCT.replace(\"|\", \" \")\n\n prefixes = (\n [\"§\", \"%\", \"=\", r\"\\+\"]\n + char_classes.split_chars(prefix_punct)\n + char_classes.LIST_ELLIPSES\n + char_classes.LIST_QUOTES\n + char_classes.LIST_CURRENCY\n + char_classes.LIST_ICONS\n )\n return prefixes\n\n infixes = (\n char_classes.LIST_ELLIPSES\n + char_classes.LIST_ICONS\n + [\n r\"×\", # added this special x character to tokenize it separately\n r\"[\\(\\)\\[\\]\\{\\}]\", # want to split at every bracket\n r\"/\", # want to split at every slash\n r\"(?<=[0-9])[+\\-\\*^](?=[0-9-])\",\n r\"(?<=[{al}])\\.(?=[{au}])\".format(al=char_classes.ALPHA_LOWER, au=char_classes.ALPHA_UPPER),\n r\"(?<=[{a}]),(?=[{a}])\".format(a=char_classes.ALPHA),\n r'(?<=[{a}])[?\";:=,.]*(?:{h})(?=[{a}])'.format(a=char_classes.ALPHA, h=char_classes.HYPHENS),\n r\"(?<=[{a}0-9])[:<>=/](?=[{a}])\".format(a=char_classes.ALPHA),\n ]\n )\n\n prefix_re = spacy.util.compile_prefix_regex(combined_rule_prefixes())\n infix_re = spacy.util.compile_infix_regex(infixes)\n\n self.model = spacy.load(\n \"en_core_sci_sm\",\n disable=[\"tagger\", \"ner\", \"parser\", \"textcat\", \"lemmatizer\"],\n )\n self.model.tokenizer.prefix_search = prefix_re.search\n self.model.tokenizer.infix_finditer = infix_re.finditer\n\n def tokenize(self, text: str) -> List[str]:\n sentence = self.model(text)\n words: List[str] = []\n for word in sentence:\n words.append(word.text)\n return words\n\n @property\n def name(self) -> str:\n return self.__class__.__name__ + \"_\" + self.model.meta[\"name\"] + \"_\" + self.model.meta[\"version\"]\n\n\nclass SentenceSplitter(ABC):\n r\"\"\"An abstract class representing a :class:`SentenceSplitter`.\n\n Sentence splitters are used to represent algorithms and models to split plain text into\n sentences and individual tokens / words. All subclasses should overwrite :meth:`splits`,\n which splits the given plain text into a sequence of sentences (:class:`Sentence`). The\n individual sentences are in turn subdivided into tokens / words. 
In most cases, this can\n be controlled by passing custom implementation of :class:`Tokenizer`.\n\n Moreover, subclasses may overwrite :meth:`name`, returning a unique identifier representing\n the sentence splitter's configuration.\n \"\"\"\n\n @abstractmethod\n def split(self, text: str) -> List[Sentence]:\n raise NotImplementedError()\n\n @property\n def name(self) -> str:\n return self.__class__.__name__\n\n @property\n def tokenizer(self) -> Tokenizer:\n raise NotImplementedError()\n\n @tokenizer.setter\n def tokenizer(self, value: Tokenizer):\n raise NotImplementedError()\n\n\nclass SegtokSentenceSplitter(SentenceSplitter):\n \"\"\"\n Implementation of :class:`SentenceSplitter` using the SegTok library.\n\n For further details see: https://github.com/fnl/segtok\n \"\"\"\n\n def __init__(self, tokenizer: Tokenizer = SegtokTokenizer()):\n super(SegtokSentenceSplitter, self).__init__()\n self._tokenizer = tokenizer\n\n def split(self, text: str) -> List[Sentence]:\n plain_sentences: List[str] = split_multi(text)\n sentence_offset = 0\n\n sentences: List[Sentence] = []\n for sentence in plain_sentences:\n try:\n sentence_offset = text.index(sentence, sentence_offset)\n except ValueError as error:\n raise AssertionError(\n f\"Can't find the sentence offset for sentence {repr(sentence)} \"\n f\"starting from position {repr(sentence_offset)}\"\n ) from error\n sentences.append(\n Sentence(\n text=sentence,\n use_tokenizer=self._tokenizer,\n start_position=sentence_offset,\n )\n )\n\n sentence_offset += len(sentence)\n\n return sentences\n\n @property\n def name(self) -> str:\n return self.__class__.__name__\n\n @property\n def tokenizer(self) -> Tokenizer:\n return self._tokenizer\n\n @tokenizer.setter\n def tokenizer(self, value: Tokenizer):\n self._tokenizer = value\n\n\nclass SpacySentenceSplitter(SentenceSplitter):\n \"\"\"\n Implementation of :class:`SentenceSplitter`, using models from Spacy.\n\n :param model Spacy V2 model or the name of the model to load.\n :param tokenizer Custom tokenizer to use (default :class:`SpacyTokenizer`)\n \"\"\"\n\n def __init__(self, model: Union[Any, str], tokenizer: Tokenizer = None):\n super(SpacySentenceSplitter, self).__init__()\n\n try:\n import spacy\n from spacy.language import Language\n except ImportError:\n raise ImportError(\n \"Please install spacy v2.3.2 or higher before using the SpacySentenceSplitter, \"\n \"otherwise you can use SegtokSentenceSplitter as alternative implementation.\"\n )\n\n if isinstance(model, Language):\n self.model: Language = model\n else:\n assert isinstance(model, str)\n self.model = spacy.load(model)\n\n if tokenizer is None:\n self._tokenizer: Tokenizer = SpacyTokenizer(\"en_core_sci_sm\")\n else:\n self._tokenizer = tokenizer\n\n def split(self, text: str) -> List[Sentence]:\n document = self.model(text)\n\n sentences = [\n Sentence(\n text=str(spacy_sent),\n use_tokenizer=self._tokenizer,\n start_position=spacy_sent.start_char,\n )\n for spacy_sent in document.sents\n if len(str(spacy_sent)) > 0\n ]\n\n return sentences\n\n @property\n def tokenizer(self) -> Tokenizer:\n return self._tokenizer\n\n @tokenizer.setter\n def tokenizer(self, value: Tokenizer):\n self._tokenizer = value\n\n @property\n def name(self) -> str:\n return (\n self.__class__.__name__\n + \"_\"\n + self.model.meta[\"name\"]\n + \"_\"\n + self.model.meta[\"version\"]\n + \"_\"\n + self._tokenizer.name\n )\n\n\nclass SciSpacySentenceSplitter(SpacySentenceSplitter):\n \"\"\"\n Convenience class to instantiate :class:`SpacySentenceSplitter` 
with Spacy model `en_core_sci_sm`\n for sentence splitting and :class:`SciSpacyTokenizer` as tokenizer.\n \"\"\"\n\n def __init__(self):\n super(SciSpacySentenceSplitter, self).__init__(\"en_core_sci_sm\", SciSpacyTokenizer())\n\n\nclass TagSentenceSplitter(SentenceSplitter):\n \"\"\"\n Implementation of :class:`SentenceSplitter` which assumes that there is a special tag within\n the text that is used to mark sentence boundaries.\n \"\"\"\n\n def __init__(self, tag: str, tokenizer: Tokenizer = SegtokTokenizer()):\n super(TagSentenceSplitter, self).__init__()\n self._tokenizer = tokenizer\n self.tag = tag\n\n def split(self, text: str) -> List[Sentence]:\n plain_sentences = text.split(self.tag)\n\n sentences = []\n last_offset = 0\n\n for sentence in plain_sentences:\n if len(sentence.strip()) == 0:\n continue\n\n sentences += [\n Sentence(\n text=sentence,\n use_tokenizer=self._tokenizer,\n start_position=last_offset,\n )\n ]\n\n last_offset += len(sentence) + len(self.tag)\n\n return sentences\n\n @property\n def tokenizer(self) -> Tokenizer:\n return self._tokenizer\n\n @tokenizer.setter\n def tokenizer(self, value: Tokenizer):\n self._tokenizer = value\n\n @property\n def name(self) -> str:\n return self.__class__.__name__ + \"_\" + self.tag + \"_\" + self._tokenizer.name\n\n\nclass NewlineSentenceSplitter(TagSentenceSplitter):\n \"\"\"\n Convenience class to instantiate :class:`SentenceTagSplitter` with newline (\"\\n\") as\n sentence boundary marker.\n \"\"\"\n\n def __init__(self, tokenizer: Tokenizer = SegtokTokenizer()):\n super(NewlineSentenceSplitter, self).__init__(tag=\"\\n\", tokenizer=tokenizer)\n\n @property\n def name(self) -> str:\n return self.__class__.__name__ + \"_\" + self._tokenizer.name\n\n\nclass NoSentenceSplitter(SentenceSplitter):\n \"\"\"\n Implementation of :class:`SentenceSplitter` which treats the complete text as one sentence.\n \"\"\"\n\n def __init__(self, tokenizer: Tokenizer = SegtokTokenizer()):\n super(NoSentenceSplitter, self).__init__()\n self._tokenizer = tokenizer\n\n def split(self, text: str) -> List[Sentence]:\n return [Sentence(text=text, use_tokenizer=self._tokenizer, start_position=0)]\n\n @property\n def tokenizer(self) -> Tokenizer:\n return self._tokenizer\n\n @tokenizer.setter\n def tokenizer(self, value: Tokenizer):\n self._tokenizer = value\n\n @property\n def name(self) -> str:\n return self.__class__.__name__ + \"_\" + self._tokenizer.name\n",
"path": "flair/tokenization.py"
}
] | diff --git a/flair/tokenization.py b/flair/tokenization.py
index 33134ad555..c285c2ddd0 100644
--- a/flair/tokenization.py
+++ b/flair/tokenization.py
@@ -256,7 +256,7 @@ def tokenize(self, text: str) -> List[str]:
sentence = self.model(text)
words: List[str] = []
for word in sentence:
- word.append(word)
+ words.append(word.text)
return words
@property
|
typeddjango__django-stubs-1794 | Next release planning (4.2.6)
As [announced in the 4.2.5 release notes](https://github.com/typeddjango/django-stubs/releases/tag/4.2.5), we will drop the hard dependency on mypy. Users of django-stubs with mypy will need to add their own dependency on mypy, or install the `django-stubs[compatible-mypy]` extra.
I'm hoping to release this in less than a week to fix a few bugs.
Blockers:
* #1778 (fixes a bug introduced in version 4.2.5)
* #1782
* #1784
* #1786
setup.py install is deprecated
We will need to deal with this at some point. https://blog.ganssle.io/articles/2021/10/setup-py-deprecated.html
This warning shows up when making a release:
```
.../site-packages/setuptools/_distutils/cmd.py:66: SetuptoolsDeprecationWarning: setup.py install is deprecated.
!!
********************************************************************************
Please avoid running ``setup.py`` directly.
Instead, use pypa/build, pypa/installer or other
standards-based tools.
See https://blog.ganssle.io/articles/2021/10/setup-py-deprecated.html for details.
********************************************************************************
!!
```
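Following that advice, the release steps become pypa/build plus twine rather than `setup.py sdist bdist_wheel` (the PR diff below applies exactly this change to `scripts/release.sh`). A minimal sketch of the same steps, written in Python with `subprocess` purely for illustration:
```python
# Illustration only: equivalent of the updated release commands in scripts/release.sh.
import glob
import subprocess

subprocess.run(["python", "-m", "pip", "install", "--upgrade", "setuptools", "wheel", "twine", "build"], check=True)
subprocess.run(["python", "-m", "build"], check=True)  # builds the sdist and wheel into dist/
subprocess.run(["python", "-m", "twine", "upload", *glob.glob("dist/*")], check=True)
```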
| [
{
"content": "#!/usr/bin/env python\nimport os\nfrom typing import List\n\nfrom setuptools import find_packages, setup\n\n\ndef find_stub_files(name: str) -> List[str]:\n result = []\n for root, _dirs, files in os.walk(name):\n for file in files:\n if file.endswith(\".pyi\"):\n if os.path.sep in root:\n sub_root = root.split(os.path.sep, 1)[-1]\n file = os.path.join(sub_root, file)\n result.append(file)\n return result\n\n\nwith open(\"README.md\") as f:\n readme = f.read()\n\ndependencies = [\n \"django\",\n \"django-stubs-ext>=4.2.5\",\n \"tomli; python_version < '3.11'\",\n # Types:\n \"typing-extensions\",\n \"types-pytz\",\n \"types-PyYAML\",\n]\n\n# Keep compatible-mypy major.minor version pinned to what we use in CI (requirements.txt)\nextras_require = {\n \"compatible-mypy\": [\"mypy~=1.6.0\"],\n}\n\nsetup(\n name=\"django-stubs\",\n version=\"4.2.5\",\n description=\"Mypy stubs for Django\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n license=\"MIT\",\n license_files=[\"LICENSE.md\"],\n url=\"https://github.com/typeddjango/django-stubs\",\n author=\"Maksim Kurnikov\",\n author_email=\"[email protected]\",\n maintainer=\"Marti Raudsepp\",\n maintainer_email=\"[email protected]\",\n py_modules=[],\n python_requires=\">=3.8\",\n install_requires=dependencies,\n extras_require=extras_require,\n packages=[\"django-stubs\", *find_packages(exclude=[\"scripts\"])],\n package_data={\n \"django-stubs\": find_stub_files(\"django-stubs\"),\n \"mypy_django_plugin\": [\"py.typed\"],\n },\n classifiers=[\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Programming Language :: Python :: 3.12\",\n \"Typing :: Typed\",\n \"Framework :: Django\",\n \"Framework :: Django :: 3.2\",\n \"Framework :: Django :: 4.1\",\n \"Framework :: Django :: 4.2\",\n ],\n project_urls={\n \"Release notes\": \"https://github.com/typeddjango/django-stubs/releases\",\n },\n)\n",
"path": "setup.py"
}
] | [
{
"content": "#!/usr/bin/env python\nimport os\nfrom typing import List\n\nfrom setuptools import find_packages, setup\n\n\ndef find_stub_files(name: str) -> List[str]:\n result = []\n for root, _dirs, files in os.walk(name):\n for file in files:\n if file.endswith(\".pyi\"):\n if os.path.sep in root:\n sub_root = root.split(os.path.sep, 1)[-1]\n file = os.path.join(sub_root, file)\n result.append(file)\n return result\n\n\nwith open(\"README.md\") as f:\n readme = f.read()\n\ndependencies = [\n \"django\",\n \"django-stubs-ext>=4.2.5\",\n \"tomli; python_version < '3.11'\",\n # Types:\n \"typing-extensions\",\n \"types-pytz\",\n \"types-PyYAML\",\n]\n\n# Keep compatible-mypy major.minor version pinned to what we use in CI (requirements.txt)\nextras_require = {\n \"compatible-mypy\": [\"mypy~=1.6.0\"],\n}\n\nsetup(\n name=\"django-stubs\",\n version=\"4.2.6\",\n description=\"Mypy stubs for Django\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n license=\"MIT\",\n license_files=[\"LICENSE.md\"],\n url=\"https://github.com/typeddjango/django-stubs\",\n author=\"Maksim Kurnikov\",\n author_email=\"[email protected]\",\n maintainer=\"Marti Raudsepp\",\n maintainer_email=\"[email protected]\",\n py_modules=[],\n python_requires=\">=3.8\",\n install_requires=dependencies,\n extras_require=extras_require,\n packages=[\"django-stubs\", *find_packages(exclude=[\"scripts\"])],\n package_data={\n \"django-stubs\": find_stub_files(\"django-stubs\"),\n \"mypy_django_plugin\": [\"py.typed\"],\n },\n classifiers=[\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Programming Language :: Python :: 3.12\",\n \"Typing :: Typed\",\n \"Framework :: Django\",\n \"Framework :: Django :: 3.2\",\n \"Framework :: Django :: 4.1\",\n \"Framework :: Django :: 4.2\",\n ],\n project_urls={\n \"Release notes\": \"https://github.com/typeddjango/django-stubs/releases\",\n },\n)\n",
"path": "setup.py"
}
] | diff --git a/.gitignore b/.gitignore
index cc1ac47cb..1141379bd 100644
--- a/.gitignore
+++ b/.gitignore
@@ -11,3 +11,4 @@ out/
pip-wheel-metadata/
stubgen/
build/
+dist/
diff --git a/README.md b/README.md
index a578e8267..c7cd38c1d 100644
--- a/README.md
+++ b/README.md
@@ -49,6 +49,7 @@ We rely on different `django` and `mypy` versions:
| django-stubs | Mypy version | Django version | Django partial support | Python version |
|----------------|--------------|----------------|------------------------|----------------|
+| 4.2.6 | 1.6.x | 4.2 | 4.1, 3.2 | 3.8 - 3.12 |
| 4.2.5 | 1.6.x | 4.2 | 4.1, 3.2 | 3.8 - 3.12 |
| 4.2.4 | 1.5.x | 4.2 | 4.1, 3.2 | 3.8 - 3.11 |
| 4.2.3 | 1.4.x | 4.2 | 4.1, 3.2 | 3.8 - 3.11 |
diff --git a/scripts/release.sh b/scripts/release.sh
index 71281961a..de8ab2da6 100755
--- a/scripts/release.sh
+++ b/scripts/release.sh
@@ -5,9 +5,9 @@ if [[ -z $(git status -s) ]]
then
if [[ "$VIRTUAL_ENV" != "" ]]
then
- pip install --upgrade setuptools wheel twine
+ pip install --upgrade setuptools wheel twine build
rm -rf dist/ build/
- python setup.py sdist bdist_wheel
+ python -m build
twine upload dist/*
rm -rf dist/ build/
else
diff --git a/setup.py b/setup.py
index 5973d2e63..18bb048eb 100644
--- a/setup.py
+++ b/setup.py
@@ -37,7 +37,7 @@ def find_stub_files(name: str) -> List[str]:
setup(
name="django-stubs",
- version="4.2.5",
+ version="4.2.6",
description="Mypy stubs for Django",
long_description=readme,
long_description_content_type="text/markdown",
|
oppia__oppia-6846 | Adding Lesson Topics to Lesson-Specific Landing Pages
**Is your feature request related to a problem? Please describe.**
Currently, our lesson landing pages don't include many of the keywords related to the lessons themselves, which makes them more difficult to surface in searches and in our ads.
**Describe the solution you'd like**
I would like to add lesson topics/areas to the lesson landing page (as seen in the screenshot below). In mobile view, the Topics covered list will be seen above the Otter in one column.
Also note that Mark recommended using a more colorful cake, like the one seen in the screenshot below, for the Fractions landing page.
**Describe alternatives you've considered**
I've also considered adding more keywords by adding exploration titles to the collection landing pages to increase relevancy to those pages as well.
**Additional context**
<img width="499" alt="Screenshot 2019-05-24 14 01 05" src="https://user-images.githubusercontent.com/12034267/58350733-60d8ea00-7e2c-11e9-91e5-7d934471f1f6.png">
<img width="499" alt="Screenshot 2019-05-24 14 00 24" src="https://user-images.githubusercontent.com/12034267/58350707-4868cf80-7e2c-11e9-8734-497549b6464c.png">
| [
{
"content": "# coding: utf-8\n#\n# Copyright 2014 The Oppia Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Stores various configuration options and constants for Oppia.\"\"\"\n\nimport copy\nimport datetime\nimport os\n\nfrom constants import constants # pylint: disable=relative-import\n\n# Whether to unconditionally log info messages.\nDEBUG = False\n\n# When DEV_MODE is true check that we are running in development environment.\n# The SERVER_SOFTWARE environment variable does not exist in Travis, hence the\n# need for an explicit check.\nif (constants.DEV_MODE and os.getenv('SERVER_SOFTWARE') and\n not os.getenv('SERVER_SOFTWARE', default='').startswith('Development')):\n raise Exception('DEV_MODE can\\'t be true on production.')\n\nCLASSIFIERS_DIR = os.path.join('extensions', 'classifiers')\nTESTS_DATA_DIR = os.path.join('core', 'tests', 'data')\nSAMPLE_EXPLORATIONS_DIR = os.path.join('data', 'explorations')\nSAMPLE_COLLECTIONS_DIR = os.path.join('data', 'collections')\nCONTENT_VALIDATION_DIR = os.path.join('core', 'domain')\n\nEXTENSIONS_DIR_PREFIX = (\n 'backend_prod_files' if not constants.DEV_MODE else '')\nACTIONS_DIR = (\n os.path.join(EXTENSIONS_DIR_PREFIX, 'extensions', 'actions'))\nISSUES_DIR = (\n os.path.join(EXTENSIONS_DIR_PREFIX, 'extensions', 'issues'))\nINTERACTIONS_DIR = (\n os.path.join(EXTENSIONS_DIR_PREFIX, 'extensions', 'interactions'))\nRTE_EXTENSIONS_DIR = (\n os.path.join(EXTENSIONS_DIR_PREFIX, 'extensions', 'rich_text_components'))\nRTE_EXTENSIONS_DEFINITIONS_PATH = (\n os.path.join('assets', 'rich_text_components_definitions.js'))\n\nOBJECT_TEMPLATES_DIR = os.path.join('extensions', 'objects', 'templates')\n\n# Choose production templates folder when we are in production mode.\nif not constants.DEV_MODE:\n FRONTEND_TEMPLATES_DIR = (\n os.path.join('backend_prod_files', 'templates', 'head'))\nelse:\n FRONTEND_TEMPLATES_DIR = os.path.join('core', 'templates', 'dev', 'head')\nDEPENDENCIES_TEMPLATES_DIR = (\n os.path.join(EXTENSIONS_DIR_PREFIX, 'extensions', 'dependencies'))\n\nVALUE_GENERATORS_DIR_FOR_JS = os.path.join(\n 'local_compiled_js', 'extensions', 'value_generators')\nVALUE_GENERATORS_DIR = os.path.join('extensions', 'value_generators')\n\nVISUALIZATIONS_DIR = os.path.join(\n 'extensions', 'visualizations')\nVISUALIZATIONS_DIR_FOR_JS = os.path.join(\n 'local_compiled_js', 'extensions', 'visualizations')\n\nOBJECT_DEFAULT_VALUES_FILE_PATH = os.path.join(\n 'extensions', 'objects', 'object_defaults.json')\nRULES_DESCRIPTIONS_FILE_PATH = os.path.join(\n os.getcwd(), 'extensions', 'interactions', 'rule_templates.json')\n\n# A mapping of interaction ids to classifier properties.\nINTERACTION_CLASSIFIER_MAPPING = {\n 'TextInput': {\n 'algorithm_id': 'TextClassifier',\n 'current_data_schema_version': 1\n },\n 'CodeRepl': {\n 'algorithm_id': 'CodeClassifier',\n 'current_data_schema_version': 1\n }\n}\n# Classifier job time to live (in mins).\nCLASSIFIER_JOB_TTL_MINS = 5\nTRAINING_JOB_STATUS_COMPLETE = 
'COMPLETE'\nTRAINING_JOB_STATUS_FAILED = 'FAILED'\nTRAINING_JOB_STATUS_NEW = 'NEW'\nTRAINING_JOB_STATUS_PENDING = 'PENDING'\n\nALLOWED_TRAINING_JOB_STATUSES = [\n TRAINING_JOB_STATUS_COMPLETE,\n TRAINING_JOB_STATUS_FAILED,\n TRAINING_JOB_STATUS_NEW,\n TRAINING_JOB_STATUS_PENDING\n]\n\n# The maximum number of characters allowed for userbio length.\nMAX_BIO_LENGTH_IN_CHARS = 2000\n\nALLOWED_TRAINING_JOB_STATUS_CHANGES = {\n TRAINING_JOB_STATUS_COMPLETE: [],\n TRAINING_JOB_STATUS_NEW: [TRAINING_JOB_STATUS_PENDING],\n TRAINING_JOB_STATUS_PENDING: [TRAINING_JOB_STATUS_COMPLETE,\n TRAINING_JOB_STATUS_FAILED],\n TRAINING_JOB_STATUS_FAILED: [TRAINING_JOB_STATUS_NEW]\n}\n\nENTITY_TYPE_EXPLORATION = 'exploration'\nENTITY_TYPE_TOPIC = 'topic'\n\n# The maximum number of activities allowed in the playlist of the learner. This\n# limit applies to both the explorations playlist and the collections playlist.\nMAX_LEARNER_PLAYLIST_ACTIVITY_COUNT = 10\n\n# The minimum number of training samples required for training a classifier.\nMIN_TOTAL_TRAINING_EXAMPLES = 50\n\n# The minimum number of assigned labels required for training a classifier.\nMIN_ASSIGNED_LABELS = 2\n\n# Default label for classification algorithms.\nDEFAULT_CLASSIFIER_LABEL = '_default'\n\n# The maximum number of results to retrieve in a datastore query.\nDEFAULT_QUERY_LIMIT = 1000\n\n# The maximum number of results to retrieve in a datastore query\n# for top rated published explorations in /library page.\nNUMBER_OF_TOP_RATED_EXPLORATIONS_FOR_LIBRARY_PAGE = 8\n\n# The maximum number of results to retrieve in a datastore query\n# for recently published explorations in /library page.\nRECENTLY_PUBLISHED_QUERY_LIMIT_FOR_LIBRARY_PAGE = 8\n\n# The maximum number of results to retrieve in a datastore query\n# for top rated published explorations in /library/top_rated page.\nNUMBER_OF_TOP_RATED_EXPLORATIONS_FULL_PAGE = 20\n\n# The maximum number of results to retrieve in a datastore query\n# for recently published explorations in /library/recently_published page.\nRECENTLY_PUBLISHED_QUERY_LIMIT_FULL_PAGE = 20\n\n# The current version of the dashboard stats blob schema. If any backward-\n# incompatible changes are made to the stats blob schema in the data store,\n# this version number must be changed.\nCURRENT_DASHBOARD_STATS_SCHEMA_VERSION = 1\n\n# The current version of the exploration states blob schema. If any backward-\n# incompatible changes are made to the states blob schema in the data store,\n# this version number must be changed and the exploration migration job\n# executed.\nCURRENT_STATE_SCHEMA_VERSION = 28\n\n# The current version of the all collection blob schemas (such as the nodes\n# structure within the Collection domain object). 
If any backward-incompatible\n# changes are made to any of the blob schemas in the data store, this version\n# number must be changed.\nCURRENT_COLLECTION_SCHEMA_VERSION = 6\n\n# The current version of story contents dict in the story schema.\nCURRENT_STORY_CONTENTS_SCHEMA_VERSION = 1\n\n# The current version of skill contents dict in the skill schema.\nCURRENT_SKILL_CONTENTS_SCHEMA_VERSION = 1\n\n# The current version of misconceptions dict in the skill schema.\nCURRENT_MISCONCEPTIONS_SCHEMA_VERSION = 1\n\n# The current version of subtopics dict in the topic schema.\nCURRENT_SUBTOPIC_SCHEMA_VERSION = 1\n\n# The current version of page_contents dict in the subtopic page schema.\nCURRENT_SUBTOPIC_PAGE_CONTENTS_SCHEMA_VERSION = 1\n\n# This value should be updated in the event of any\n# StateAnswersModel.submitted_answer_list schema change.\nCURRENT_STATE_ANSWERS_SCHEMA_VERSION = 1\n\n# The default number of exploration tiles to load at a time in the search\n# results page.\nSEARCH_RESULTS_PAGE_SIZE = 20\n\n# The default number of commits to show on a page in the exploration history\n# tab.\nCOMMIT_LIST_PAGE_SIZE = 50\n\n# The default number of items to show on a page in the exploration feedback\n# tab.\nFEEDBACK_TAB_PAGE_SIZE = 20\n\n# The maximum number of top unresolved answers which should be aggregated\n# from all of the submitted answers.\nTOP_UNRESOLVED_ANSWERS_LIMIT = 20\n\n# Default title for a newly-minted exploration.\nDEFAULT_EXPLORATION_TITLE = ''\n# Default category for a newly-minted exploration.\nDEFAULT_EXPLORATION_CATEGORY = ''\n# Default objective for a newly-minted exploration.\nDEFAULT_EXPLORATION_OBJECTIVE = ''\n\n# NOTE TO DEVELOPERS: If any of the 5 constants below are modified, the\n# corresponding field in NEW_STATE_TEMPLATE in constants.js also has to be\n# modified.\n\n# Default name for the initial state of an exploration.\nDEFAULT_INIT_STATE_NAME = 'Introduction'\n# Default content id for the state's content.\nDEFAULT_NEW_STATE_CONTENT_ID = 'content'\n# Default content id for the interaction's default outcome.\nDEFAULT_OUTCOME_CONTENT_ID = 'default_outcome'\n# Default content id for the explanation in the concept card of a skill.\nDEFAULT_EXPLANATION_CONTENT_ID = 'explanation'\n# Default recorded_voiceovers dict for a default state template.\nDEFAULT_RECORDED_VOICEOVERS = {\n 'voiceovers_mapping': {\n 'content': {},\n 'default_outcome': {}\n }\n}\n# Default written_translations dict for a default state template.\nDEFAULT_WRITTEN_TRANSLATIONS = {\n 'translations_mapping': {\n 'content': {},\n 'default_outcome': {}\n }\n}\n# The default content text for the initial state of an exploration.\nDEFAULT_INIT_STATE_CONTENT_STR = ''\n\n# Whether new explorations should have automatic text-to-speech enabled\n# by default.\nDEFAULT_AUTO_TTS_ENABLED = True\n\n# Default title for a newly-minted collection.\nDEFAULT_COLLECTION_TITLE = ''\n# Default category for a newly-minted collection.\nDEFAULT_COLLECTION_CATEGORY = ''\n# Default objective for a newly-minted collection.\nDEFAULT_COLLECTION_OBJECTIVE = ''\n\n# Default description for a newly-minted story.\nDEFAULT_STORY_DESCRIPTION = ''\n# Default notes for a newly-minted story.\nDEFAULT_STORY_NOTES = ''\n\n# Default explanation for a newly-minted skill.\nDEFAULT_SKILL_EXPLANATION = ''\n# Default name for a newly-minted misconception.\nDEFAULT_MISCONCEPTION_NAME = ''\n# Default notes for a newly-minted misconception.\nDEFAULT_MISCONCEPTION_NOTES = ''\n# Default feedback for a newly-minted 
misconception.\nDEFAULT_MISCONCEPTION_FEEDBACK = ''\n# Default content_id for explanation subtitled html.\nDEFAULT_SKILL_EXPLANATION_CONTENT_ID = 'explanation'\n\n# Default description for a newly-minted topic.\nDEFAULT_TOPIC_DESCRIPTION = ''\n# Default content id for the subtopic page's content.\nDEFAULT_SUBTOPIC_PAGE_CONTENT_ID = 'content'\n\n# Default ID of VM which is used for training classifier.\nDEFAULT_VM_ID = 'vm_default'\n# Shared secret key for default VM.\nDEFAULT_VM_SHARED_SECRET = '1a2b3c4e'\n\n# An array containing the accepted image formats (as determined by the imghdr\n# module) and the corresponding allowed extensions in the filenames of uploaded\n# images.\nACCEPTED_IMAGE_FORMATS_AND_EXTENSIONS = {\n 'jpeg': ['jpg', 'jpeg'],\n 'png': ['png'],\n 'gif': ['gif'],\n}\n\n# An array containing the accepted audio extensions for uploaded files and\n# the corresponding MIME types.\nACCEPTED_AUDIO_EXTENSIONS = {\n 'mp3': ['audio/mp3']\n}\n\n# Prefix for data sent from the server to the client via JSON.\nXSSI_PREFIX = ')]}\\'\\n'\n# A regular expression for alphanumeric characters.\nALPHANUMERIC_REGEX = r'^[A-Za-z0-9]+$'\n# A regular expression for tags.\nTAG_REGEX = r'^[a-z ]+$'\n\n# Invalid names for parameters used in expressions.\nAUTOMATICALLY_SET_PARAMETER_NAMES = ['answer', 'choices']\nINVALID_PARAMETER_NAMES = AUTOMATICALLY_SET_PARAMETER_NAMES + [\n 'abs', 'all', 'and', 'any', 'else', 'floor', 'if', 'log', 'or',\n 'pow', 'round', 'then']\n\n# These are here rather than in rating_services.py to avoid import\n# circularities with exp_services.\n# TODO (Jacob) Refactor exp_services to remove this problem.\n_EMPTY_RATINGS = {'1': 0, '2': 0, '3': 0, '4': 0, '5': 0}\n\n\ndef get_empty_ratings():\n \"\"\"Returns a copy of the empty ratings object.\n\n Returns:\n dict. Copy of the '_EMPTY_RATINGS' dict object which contains the empty\n ratings.\n \"\"\"\n return copy.deepcopy(_EMPTY_RATINGS)\n\n\n# Empty scaled average rating as a float.\nEMPTY_SCALED_AVERAGE_RATING = 0.0\n\n# To use GAE email service.\nEMAIL_SERVICE_PROVIDER_GAE = 'gae_email_service'\n# To use mailgun email service.\nEMAIL_SERVICE_PROVIDER_MAILGUN = 'mailgun_email_service'\n# Use GAE email service by default.\nEMAIL_SERVICE_PROVIDER = EMAIL_SERVICE_PROVIDER_GAE\n# If the Mailgun email API is used, the \"None\" below should be replaced\n# with the Mailgun API key.\nMAILGUN_API_KEY = None\n# If the Mailgun email API is used, the \"None\" below should be replaced\n# with the Mailgun domain name (ending with mailgun.org).\nMAILGUN_DOMAIN_NAME = None\n\n# Committer id for system actions.\nSYSTEM_COMMITTER_ID = 'admin'\n# Domain name for email address.\nINCOMING_EMAILS_DOMAIN_NAME = 'example.com'\nSYSTEM_EMAIL_ADDRESS = '[email protected]'\nSYSTEM_EMAIL_NAME = '.'\nADMIN_EMAIL_ADDRESS = '[email protected]'\nNOREPLY_EMAIL_ADDRESS = '[email protected]'\n# Ensure that SYSTEM_EMAIL_ADDRESS and ADMIN_EMAIL_ADDRESS are both valid and\n# correspond to owners of the app before setting this to True. If\n# SYSTEM_EMAIL_ADDRESS is not that of an app owner, email messages from this\n# address cannot be sent. 
If True then emails can be sent to any user.\nCAN_SEND_EMAILS = False\n# If you want to turn on this facility please check the email templates in the\n# send_role_notification_email() function in email_manager.py and modify them\n# accordingly.\nCAN_SEND_EDITOR_ROLE_EMAILS = False\n# If enabled then emails will be sent to creators for feedback messages.\nCAN_SEND_FEEDBACK_MESSAGE_EMAILS = False\n# If enabled subscription emails will be sent to that user.\nCAN_SEND_SUBSCRIPTION_EMAILS = False\n# Time to wait before sending feedback message emails (currently set to 1\n# hour).\nDEFAULT_FEEDBACK_MESSAGE_EMAIL_COUNTDOWN_SECS = 3600\n# Whether to send an email when new feedback message is received for\n# an exploration.\nDEFAULT_FEEDBACK_MESSAGE_EMAIL_PREFERENCE = True\n# Whether to send an email to all the creator's subscribers when he/she\n# publishes an exploration.\nDEFAULT_SUBSCRIPTION_EMAIL_PREFERENCE = True\n# Whether exploration feedback emails are muted,\n# when the user has not specified a preference.\nDEFAULT_FEEDBACK_NOTIFICATIONS_MUTED_PREFERENCE = False\n# Whether exploration suggestion emails are muted,\n# when the user has not specified a preference.\nDEFAULT_SUGGESTION_NOTIFICATIONS_MUTED_PREFERENCE = False\n# Whether to send email updates to a user who has not specified a preference.\nDEFAULT_EMAIL_UPDATES_PREFERENCE = False\n# Whether to send an invitation email when the user is granted\n# new role permissions in an exploration.\nDEFAULT_EDITOR_ROLE_EMAIL_PREFERENCE = True\n# Whether to require an email to be sent, following a moderator action.\nREQUIRE_EMAIL_ON_MODERATOR_ACTION = False\n# Timespan in minutes before allowing duplicate emails.\nDUPLICATE_EMAIL_INTERVAL_MINS = 2\n# Number of digits after decimal to which the average ratings value in the\n# dashboard is rounded off to.\nAVERAGE_RATINGS_DASHBOARD_PRECISION = 2\n# Whether to enable maintenance mode on the site. For non-admins, this redirects\n# all HTTP requests to the maintenance page. This is the only check which\n# determines whether the site is in maintenance mode to avoid queries to the\n# database by non-admins.\nENABLE_MAINTENANCE_MODE = False\n\n# The interactions permissible for a question.\nALLOWED_QUESTION_INTERACTION_IDS = [\n 'TextInput', 'MultipleChoiceInput', 'NumericInput']\n\n# Flag to disable sending emails related to reviews for suggestions. To be\n# flipped after deciding (and implementing) whether a user should be scored\n# only for curated lessons.\nSEND_SUGGESTION_REVIEW_RELATED_EMAILS = False\n# To prevent recording scores for users until details like whether to score\n# users for only curated lessons is confirmed.\nENABLE_RECORDING_OF_SCORES = False\n\n# No. 
of pretest questions to display.\nNUM_PRETEST_QUESTIONS = 3\n\n# Whether to automatically accept suggestions after a threshold time.\nENABLE_AUTO_ACCEPT_OF_SUGGESTIONS = False\n\nEMAIL_INTENT_SIGNUP = 'signup'\nEMAIL_INTENT_DAILY_BATCH = 'daily_batch'\nEMAIL_INTENT_EDITOR_ROLE_NOTIFICATION = 'editor_role_notification'\nEMAIL_INTENT_FEEDBACK_MESSAGE_NOTIFICATION = 'feedback_message_notification'\nEMAIL_INTENT_SUBSCRIPTION_NOTIFICATION = 'subscription_notification'\nEMAIL_INTENT_SUGGESTION_NOTIFICATION = 'suggestion_notification'\nEMAIL_INTENT_REPORT_BAD_CONTENT = 'report_bad_content'\nEMAIL_INTENT_MARKETING = 'marketing'\nEMAIL_INTENT_UNPUBLISH_EXPLORATION = 'unpublish_exploration'\nEMAIL_INTENT_DELETE_EXPLORATION = 'delete_exploration'\nEMAIL_INTENT_QUERY_STATUS_NOTIFICATION = 'query_status_notification'\nEMAIL_INTENT_ONBOARD_REVIEWER = 'onboard_reviewer'\nEMAIL_INTENT_REVIEW_SUGGESTIONS = 'review_suggestions'\n# Possible intents for email sent in bulk.\nBULK_EMAIL_INTENT_MARKETING = 'bulk_email_marketing'\nBULK_EMAIL_INTENT_IMPROVE_EXPLORATION = 'bulk_email_improve_exploration'\nBULK_EMAIL_INTENT_CREATE_EXPLORATION = 'bulk_email_create_exploration'\nBULK_EMAIL_INTENT_CREATOR_REENGAGEMENT = 'bulk_email_creator_reengagement'\nBULK_EMAIL_INTENT_LEARNER_REENGAGEMENT = 'bulk_email_learner_reengagement'\nBULK_EMAIL_INTENT_TEST = 'bulk_email_test'\n\nMESSAGE_TYPE_FEEDBACK = 'feedback'\nMESSAGE_TYPE_SUGGESTION = 'suggestion'\n\nMODERATOR_ACTION_UNPUBLISH_EXPLORATION = 'unpublish_exploration'\nDEFAULT_SALUTATION_HTML_FN = (\n lambda recipient_username: 'Hi %s,' % recipient_username)\nDEFAULT_SIGNOFF_HTML_FN = (\n lambda sender_username: (\n 'Thanks!<br>%s (Oppia moderator)' % sender_username))\n\nVALID_MODERATOR_ACTIONS = {\n MODERATOR_ACTION_UNPUBLISH_EXPLORATION: {\n 'email_config': 'unpublish_exploration_email_html_body',\n 'email_subject_fn': (\n lambda exp_title: (\n 'Your Oppia exploration \"%s\" has been unpublished' % exp_title)\n ),\n 'email_intent': 'unpublish_exploration',\n 'email_salutation_html_fn': DEFAULT_SALUTATION_HTML_FN,\n 'email_signoff_html_fn': DEFAULT_SIGNOFF_HTML_FN,\n },\n}\n\n# When the site terms were last updated, in UTC.\nREGISTRATION_PAGE_LAST_UPDATED_UTC = datetime.datetime(2015, 10, 14, 2, 40, 0)\n\n# Format of string for dashboard statistics logs.\n# NOTE TO DEVELOPERS: This format should not be changed, since it is used in\n# the existing storage models for UserStatsModel.\nDASHBOARD_STATS_DATETIME_STRING_FORMAT = '%Y-%m-%d'\n\n# The maximum size of an uploaded file, in bytes.\nMAX_FILE_SIZE_BYTES = 1048576\n\n# The maximum playback length of an audio file, in seconds.\nMAX_AUDIO_FILE_LENGTH_SEC = 300\n\n# The minimum score required for a user to review suggestions of a particular\n# category.\nMINIMUM_SCORE_REQUIRED_TO_REVIEW = 10\n\n# The prefix for an 'accepted suggestion' commit message.\nCOMMIT_MESSAGE_ACCEPTED_SUGGESTION_PREFIX = 'Accepted suggestion by'\n\n# User id and username for exploration migration bot. Commits made by this bot\n# are not reflected in the exploration summary models, but are recorded in the\n# exploration commit log.\nMIGRATION_BOT_USER_ID = 'OppiaMigrationBot'\nMIGRATION_BOT_USERNAME = 'OppiaMigrationBot'\n\n# User id and username for suggestion bot. 
This bot will be used to accept\n# suggestions automatically after a threshold time.\nSUGGESTION_BOT_USER_ID = 'OppiaSuggestionBot'\nSUGGESTION_BOT_USERNAME = 'OppiaSuggestionBot'\n\n# Ids and locations of the permitted extensions.\nALLOWED_RTE_EXTENSIONS = {\n 'Collapsible': {\n 'dir': os.path.join(RTE_EXTENSIONS_DIR, 'Collapsible')\n },\n 'Image': {\n 'dir': os.path.join(RTE_EXTENSIONS_DIR, 'Image')\n },\n 'Link': {\n 'dir': os.path.join(RTE_EXTENSIONS_DIR, 'Link')\n },\n 'Math': {\n 'dir': os.path.join(RTE_EXTENSIONS_DIR, 'Math')\n },\n 'Tabs': {\n 'dir': os.path.join(RTE_EXTENSIONS_DIR, 'Tabs')\n },\n 'Video': {\n 'dir': os.path.join(RTE_EXTENSIONS_DIR, 'Video')\n },\n}\n\n# The list of interaction IDs which correspond to interactions that set their\n# is_linear property to true. Linear interactions do not support branching and\n# thus only allow for default answer classification. This value is guarded by a\n# test in extensions.interactions.base_test.\nLINEAR_INTERACTION_IDS = ['Continue']\n\n# Demo explorations to load through the admin panel. The id assigned to each\n# exploration is based on the key of the exploration in this dict, so ensure it\n# doesn't change once it's in the list. Only integer-based indices should be\n# used in this list, as it maintains backward compatibility with how demo\n# explorations used to be assigned IDs. The value of each entry in this dict is\n# either a YAML file or a directory (depending on whether it ends in .yaml).\n# These explorations can be found under data/explorations.\nDEMO_EXPLORATIONS = {\n u'0': 'welcome.yaml',\n u'1': 'multiples.yaml',\n u'2': 'binary_search',\n u'3': 'root_linear_coefficient_theorem.yaml',\n u'4': 'three_balls',\n # TODO(bhenning): Replace demo exploration '5' with a new exploration\n # described in #1376.\n u'6': 'boot_verbs.yaml',\n u'7': 'hola.yaml',\n u'8': 'adventure.yaml',\n u'9': 'pitch_perfect.yaml',\n u'10': 'test_interactions',\n u'11': 'modeling_graphs',\n u'12': 'protractor_test_1.yaml',\n u'13': 'solar_system',\n u'14': 'about_oppia.yaml',\n u'15': 'classifier_demo_exploration.yaml',\n u'16': 'all_interactions',\n u'17': 'audio_test',\n u'18': 'code_classifier_test.yaml',\n u'19': 'example_exploration_in_collection1.yaml',\n u'20': 'example_exploration_in_collection2.yaml',\n u'21': 'example_exploration_in_collection3.yaml',\n u'22': 'protractor_mobile_test_exploration.yaml',\n u'23': 'rating_test.yaml',\n u'24': 'learner_flow_test.yaml',\n u'25': 'exploration_player_test.yaml',\n}\n\nDEMO_COLLECTIONS = {\n u'0': 'welcome_to_collections.yaml',\n u'1': 'learner_flow_test_collection.yaml'\n}\n\n# IDs of explorations which should not be displayable in either the learner or\n# editor views.\nDISABLED_EXPLORATION_IDS = ['5']\n\n# Oppia Google Group URL.\nGOOGLE_GROUP_URL = (\n 'https://groups.google.com/forum/?place=forum/oppia#!forum/oppia')\n\n# External URL for the Foundation site.\nFOUNDATION_SITE_URL = 'http://oppiafoundation.org'\n\n# Prefix for all taskqueue-related URLs.\nTASKQUEUE_URL_PREFIX = '/task'\nTASK_URL_FEEDBACK_MESSAGE_EMAILS = (\n '%s/email/batchfeedbackmessageemailhandler' % TASKQUEUE_URL_PREFIX)\nTASK_URL_FEEDBACK_STATUS_EMAILS = (\n '%s/email/feedbackthreadstatuschangeemailhandler' % TASKQUEUE_URL_PREFIX)\nTASK_URL_FLAG_EXPLORATION_EMAILS = (\n '%s/email/flagexplorationemailhandler' % TASKQUEUE_URL_PREFIX)\nTASK_URL_INSTANT_FEEDBACK_EMAILS = (\n '%s/email/instantfeedbackmessageemailhandler' % TASKQUEUE_URL_PREFIX)\nTASK_URL_SUGGESTION_EMAILS = (\n '%s/email/suggestionemailhandler' % 
TASKQUEUE_URL_PREFIX)\n\n# TODO(sll): Add all other URLs here.\nADMIN_URL = '/admin'\nADMIN_ROLE_HANDLER_URL = '/adminrolehandler'\nCOLLECTION_DATA_URL_PREFIX = '/collection_handler/data'\nCOLLECTION_EDITOR_DATA_URL_PREFIX = '/collection_editor_handler/data'\nCOLLECTION_SUMMARIES_DATA_URL = '/collectionsummarieshandler/data'\nCOLLECTION_RIGHTS_PREFIX = '/collection_editor_handler/rights'\nCOLLECTION_PUBLISH_PREFIX = '/collection_editor_handler/publish'\nCOLLECTION_UNPUBLISH_PREFIX = '/collection_editor_handler/unpublish'\nCOLLECTION_EDITOR_URL_PREFIX = '/collection_editor/create'\nCOLLECTION_URL_PREFIX = '/collection'\nCONCEPT_CARD_DATA_URL_PREFIX = '/concept_card_handler'\nCREATOR_DASHBOARD_DATA_URL = '/creatordashboardhandler/data'\nCREATOR_DASHBOARD_URL = '/creator_dashboard'\nCUSTOM_NONPROFITS_LANDING_PAGE_URL = '/nonprofits'\nCUSTOM_PARENTS_LANDING_PAGE_URL = '/parents'\nCUSTOM_PARTNERS_LANDING_PAGE_URL = '/partners'\nCUSTOM_TEACHERS_LANDING_PAGE_URL = '/teachers'\nCUSTOM_VOLUNTEERS_LANDING_PAGE_URL = '/volunteers'\nDASHBOARD_CREATE_MODE_URL = '%s?mode=create' % CREATOR_DASHBOARD_URL\nEDITOR_URL_PREFIX = '/create'\nEXPLORATION_DATA_PREFIX = '/createhandler/data'\nEXPLORATION_FEATURES_PREFIX = '/explorehandler/features'\nEXPLORATION_INIT_URL_PREFIX = '/explorehandler/init'\nEXPLORATION_METADATA_SEARCH_URL = '/exploration/metadata_search'\nEXPLORATION_PRETESTS_URL_PREFIX = '/pretest_handler'\nEXPLORATION_RIGHTS_PREFIX = '/createhandler/rights'\nEXPLORATION_STATE_ANSWER_STATS_PREFIX = '/createhandler/state_answer_stats'\nEXPLORATION_STATUS_PREFIX = '/createhandler/status'\nEXPLORATION_SUMMARIES_DATA_URL = '/explorationsummarieshandler/data'\nEXPLORATION_URL_PREFIX = '/explore'\nEXPLORATION_URL_EMBED_PREFIX = '/embed/exploration'\nFEEDBACK_STATS_URL_PREFIX = '/feedbackstatshandler'\nFEEDBACK_THREAD_URL_PREFIX = '/threadhandler'\nFEEDBACK_THREADLIST_URL_PREFIX = '/threadlisthandler'\nFEEDBACK_THREADLIST_URL_PREFIX_FOR_TOPICS = '/threadlisthandlerfortopic'\nFEEDBACK_THREAD_VIEW_EVENT_URL = '/feedbackhandler/thread_view_event'\nFLAG_EXPLORATION_URL_PREFIX = '/flagexplorationhandler'\nFRACTIONS_LANDING_PAGE_URL = '/fractions'\nTOPIC_LANDING_PAGE_URL = '/learn/<subject>/<topic>'\nLEARNER_DASHBOARD_URL = '/learner_dashboard'\nLEARNER_DASHBOARD_DATA_URL = '/learnerdashboardhandler/data'\nLEARNER_DASHBOARD_IDS_DATA_URL = '/learnerdashboardidshandler/data'\nLEARNER_DASHBOARD_FEEDBACK_THREAD_DATA_URL = '/learnerdashboardthreadhandler'\nLEARNER_PLAYLIST_DATA_URL = '/learnerplaylistactivityhandler'\nLEARNER_INCOMPLETE_ACTIVITY_DATA_URL = '/learnerincompleteactivityhandler'\nLIBRARY_GROUP_DATA_URL = '/librarygrouphandler'\nLIBRARY_INDEX_URL = '/library'\nLIBRARY_INDEX_DATA_URL = '/libraryindexhandler'\nLIBRARY_RECENTLY_PUBLISHED_URL = '/library/recently_published'\nLIBRARY_SEARCH_URL = '/search/find'\nLIBRARY_SEARCH_DATA_URL = '/searchhandler/data'\nLIBRARY_TOP_RATED_URL = '/library/top_rated'\nMERGE_SKILLS_URL = '/merge_skills_handler'\nNEW_COLLECTION_URL = '/collection_editor_handler/create_new'\nNEW_EXPLORATION_URL = '/contributehandler/create_new'\nNEW_QUESTION_URL = '/question_editor_handler/create_new'\nNEW_SKILL_URL = '/skill_editor_handler/create_new'\nTOPIC_EDITOR_STORY_URL = '/topic_editor_story_handler'\nTOPIC_EDITOR_QUESTION_URL = '/topic_editor_question_handler'\nNEW_TOPIC_URL = '/topic_editor_handler/create_new'\nNOTIFICATIONS_DASHBOARD_URL = '/notifications_dashboard'\nPREFERENCES_URL = '/preferences'\nPRACTICE_SESSION_URL_PREFIX = '/practice_session'\nPRACTICE_SESSION_DATA_URL_PREFIX = 
'/practice_session/data'\nPREFERENCES_DATA_URL = '/preferenceshandler/data'\nQUESTION_EDITOR_DATA_URL_PREFIX = '/question_editor_handler/data'\nQUESTION_SKILL_LINK_URL_PREFIX = '/manage_question_skill_link'\nQUESTIONS_URL_PREFIX = '/question_player_handler'\nRECENT_COMMITS_DATA_URL = '/recentcommitshandler/recent_commits'\nRECENT_FEEDBACK_MESSAGES_DATA_URL = '/recent_feedback_messages'\nROBOTS_TXT_URL = '/robots.txt'\nSITE_LANGUAGE_DATA_URL = '/save_site_language'\nSIGNUP_DATA_URL = '/signuphandler/data'\nSIGNUP_URL = '/signup'\nSKILL_EDITOR_DATA_URL_PREFIX = '/skill_editor_handler/data'\nSKILL_EDITOR_URL_PREFIX = '/skill_editor'\nSKILL_EDITOR_QUESTION_URL = '/skill_editor_question_handler'\nSKILL_RIGHTS_URL_PREFIX = '/skill_editor_handler/rights'\nSKILL_PUBLISH_URL_PREFIX = '/skill_editor_handler/publish_skill'\nSPLASH_URL = '/splash'\nSTORY_DATA_HANDLER = '/story_data_handler'\nSTORY_EDITOR_URL_PREFIX = '/story_editor'\nSTORY_EDITOR_DATA_URL_PREFIX = '/story_editor_handler/data'\nSUBTOPIC_DATA_HANDLER = '/subtopic_data_handler'\nSUGGESTION_ACTION_URL_PREFIX = '/suggestionactionhandler'\nSUGGESTION_LIST_URL_PREFIX = '/suggestionlisthandler'\nSUGGESTION_URL_PREFIX = '/suggestionhandler'\nSUBSCRIBE_URL_PREFIX = '/subscribehandler'\nSUBTOPIC_PAGE_EDITOR_DATA_URL_PREFIX = '/subtopic_page_editor_handler/data'\nTOPIC_VIEWER_URL_PREFIX = '/topic'\nTOPIC_DATA_HANDLER = '/topic_data_handler'\nTOPIC_EDITOR_DATA_URL_PREFIX = '/topic_editor_handler/data'\nTOPIC_EDITOR_URL_PREFIX = '/topic_editor'\nTOPIC_MANAGER_RIGHTS_URL_PREFIX = '/rightshandler/assign_topic_manager'\nTOPIC_RIGHTS_URL_PREFIX = '/rightshandler/get_topic_rights'\nTOPIC_SEND_MAIL_URL_PREFIX = '/rightshandler/send_topic_publish_mail'\nTOPIC_STATUS_URL_PREFIX = '/rightshandler/change_topic_status'\nTOPICS_AND_SKILLS_DASHBOARD_DATA_URL = '/topics_and_skills_dashboard/data'\nTOPICS_AND_SKILLS_DASHBOARD_URL = '/topics_and_skills_dashboard'\nUNSUBSCRIBE_URL_PREFIX = '/unsubscribehandler'\nUPLOAD_EXPLORATION_URL = '/contributehandler/upload'\nUSER_EXPLORATION_EMAILS_PREFIX = '/createhandler/notificationpreferences'\nUSERNAME_CHECK_DATA_URL = '/usernamehandler/data'\nVOICEOVER_DATA_PREFIX = '/createhandler/voiceover'\n\n# Event types.\nEVENT_TYPE_ALL_STATS = 'all_stats'\nEVENT_TYPE_STATE_HIT = 'state_hit'\nEVENT_TYPE_STATE_COMPLETED = 'state_complete'\nEVENT_TYPE_ANSWER_SUBMITTED = 'answer_submitted'\nEVENT_TYPE_DEFAULT_ANSWER_RESOLVED = 'default_answer_resolved'\nEVENT_TYPE_NEW_THREAD_CREATED = 'feedback_thread_created'\nEVENT_TYPE_THREAD_STATUS_CHANGED = 'feedback_thread_status_changed'\nEVENT_TYPE_RATE_EXPLORATION = 'rate_exploration'\nEVENT_TYPE_SOLUTION_HIT = 'solution_hit'\nEVENT_TYPE_LEAVE_FOR_REFRESHER_EXP = 'leave_for_refresher_exp'\n# The values for these event types should be left as-is for backwards\n# compatibility.\nEVENT_TYPE_START_EXPLORATION = 'start'\nEVENT_TYPE_ACTUAL_START_EXPLORATION = 'actual_start'\nEVENT_TYPE_MAYBE_LEAVE_EXPLORATION = 'leave'\nEVENT_TYPE_COMPLETE_EXPLORATION = 'complete'\n\n# Play type constants.\nPLAY_TYPE_PLAYTEST = 'playtest'\nPLAY_TYPE_NORMAL = 'normal'\n\n# Predefined commit messages.\nCOMMIT_MESSAGE_EXPLORATION_DELETED = 'Exploration deleted.'\nCOMMIT_MESSAGE_COLLECTION_DELETED = 'Collection deleted.'\nCOMMIT_MESSAGE_QUESTION_DELETED = 'Question deleted.'\nCOMMIT_MESSAGE_SKILL_DELETED = 'Skill deleted.'\nCOMMIT_MESSAGE_STORY_DELETED = 'Story deleted.'\nCOMMIT_MESSAGE_SUBTOPIC_PAGE_DELETED = 'Subtopic page deleted.'\nCOMMIT_MESSAGE_TOPIC_DELETED = 'Topic deleted.'\n\n# Max number of playthroughs 
for an issue.\nMAX_PLAYTHROUGHS_FOR_ISSUE = 5\n\n# Unfinished features.\nSHOW_TRAINABLE_UNRESOLVED_ANSWERS = False\n# Number of unresolved answers to be displayed in the dashboard for each\n# exploration.\nTOP_UNRESOLVED_ANSWERS_COUNT_DASHBOARD = 3\n# Number of open feedback to be displayed in the dashboard for each exploration.\nOPEN_FEEDBACK_COUNT_DASHBOARD = 3\n# NOTE TO DEVELOPERS: This should be synchronized with App.js.\nENABLE_ML_CLASSIFIERS = False\nSHOW_COLLECTION_NAVIGATION_TAB_HISTORY = False\nSHOW_COLLECTION_NAVIGATION_TAB_STATS = False\n\n# The regular expression used to identify whether a string contains float value.\n# The regex must match with regex that is stored in vmconf.py file of Oppia-ml.\n# If this regex needs to be modified then first of all shutdown Oppia-ml VM.\n# Then update the regex constant in here and Oppia both.\n# Run any migration job that is required to migrate existing trained models\n# before starting Oppia-ml again.\nFLOAT_VERIFIER_REGEX = (\n '^([-+]?\\\\d*\\\\.\\\\d+)$|^([-+]?(\\\\d*\\\\.?\\\\d+|\\\\d+\\\\.?\\\\d*)e[-+]?\\\\d*)$')\n\n# Current event models schema version. All event models with an\n# event_schema_version of 1 are the events collected before the rework of the\n# statistics framework which brought about the recording of new event models;\n# these models include all models recorded before Feb 2018.\nCURRENT_EVENT_MODELS_SCHEMA_VERSION = 2\n\n# Output formats of downloaded explorations.\nOUTPUT_FORMAT_JSON = 'json'\nOUTPUT_FORMAT_ZIP = 'zip'\n\n# Types of updates shown in the 'recent updates' table in the dashboard page.\nUPDATE_TYPE_EXPLORATION_COMMIT = 'exploration_commit'\nUPDATE_TYPE_COLLECTION_COMMIT = 'collection_commit'\nUPDATE_TYPE_FEEDBACK_MESSAGE = 'feedback_thread'\n\n# Possible values for user query status.\n# Valid status transitions are: processing --> completed --> archived\n# or processing --> failed.\nUSER_QUERY_STATUS_PROCESSING = 'processing'\nUSER_QUERY_STATUS_COMPLETED = 'completed'\nUSER_QUERY_STATUS_ARCHIVED = 'archived'\nUSER_QUERY_STATUS_FAILED = 'failed'\n\n# The time difference between which to consider two login events \"close\". This\n# is taken to be 12 hours.\nPROXIMAL_TIMEDELTA_SECS = 12 * 60 * 60\n\n# The i18n id for the header of the \"Featured Activities\" category in the\n# library index page.\nLIBRARY_CATEGORY_FEATURED_ACTIVITIES = 'I18N_LIBRARY_GROUPS_FEATURED_ACTIVITIES'\n# The i18n id for the header of the \"Top Rated Explorations\" category in the\n# library index page.\nLIBRARY_CATEGORY_TOP_RATED_EXPLORATIONS = (\n 'I18N_LIBRARY_GROUPS_TOP_RATED_EXPLORATIONS')\n# The i18n id for the header of the \"Recently Published\" category in the\n# library index page.\nLIBRARY_CATEGORY_RECENTLY_PUBLISHED = 'I18N_LIBRARY_GROUPS_RECENTLY_PUBLISHED'\n\n# The group name that appears at the end of the url for the recently published\n# page.\nLIBRARY_GROUP_RECENTLY_PUBLISHED = 'recently_published'\n# The group name that appears at the end of the url for the top rated page.\nLIBRARY_GROUP_TOP_RATED = 'top_rated'\n\n# Defaults for topic similarities.\nDEFAULT_TOPIC_SIMILARITY = 0.5\nSAME_TOPIC_SIMILARITY = 1.0\n\n# The following are all page descriptions for the meta tag.\nABOUT_PAGE_DESCRIPTION = (\n 'Oppia is an open source learning platform that connects a community of '\n 'teachers and learners. 
You can use this site to create 1-1 learning '\n 'scenarios for others.')\nGET_STARTED_PAGE_DESCRIPTION = (\n 'Learn how to get started using Oppia.')\nCONTACT_PAGE_DESCRIPTION = (\n 'Contact the Oppia team, submit feedback, and learn how to get involved '\n 'with the Oppia project.')\nCREATE_PAGE_DESCRIPTION = (\n 'Help others learn new things. Create lessons through explorations and '\n 'share your knowledge with the community.')\nCREATOR_DASHBOARD_PAGE_DESCRIPTION = (\n 'Keep track of the lessons you have created, as well as feedback from '\n 'learners.')\nDONATE_PAGE_DESCRIPTION = (\n 'Donate to The Oppia Foundation.')\nLIBRARY_GROUP_PAGE_DESCRIPTION = (\n 'Discover top-rated or recently-published explorations on Oppia. Learn '\n 'from these explorations or help improve an existing one for the '\n 'community.')\nLIBRARY_PAGE_DESCRIPTION = (\n 'Looking to learn something new? Find explorations created by professors, '\n 'teachers and Oppia users in a subject you\\'re interested in, and start '\n 'exploring!')\nPREFERENCES_PAGE_DESCRIPTION = (\n 'Change your Oppia profile settings and preferences')\nSEARCH_PAGE_DESCRIPTION = (\n 'Discover a new exploration to learn from, or help improve an existing '\n 'one for the community.')\nSIGNUP_PAGE_DESCRIPTION = (\n 'Sign up for Oppia and begin exploring a new subject.')\nSPLASH_PAGE_DESCRIPTION = (\n 'Oppia is a free site for sharing knowledge via interactive lessons '\n 'called \\'explorations\\'. Learn from user-created explorations, or teach '\n 'and create your own.')\nTEACH_PAGE_DESCRIPTION = (\n 'The Oppia library is full of user-created lessons called \\'explorations\\'.'\n ' Read about how to participate in the community and begin creating '\n 'explorations.')\nTERMS_PAGE_DESCRIPTION = (\n 'Oppia is a 501(c)(3) registered non-profit open-source e-learning '\n 'platform. Learn about our terms and conditions for creating and '\n 'distributing learning material.')\nTHANKS_PAGE_DESCRIPTION = (\n 'Thank you for donating to The Oppia Foundation.')\n\n# The type of the response returned by a handler when an exception is raised.\nHANDLER_TYPE_HTML = 'html'\nHANDLER_TYPE_JSON = 'json'\nHANDLER_TYPE_DOWNLOADABLE = 'downloadable'\n\n# Following are the constants for the role IDs.\nROLE_ID_GUEST = 'GUEST'\nROLE_ID_BANNED_USER = 'BANNED_USER'\nROLE_ID_EXPLORATION_EDITOR = 'EXPLORATION_EDITOR'\nROLE_ID_COLLECTION_EDITOR = 'COLLECTION_EDITOR'\nROLE_ID_TOPIC_MANAGER = 'TOPIC_MANAGER'\nROLE_ID_MODERATOR = 'MODERATOR'\nROLE_ID_ADMIN = 'ADMIN'\n\n# Intent of the User making query to role structure via admin interface. 
Used\n# to store audit data regarding queries to role IDs.\nROLE_ACTION_UPDATE = 'update'\nROLE_ACTION_VIEW_BY_USERNAME = 'view_by_username'\nROLE_ACTION_VIEW_BY_ROLE = 'view_by_role'\n\nVIEW_METHOD_ROLE = 'role'\nVIEW_METHOD_USERNAME = 'username'\n\nQUESTION_BATCH_SIZE = 10\n\nSTATE_ANSWER_STATS_MIN_FREQUENCY = 2\n\nRTE_FORMAT_TEXTANGULAR = 'text-angular'\n\nRTE_FORMAT_CKEDITOR = 'ck-editor'\n\n# RTE content specifications according to the type of the editor.\nRTE_CONTENT_SPEC = {\n 'RTE_TYPE_TEXTANGULAR': {\n # Valid parent-child relation in TextAngular.\n 'ALLOWED_PARENT_LIST': {\n 'p': ['blockquote', 'div', 'pre', '[document]', 'ol', 'ul', 'li'],\n 'b': ['i', 'li', 'p', 'pre'],\n 'br': ['b', 'i', 'li', 'p'],\n 'i': ['b', 'li', 'p', 'pre'],\n 'li': ['ol', 'ul'],\n 'ol': ['ol', 'ul', 'blockquote', 'li', 'pre', 'div', '[document]'],\n 'ul': ['ol', 'ul', 'blockquote', 'li', 'pre', 'div', '[document]'],\n 'pre': ['ol', 'ul', 'blockquote', '[document]'],\n 'blockquote': ['blockquote', '[document]'],\n 'oppia-noninteractive-link': ['b', 'i', 'li', 'p', 'pre'],\n 'oppia-noninteractive-math': ['b', 'i', 'li', 'p', 'pre'],\n 'oppia-noninteractive-image': ['b', 'i', 'li', 'p', 'pre'],\n 'oppia-noninteractive-collapsible': ['b', 'i', 'li', 'p', 'pre'],\n 'oppia-noninteractive-video': ['b', 'i', 'li', 'p', 'pre'],\n 'oppia-noninteractive-tabs': ['b', 'i', 'li', 'p', 'pre']\n },\n # Valid html tags in TextAngular.\n 'ALLOWED_TAG_LIST': [\n 'p',\n 'b',\n 'br',\n 'i',\n 'li',\n 'ol',\n 'ul',\n 'pre',\n 'blockquote',\n 'oppia-noninteractive-link',\n 'oppia-noninteractive-math',\n 'oppia-noninteractive-image',\n 'oppia-noninteractive-collapsible',\n 'oppia-noninteractive-video',\n 'oppia-noninteractive-tabs'\n ]\n },\n 'RTE_TYPE_CKEDITOR': {\n # Valid parent-child relation in CKEditor.\n 'ALLOWED_PARENT_LIST': {\n 'p': ['blockquote', '[document]', 'li'],\n 'strong': ['em', 'li', 'p', 'pre'],\n 'em': ['strong', 'li', 'p', 'pre'],\n 'br': ['strong', 'em', 'li', 'p'],\n 'li': ['ol', 'ul'],\n 'ol': ['li', 'blockquote', 'pre', '[document]'],\n 'ul': ['li', 'blockquote', 'pre', '[document]'],\n 'pre': ['ol', 'ul', 'blockquote', 'li', '[document]'],\n 'blockquote': ['blockquote', '[document]'],\n 'oppia-noninteractive-link': ['strong', 'em', 'li', 'p', 'pre'],\n 'oppia-noninteractive-math': ['strong', 'em', 'li', 'p', 'pre'],\n 'oppia-noninteractive-image': ['blockquote', 'li', '[document]'],\n 'oppia-noninteractive-collapsible': [\n 'blockquote', 'li', '[document]'\n ],\n 'oppia-noninteractive-video': ['blockquote', 'li', '[document]'],\n 'oppia-noninteractive-tabs': ['blockquote', 'li', '[document]']\n },\n # Valid html tags in CKEditor.\n 'ALLOWED_TAG_LIST': [\n 'p',\n 'strong',\n 'br',\n 'em',\n 'li',\n 'ol',\n 'ul',\n 'pre',\n 'blockquote',\n 'oppia-noninteractive-link',\n 'oppia-noninteractive-math',\n 'oppia-noninteractive-image',\n 'oppia-noninteractive-collapsible',\n 'oppia-noninteractive-video',\n 'oppia-noninteractive-tabs'\n ]\n\n }\n}\n\n# A dict representing available landing pages, having subject as a key and list\n# of topics as the value.\n# Note: This dict needs to be keep in sync with frontend TOPIC_LANDING_PAGE_DATA\n# oppia constant defined in\n# core/templates/dev/head/pages/landing-pages/TopicLandingPage.js file.\nAVAILABLE_LANDING_PAGES = {\n 'maths': ['fractions', 'ratios']\n}\n",
"path": "feconf.py"
}
] | [
{
"content": "# coding: utf-8\n#\n# Copyright 2014 The Oppia Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Stores various configuration options and constants for Oppia.\"\"\"\n\nimport copy\nimport datetime\nimport os\n\nfrom constants import constants # pylint: disable=relative-import\n\n# Whether to unconditionally log info messages.\nDEBUG = False\n\n# When DEV_MODE is true check that we are running in development environment.\n# The SERVER_SOFTWARE environment variable does not exist in Travis, hence the\n# need for an explicit check.\nif (constants.DEV_MODE and os.getenv('SERVER_SOFTWARE') and\n not os.getenv('SERVER_SOFTWARE', default='').startswith('Development')):\n raise Exception('DEV_MODE can\\'t be true on production.')\n\nCLASSIFIERS_DIR = os.path.join('extensions', 'classifiers')\nTESTS_DATA_DIR = os.path.join('core', 'tests', 'data')\nSAMPLE_EXPLORATIONS_DIR = os.path.join('data', 'explorations')\nSAMPLE_COLLECTIONS_DIR = os.path.join('data', 'collections')\nCONTENT_VALIDATION_DIR = os.path.join('core', 'domain')\n\nEXTENSIONS_DIR_PREFIX = (\n 'backend_prod_files' if not constants.DEV_MODE else '')\nACTIONS_DIR = (\n os.path.join(EXTENSIONS_DIR_PREFIX, 'extensions', 'actions'))\nISSUES_DIR = (\n os.path.join(EXTENSIONS_DIR_PREFIX, 'extensions', 'issues'))\nINTERACTIONS_DIR = (\n os.path.join(EXTENSIONS_DIR_PREFIX, 'extensions', 'interactions'))\nRTE_EXTENSIONS_DIR = (\n os.path.join(EXTENSIONS_DIR_PREFIX, 'extensions', 'rich_text_components'))\nRTE_EXTENSIONS_DEFINITIONS_PATH = (\n os.path.join('assets', 'rich_text_components_definitions.js'))\n\nOBJECT_TEMPLATES_DIR = os.path.join('extensions', 'objects', 'templates')\n\n# Choose production templates folder when we are in production mode.\nif not constants.DEV_MODE:\n FRONTEND_TEMPLATES_DIR = (\n os.path.join('backend_prod_files', 'templates', 'head'))\nelse:\n FRONTEND_TEMPLATES_DIR = os.path.join('core', 'templates', 'dev', 'head')\nDEPENDENCIES_TEMPLATES_DIR = (\n os.path.join(EXTENSIONS_DIR_PREFIX, 'extensions', 'dependencies'))\n\nVALUE_GENERATORS_DIR_FOR_JS = os.path.join(\n 'local_compiled_js', 'extensions', 'value_generators')\nVALUE_GENERATORS_DIR = os.path.join('extensions', 'value_generators')\n\nVISUALIZATIONS_DIR = os.path.join(\n 'extensions', 'visualizations')\nVISUALIZATIONS_DIR_FOR_JS = os.path.join(\n 'local_compiled_js', 'extensions', 'visualizations')\n\nOBJECT_DEFAULT_VALUES_FILE_PATH = os.path.join(\n 'extensions', 'objects', 'object_defaults.json')\nRULES_DESCRIPTIONS_FILE_PATH = os.path.join(\n os.getcwd(), 'extensions', 'interactions', 'rule_templates.json')\n\n# A mapping of interaction ids to classifier properties.\nINTERACTION_CLASSIFIER_MAPPING = {\n 'TextInput': {\n 'algorithm_id': 'TextClassifier',\n 'current_data_schema_version': 1\n },\n 'CodeRepl': {\n 'algorithm_id': 'CodeClassifier',\n 'current_data_schema_version': 1\n }\n}\n# Classifier job time to live (in mins).\nCLASSIFIER_JOB_TTL_MINS = 5\nTRAINING_JOB_STATUS_COMPLETE = 
'COMPLETE'\nTRAINING_JOB_STATUS_FAILED = 'FAILED'\nTRAINING_JOB_STATUS_NEW = 'NEW'\nTRAINING_JOB_STATUS_PENDING = 'PENDING'\n\nALLOWED_TRAINING_JOB_STATUSES = [\n TRAINING_JOB_STATUS_COMPLETE,\n TRAINING_JOB_STATUS_FAILED,\n TRAINING_JOB_STATUS_NEW,\n TRAINING_JOB_STATUS_PENDING\n]\n\n# The maximum number of characters allowed for userbio length.\nMAX_BIO_LENGTH_IN_CHARS = 2000\n\nALLOWED_TRAINING_JOB_STATUS_CHANGES = {\n TRAINING_JOB_STATUS_COMPLETE: [],\n TRAINING_JOB_STATUS_NEW: [TRAINING_JOB_STATUS_PENDING],\n TRAINING_JOB_STATUS_PENDING: [TRAINING_JOB_STATUS_COMPLETE,\n TRAINING_JOB_STATUS_FAILED],\n TRAINING_JOB_STATUS_FAILED: [TRAINING_JOB_STATUS_NEW]\n}\n\nENTITY_TYPE_EXPLORATION = 'exploration'\nENTITY_TYPE_TOPIC = 'topic'\n\n# The maximum number of activities allowed in the playlist of the learner. This\n# limit applies to both the explorations playlist and the collections playlist.\nMAX_LEARNER_PLAYLIST_ACTIVITY_COUNT = 10\n\n# The minimum number of training samples required for training a classifier.\nMIN_TOTAL_TRAINING_EXAMPLES = 50\n\n# The minimum number of assigned labels required for training a classifier.\nMIN_ASSIGNED_LABELS = 2\n\n# Default label for classification algorithms.\nDEFAULT_CLASSIFIER_LABEL = '_default'\n\n# The maximum number of results to retrieve in a datastore query.\nDEFAULT_QUERY_LIMIT = 1000\n\n# The maximum number of results to retrieve in a datastore query\n# for top rated published explorations in /library page.\nNUMBER_OF_TOP_RATED_EXPLORATIONS_FOR_LIBRARY_PAGE = 8\n\n# The maximum number of results to retrieve in a datastore query\n# for recently published explorations in /library page.\nRECENTLY_PUBLISHED_QUERY_LIMIT_FOR_LIBRARY_PAGE = 8\n\n# The maximum number of results to retrieve in a datastore query\n# for top rated published explorations in /library/top_rated page.\nNUMBER_OF_TOP_RATED_EXPLORATIONS_FULL_PAGE = 20\n\n# The maximum number of results to retrieve in a datastore query\n# for recently published explorations in /library/recently_published page.\nRECENTLY_PUBLISHED_QUERY_LIMIT_FULL_PAGE = 20\n\n# The current version of the dashboard stats blob schema. If any backward-\n# incompatible changes are made to the stats blob schema in the data store,\n# this version number must be changed.\nCURRENT_DASHBOARD_STATS_SCHEMA_VERSION = 1\n\n# The current version of the exploration states blob schema. If any backward-\n# incompatible changes are made to the states blob schema in the data store,\n# this version number must be changed and the exploration migration job\n# executed.\nCURRENT_STATE_SCHEMA_VERSION = 28\n\n# The current version of the all collection blob schemas (such as the nodes\n# structure within the Collection domain object). 
If any backward-incompatible\n# changes are made to any of the blob schemas in the data store, this version\n# number must be changed.\nCURRENT_COLLECTION_SCHEMA_VERSION = 6\n\n# The current version of story contents dict in the story schema.\nCURRENT_STORY_CONTENTS_SCHEMA_VERSION = 1\n\n# The current version of skill contents dict in the skill schema.\nCURRENT_SKILL_CONTENTS_SCHEMA_VERSION = 1\n\n# The current version of misconceptions dict in the skill schema.\nCURRENT_MISCONCEPTIONS_SCHEMA_VERSION = 1\n\n# The current version of subtopics dict in the topic schema.\nCURRENT_SUBTOPIC_SCHEMA_VERSION = 1\n\n# The current version of page_contents dict in the subtopic page schema.\nCURRENT_SUBTOPIC_PAGE_CONTENTS_SCHEMA_VERSION = 1\n\n# This value should be updated in the event of any\n# StateAnswersModel.submitted_answer_list schema change.\nCURRENT_STATE_ANSWERS_SCHEMA_VERSION = 1\n\n# The default number of exploration tiles to load at a time in the search\n# results page.\nSEARCH_RESULTS_PAGE_SIZE = 20\n\n# The default number of commits to show on a page in the exploration history\n# tab.\nCOMMIT_LIST_PAGE_SIZE = 50\n\n# The default number of items to show on a page in the exploration feedback\n# tab.\nFEEDBACK_TAB_PAGE_SIZE = 20\n\n# The maximum number of top unresolved answers which should be aggregated\n# from all of the submitted answers.\nTOP_UNRESOLVED_ANSWERS_LIMIT = 20\n\n# Default title for a newly-minted exploration.\nDEFAULT_EXPLORATION_TITLE = ''\n# Default category for a newly-minted exploration.\nDEFAULT_EXPLORATION_CATEGORY = ''\n# Default objective for a newly-minted exploration.\nDEFAULT_EXPLORATION_OBJECTIVE = ''\n\n# NOTE TO DEVELOPERS: If any of the 5 constants below are modified, the\n# corresponding field in NEW_STATE_TEMPLATE in constants.js also has to be\n# modified.\n\n# Default name for the initial state of an exploration.\nDEFAULT_INIT_STATE_NAME = 'Introduction'\n# Default content id for the state's content.\nDEFAULT_NEW_STATE_CONTENT_ID = 'content'\n# Default content id for the interaction's default outcome.\nDEFAULT_OUTCOME_CONTENT_ID = 'default_outcome'\n# Default content id for the explanation in the concept card of a skill.\nDEFAULT_EXPLANATION_CONTENT_ID = 'explanation'\n# Default recorded_voiceovers dict for a default state template.\nDEFAULT_RECORDED_VOICEOVERS = {\n 'voiceovers_mapping': {\n 'content': {},\n 'default_outcome': {}\n }\n}\n# Default written_translations dict for a default state template.\nDEFAULT_WRITTEN_TRANSLATIONS = {\n 'translations_mapping': {\n 'content': {},\n 'default_outcome': {}\n }\n}\n# The default content text for the initial state of an exploration.\nDEFAULT_INIT_STATE_CONTENT_STR = ''\n\n# Whether new explorations should have automatic text-to-speech enabled\n# by default.\nDEFAULT_AUTO_TTS_ENABLED = True\n\n# Default title for a newly-minted collection.\nDEFAULT_COLLECTION_TITLE = ''\n# Default category for a newly-minted collection.\nDEFAULT_COLLECTION_CATEGORY = ''\n# Default objective for a newly-minted collection.\nDEFAULT_COLLECTION_OBJECTIVE = ''\n\n# Default description for a newly-minted story.\nDEFAULT_STORY_DESCRIPTION = ''\n# Default notes for a newly-minted story.\nDEFAULT_STORY_NOTES = ''\n\n# Default explanation for a newly-minted skill.\nDEFAULT_SKILL_EXPLANATION = ''\n# Default name for a newly-minted misconception.\nDEFAULT_MISCONCEPTION_NAME = ''\n# Default notes for a newly-minted misconception.\nDEFAULT_MISCONCEPTION_NOTES = ''\n# Default feedback for a newly-minted 
misconception.\nDEFAULT_MISCONCEPTION_FEEDBACK = ''\n# Default content_id for explanation subtitled html.\nDEFAULT_SKILL_EXPLANATION_CONTENT_ID = 'explanation'\n\n# Default description for a newly-minted topic.\nDEFAULT_TOPIC_DESCRIPTION = ''\n# Default content id for the subtopic page's content.\nDEFAULT_SUBTOPIC_PAGE_CONTENT_ID = 'content'\n\n# Default ID of VM which is used for training classifier.\nDEFAULT_VM_ID = 'vm_default'\n# Shared secret key for default VM.\nDEFAULT_VM_SHARED_SECRET = '1a2b3c4e'\n\n# An array containing the accepted image formats (as determined by the imghdr\n# module) and the corresponding allowed extensions in the filenames of uploaded\n# images.\nACCEPTED_IMAGE_FORMATS_AND_EXTENSIONS = {\n 'jpeg': ['jpg', 'jpeg'],\n 'png': ['png'],\n 'gif': ['gif'],\n}\n\n# An array containing the accepted audio extensions for uploaded files and\n# the corresponding MIME types.\nACCEPTED_AUDIO_EXTENSIONS = {\n 'mp3': ['audio/mp3']\n}\n\n# Prefix for data sent from the server to the client via JSON.\nXSSI_PREFIX = ')]}\\'\\n'\n# A regular expression for alphanumeric characters.\nALPHANUMERIC_REGEX = r'^[A-Za-z0-9]+$'\n# A regular expression for tags.\nTAG_REGEX = r'^[a-z ]+$'\n\n# Invalid names for parameters used in expressions.\nAUTOMATICALLY_SET_PARAMETER_NAMES = ['answer', 'choices']\nINVALID_PARAMETER_NAMES = AUTOMATICALLY_SET_PARAMETER_NAMES + [\n 'abs', 'all', 'and', 'any', 'else', 'floor', 'if', 'log', 'or',\n 'pow', 'round', 'then']\n\n# These are here rather than in rating_services.py to avoid import\n# circularities with exp_services.\n# TODO (Jacob) Refactor exp_services to remove this problem.\n_EMPTY_RATINGS = {'1': 0, '2': 0, '3': 0, '4': 0, '5': 0}\n\n\ndef get_empty_ratings():\n \"\"\"Returns a copy of the empty ratings object.\n\n Returns:\n dict. Copy of the '_EMPTY_RATINGS' dict object which contains the empty\n ratings.\n \"\"\"\n return copy.deepcopy(_EMPTY_RATINGS)\n\n\n# Empty scaled average rating as a float.\nEMPTY_SCALED_AVERAGE_RATING = 0.0\n\n# To use GAE email service.\nEMAIL_SERVICE_PROVIDER_GAE = 'gae_email_service'\n# To use mailgun email service.\nEMAIL_SERVICE_PROVIDER_MAILGUN = 'mailgun_email_service'\n# Use GAE email service by default.\nEMAIL_SERVICE_PROVIDER = EMAIL_SERVICE_PROVIDER_GAE\n# If the Mailgun email API is used, the \"None\" below should be replaced\n# with the Mailgun API key.\nMAILGUN_API_KEY = None\n# If the Mailgun email API is used, the \"None\" below should be replaced\n# with the Mailgun domain name (ending with mailgun.org).\nMAILGUN_DOMAIN_NAME = None\n\n# Committer id for system actions.\nSYSTEM_COMMITTER_ID = 'admin'\n# Domain name for email address.\nINCOMING_EMAILS_DOMAIN_NAME = 'example.com'\nSYSTEM_EMAIL_ADDRESS = '[email protected]'\nSYSTEM_EMAIL_NAME = '.'\nADMIN_EMAIL_ADDRESS = '[email protected]'\nNOREPLY_EMAIL_ADDRESS = '[email protected]'\n# Ensure that SYSTEM_EMAIL_ADDRESS and ADMIN_EMAIL_ADDRESS are both valid and\n# correspond to owners of the app before setting this to True. If\n# SYSTEM_EMAIL_ADDRESS is not that of an app owner, email messages from this\n# address cannot be sent. 
If True then emails can be sent to any user.\nCAN_SEND_EMAILS = False\n# If you want to turn on this facility please check the email templates in the\n# send_role_notification_email() function in email_manager.py and modify them\n# accordingly.\nCAN_SEND_EDITOR_ROLE_EMAILS = False\n# If enabled then emails will be sent to creators for feedback messages.\nCAN_SEND_FEEDBACK_MESSAGE_EMAILS = False\n# If enabled subscription emails will be sent to that user.\nCAN_SEND_SUBSCRIPTION_EMAILS = False\n# Time to wait before sending feedback message emails (currently set to 1\n# hour).\nDEFAULT_FEEDBACK_MESSAGE_EMAIL_COUNTDOWN_SECS = 3600\n# Whether to send an email when new feedback message is received for\n# an exploration.\nDEFAULT_FEEDBACK_MESSAGE_EMAIL_PREFERENCE = True\n# Whether to send an email to all the creator's subscribers when he/she\n# publishes an exploration.\nDEFAULT_SUBSCRIPTION_EMAIL_PREFERENCE = True\n# Whether exploration feedback emails are muted,\n# when the user has not specified a preference.\nDEFAULT_FEEDBACK_NOTIFICATIONS_MUTED_PREFERENCE = False\n# Whether exploration suggestion emails are muted,\n# when the user has not specified a preference.\nDEFAULT_SUGGESTION_NOTIFICATIONS_MUTED_PREFERENCE = False\n# Whether to send email updates to a user who has not specified a preference.\nDEFAULT_EMAIL_UPDATES_PREFERENCE = False\n# Whether to send an invitation email when the user is granted\n# new role permissions in an exploration.\nDEFAULT_EDITOR_ROLE_EMAIL_PREFERENCE = True\n# Whether to require an email to be sent, following a moderator action.\nREQUIRE_EMAIL_ON_MODERATOR_ACTION = False\n# Timespan in minutes before allowing duplicate emails.\nDUPLICATE_EMAIL_INTERVAL_MINS = 2\n# Number of digits after decimal to which the average ratings value in the\n# dashboard is rounded off to.\nAVERAGE_RATINGS_DASHBOARD_PRECISION = 2\n# Whether to enable maintenance mode on the site. For non-admins, this redirects\n# all HTTP requests to the maintenance page. This is the only check which\n# determines whether the site is in maintenance mode to avoid queries to the\n# database by non-admins.\nENABLE_MAINTENANCE_MODE = False\n\n# The interactions permissible for a question.\nALLOWED_QUESTION_INTERACTION_IDS = [\n 'TextInput', 'MultipleChoiceInput', 'NumericInput']\n\n# Flag to disable sending emails related to reviews for suggestions. To be\n# flipped after deciding (and implementing) whether a user should be scored\n# only for curated lessons.\nSEND_SUGGESTION_REVIEW_RELATED_EMAILS = False\n# To prevent recording scores for users until details like whether to score\n# users for only curated lessons is confirmed.\nENABLE_RECORDING_OF_SCORES = False\n\n# No. 
of pretest questions to display.\nNUM_PRETEST_QUESTIONS = 3\n\n# Whether to automatically accept suggestions after a threshold time.\nENABLE_AUTO_ACCEPT_OF_SUGGESTIONS = False\n\nEMAIL_INTENT_SIGNUP = 'signup'\nEMAIL_INTENT_DAILY_BATCH = 'daily_batch'\nEMAIL_INTENT_EDITOR_ROLE_NOTIFICATION = 'editor_role_notification'\nEMAIL_INTENT_FEEDBACK_MESSAGE_NOTIFICATION = 'feedback_message_notification'\nEMAIL_INTENT_SUBSCRIPTION_NOTIFICATION = 'subscription_notification'\nEMAIL_INTENT_SUGGESTION_NOTIFICATION = 'suggestion_notification'\nEMAIL_INTENT_REPORT_BAD_CONTENT = 'report_bad_content'\nEMAIL_INTENT_MARKETING = 'marketing'\nEMAIL_INTENT_UNPUBLISH_EXPLORATION = 'unpublish_exploration'\nEMAIL_INTENT_DELETE_EXPLORATION = 'delete_exploration'\nEMAIL_INTENT_QUERY_STATUS_NOTIFICATION = 'query_status_notification'\nEMAIL_INTENT_ONBOARD_REVIEWER = 'onboard_reviewer'\nEMAIL_INTENT_REVIEW_SUGGESTIONS = 'review_suggestions'\n# Possible intents for email sent in bulk.\nBULK_EMAIL_INTENT_MARKETING = 'bulk_email_marketing'\nBULK_EMAIL_INTENT_IMPROVE_EXPLORATION = 'bulk_email_improve_exploration'\nBULK_EMAIL_INTENT_CREATE_EXPLORATION = 'bulk_email_create_exploration'\nBULK_EMAIL_INTENT_CREATOR_REENGAGEMENT = 'bulk_email_creator_reengagement'\nBULK_EMAIL_INTENT_LEARNER_REENGAGEMENT = 'bulk_email_learner_reengagement'\nBULK_EMAIL_INTENT_TEST = 'bulk_email_test'\n\nMESSAGE_TYPE_FEEDBACK = 'feedback'\nMESSAGE_TYPE_SUGGESTION = 'suggestion'\n\nMODERATOR_ACTION_UNPUBLISH_EXPLORATION = 'unpublish_exploration'\nDEFAULT_SALUTATION_HTML_FN = (\n lambda recipient_username: 'Hi %s,' % recipient_username)\nDEFAULT_SIGNOFF_HTML_FN = (\n lambda sender_username: (\n 'Thanks!<br>%s (Oppia moderator)' % sender_username))\n\nVALID_MODERATOR_ACTIONS = {\n MODERATOR_ACTION_UNPUBLISH_EXPLORATION: {\n 'email_config': 'unpublish_exploration_email_html_body',\n 'email_subject_fn': (\n lambda exp_title: (\n 'Your Oppia exploration \"%s\" has been unpublished' % exp_title)\n ),\n 'email_intent': 'unpublish_exploration',\n 'email_salutation_html_fn': DEFAULT_SALUTATION_HTML_FN,\n 'email_signoff_html_fn': DEFAULT_SIGNOFF_HTML_FN,\n },\n}\n\n# When the site terms were last updated, in UTC.\nREGISTRATION_PAGE_LAST_UPDATED_UTC = datetime.datetime(2015, 10, 14, 2, 40, 0)\n\n# Format of string for dashboard statistics logs.\n# NOTE TO DEVELOPERS: This format should not be changed, since it is used in\n# the existing storage models for UserStatsModel.\nDASHBOARD_STATS_DATETIME_STRING_FORMAT = '%Y-%m-%d'\n\n# The maximum size of an uploaded file, in bytes.\nMAX_FILE_SIZE_BYTES = 1048576\n\n# The maximum playback length of an audio file, in seconds.\nMAX_AUDIO_FILE_LENGTH_SEC = 300\n\n# The minimum score required for a user to review suggestions of a particular\n# category.\nMINIMUM_SCORE_REQUIRED_TO_REVIEW = 10\n\n# The prefix for an 'accepted suggestion' commit message.\nCOMMIT_MESSAGE_ACCEPTED_SUGGESTION_PREFIX = 'Accepted suggestion by'\n\n# User id and username for exploration migration bot. Commits made by this bot\n# are not reflected in the exploration summary models, but are recorded in the\n# exploration commit log.\nMIGRATION_BOT_USER_ID = 'OppiaMigrationBot'\nMIGRATION_BOT_USERNAME = 'OppiaMigrationBot'\n\n# User id and username for suggestion bot. 
This bot will be used to accept\n# suggestions automatically after a threshold time.\nSUGGESTION_BOT_USER_ID = 'OppiaSuggestionBot'\nSUGGESTION_BOT_USERNAME = 'OppiaSuggestionBot'\n\n# Ids and locations of the permitted extensions.\nALLOWED_RTE_EXTENSIONS = {\n 'Collapsible': {\n 'dir': os.path.join(RTE_EXTENSIONS_DIR, 'Collapsible')\n },\n 'Image': {\n 'dir': os.path.join(RTE_EXTENSIONS_DIR, 'Image')\n },\n 'Link': {\n 'dir': os.path.join(RTE_EXTENSIONS_DIR, 'Link')\n },\n 'Math': {\n 'dir': os.path.join(RTE_EXTENSIONS_DIR, 'Math')\n },\n 'Tabs': {\n 'dir': os.path.join(RTE_EXTENSIONS_DIR, 'Tabs')\n },\n 'Video': {\n 'dir': os.path.join(RTE_EXTENSIONS_DIR, 'Video')\n },\n}\n\n# The list of interaction IDs which correspond to interactions that set their\n# is_linear property to true. Linear interactions do not support branching and\n# thus only allow for default answer classification. This value is guarded by a\n# test in extensions.interactions.base_test.\nLINEAR_INTERACTION_IDS = ['Continue']\n\n# Demo explorations to load through the admin panel. The id assigned to each\n# exploration is based on the key of the exploration in this dict, so ensure it\n# doesn't change once it's in the list. Only integer-based indices should be\n# used in this list, as it maintains backward compatibility with how demo\n# explorations used to be assigned IDs. The value of each entry in this dict is\n# either a YAML file or a directory (depending on whether it ends in .yaml).\n# These explorations can be found under data/explorations.\nDEMO_EXPLORATIONS = {\n u'0': 'welcome.yaml',\n u'1': 'multiples.yaml',\n u'2': 'binary_search',\n u'3': 'root_linear_coefficient_theorem.yaml',\n u'4': 'three_balls',\n # TODO(bhenning): Replace demo exploration '5' with a new exploration\n # described in #1376.\n u'6': 'boot_verbs.yaml',\n u'7': 'hola.yaml',\n u'8': 'adventure.yaml',\n u'9': 'pitch_perfect.yaml',\n u'10': 'test_interactions',\n u'11': 'modeling_graphs',\n u'12': 'protractor_test_1.yaml',\n u'13': 'solar_system',\n u'14': 'about_oppia.yaml',\n u'15': 'classifier_demo_exploration.yaml',\n u'16': 'all_interactions',\n u'17': 'audio_test',\n u'18': 'code_classifier_test.yaml',\n u'19': 'example_exploration_in_collection1.yaml',\n u'20': 'example_exploration_in_collection2.yaml',\n u'21': 'example_exploration_in_collection3.yaml',\n u'22': 'protractor_mobile_test_exploration.yaml',\n u'23': 'rating_test.yaml',\n u'24': 'learner_flow_test.yaml',\n u'25': 'exploration_player_test.yaml',\n}\n\nDEMO_COLLECTIONS = {\n u'0': 'welcome_to_collections.yaml',\n u'1': 'learner_flow_test_collection.yaml'\n}\n\n# IDs of explorations which should not be displayable in either the learner or\n# editor views.\nDISABLED_EXPLORATION_IDS = ['5']\n\n# Oppia Google Group URL.\nGOOGLE_GROUP_URL = (\n 'https://groups.google.com/forum/?place=forum/oppia#!forum/oppia')\n\n# External URL for the Foundation site.\nFOUNDATION_SITE_URL = 'http://oppiafoundation.org'\n\n# Prefix for all taskqueue-related URLs.\nTASKQUEUE_URL_PREFIX = '/task'\nTASK_URL_FEEDBACK_MESSAGE_EMAILS = (\n '%s/email/batchfeedbackmessageemailhandler' % TASKQUEUE_URL_PREFIX)\nTASK_URL_FEEDBACK_STATUS_EMAILS = (\n '%s/email/feedbackthreadstatuschangeemailhandler' % TASKQUEUE_URL_PREFIX)\nTASK_URL_FLAG_EXPLORATION_EMAILS = (\n '%s/email/flagexplorationemailhandler' % TASKQUEUE_URL_PREFIX)\nTASK_URL_INSTANT_FEEDBACK_EMAILS = (\n '%s/email/instantfeedbackmessageemailhandler' % TASKQUEUE_URL_PREFIX)\nTASK_URL_SUGGESTION_EMAILS = (\n '%s/email/suggestionemailhandler' % 
TASKQUEUE_URL_PREFIX)\n\n# TODO(sll): Add all other URLs here.\nADMIN_URL = '/admin'\nADMIN_ROLE_HANDLER_URL = '/adminrolehandler'\nCOLLECTION_DATA_URL_PREFIX = '/collection_handler/data'\nCOLLECTION_EDITOR_DATA_URL_PREFIX = '/collection_editor_handler/data'\nCOLLECTION_SUMMARIES_DATA_URL = '/collectionsummarieshandler/data'\nCOLLECTION_RIGHTS_PREFIX = '/collection_editor_handler/rights'\nCOLLECTION_PUBLISH_PREFIX = '/collection_editor_handler/publish'\nCOLLECTION_UNPUBLISH_PREFIX = '/collection_editor_handler/unpublish'\nCOLLECTION_EDITOR_URL_PREFIX = '/collection_editor/create'\nCOLLECTION_URL_PREFIX = '/collection'\nCONCEPT_CARD_DATA_URL_PREFIX = '/concept_card_handler'\nCREATOR_DASHBOARD_DATA_URL = '/creatordashboardhandler/data'\nCREATOR_DASHBOARD_URL = '/creator_dashboard'\nCUSTOM_NONPROFITS_LANDING_PAGE_URL = '/nonprofits'\nCUSTOM_PARENTS_LANDING_PAGE_URL = '/parents'\nCUSTOM_PARTNERS_LANDING_PAGE_URL = '/partners'\nCUSTOM_TEACHERS_LANDING_PAGE_URL = '/teachers'\nCUSTOM_VOLUNTEERS_LANDING_PAGE_URL = '/volunteers'\nDASHBOARD_CREATE_MODE_URL = '%s?mode=create' % CREATOR_DASHBOARD_URL\nEDITOR_URL_PREFIX = '/create'\nEXPLORATION_DATA_PREFIX = '/createhandler/data'\nEXPLORATION_FEATURES_PREFIX = '/explorehandler/features'\nEXPLORATION_INIT_URL_PREFIX = '/explorehandler/init'\nEXPLORATION_METADATA_SEARCH_URL = '/exploration/metadata_search'\nEXPLORATION_PRETESTS_URL_PREFIX = '/pretest_handler'\nEXPLORATION_RIGHTS_PREFIX = '/createhandler/rights'\nEXPLORATION_STATE_ANSWER_STATS_PREFIX = '/createhandler/state_answer_stats'\nEXPLORATION_STATUS_PREFIX = '/createhandler/status'\nEXPLORATION_SUMMARIES_DATA_URL = '/explorationsummarieshandler/data'\nEXPLORATION_URL_PREFIX = '/explore'\nEXPLORATION_URL_EMBED_PREFIX = '/embed/exploration'\nFEEDBACK_STATS_URL_PREFIX = '/feedbackstatshandler'\nFEEDBACK_THREAD_URL_PREFIX = '/threadhandler'\nFEEDBACK_THREADLIST_URL_PREFIX = '/threadlisthandler'\nFEEDBACK_THREADLIST_URL_PREFIX_FOR_TOPICS = '/threadlisthandlerfortopic'\nFEEDBACK_THREAD_VIEW_EVENT_URL = '/feedbackhandler/thread_view_event'\nFLAG_EXPLORATION_URL_PREFIX = '/flagexplorationhandler'\nFRACTIONS_LANDING_PAGE_URL = '/fractions'\nTOPIC_LANDING_PAGE_URL = '/learn/<subject>/<topic>'\nLEARNER_DASHBOARD_URL = '/learner_dashboard'\nLEARNER_DASHBOARD_DATA_URL = '/learnerdashboardhandler/data'\nLEARNER_DASHBOARD_IDS_DATA_URL = '/learnerdashboardidshandler/data'\nLEARNER_DASHBOARD_FEEDBACK_THREAD_DATA_URL = '/learnerdashboardthreadhandler'\nLEARNER_PLAYLIST_DATA_URL = '/learnerplaylistactivityhandler'\nLEARNER_INCOMPLETE_ACTIVITY_DATA_URL = '/learnerincompleteactivityhandler'\nLIBRARY_GROUP_DATA_URL = '/librarygrouphandler'\nLIBRARY_INDEX_URL = '/library'\nLIBRARY_INDEX_DATA_URL = '/libraryindexhandler'\nLIBRARY_RECENTLY_PUBLISHED_URL = '/library/recently_published'\nLIBRARY_SEARCH_URL = '/search/find'\nLIBRARY_SEARCH_DATA_URL = '/searchhandler/data'\nLIBRARY_TOP_RATED_URL = '/library/top_rated'\nMERGE_SKILLS_URL = '/merge_skills_handler'\nNEW_COLLECTION_URL = '/collection_editor_handler/create_new'\nNEW_EXPLORATION_URL = '/contributehandler/create_new'\nNEW_QUESTION_URL = '/question_editor_handler/create_new'\nNEW_SKILL_URL = '/skill_editor_handler/create_new'\nTOPIC_EDITOR_STORY_URL = '/topic_editor_story_handler'\nTOPIC_EDITOR_QUESTION_URL = '/topic_editor_question_handler'\nNEW_TOPIC_URL = '/topic_editor_handler/create_new'\nNOTIFICATIONS_DASHBOARD_URL = '/notifications_dashboard'\nPREFERENCES_URL = '/preferences'\nPRACTICE_SESSION_URL_PREFIX = '/practice_session'\nPRACTICE_SESSION_DATA_URL_PREFIX = 
'/practice_session/data'\nPREFERENCES_DATA_URL = '/preferenceshandler/data'\nQUESTION_EDITOR_DATA_URL_PREFIX = '/question_editor_handler/data'\nQUESTION_SKILL_LINK_URL_PREFIX = '/manage_question_skill_link'\nQUESTIONS_URL_PREFIX = '/question_player_handler'\nRECENT_COMMITS_DATA_URL = '/recentcommitshandler/recent_commits'\nRECENT_FEEDBACK_MESSAGES_DATA_URL = '/recent_feedback_messages'\nROBOTS_TXT_URL = '/robots.txt'\nSITE_LANGUAGE_DATA_URL = '/save_site_language'\nSIGNUP_DATA_URL = '/signuphandler/data'\nSIGNUP_URL = '/signup'\nSKILL_EDITOR_DATA_URL_PREFIX = '/skill_editor_handler/data'\nSKILL_EDITOR_URL_PREFIX = '/skill_editor'\nSKILL_EDITOR_QUESTION_URL = '/skill_editor_question_handler'\nSKILL_RIGHTS_URL_PREFIX = '/skill_editor_handler/rights'\nSKILL_PUBLISH_URL_PREFIX = '/skill_editor_handler/publish_skill'\nSPLASH_URL = '/splash'\nSTORY_DATA_HANDLER = '/story_data_handler'\nSTORY_EDITOR_URL_PREFIX = '/story_editor'\nSTORY_EDITOR_DATA_URL_PREFIX = '/story_editor_handler/data'\nSUBTOPIC_DATA_HANDLER = '/subtopic_data_handler'\nSUGGESTION_ACTION_URL_PREFIX = '/suggestionactionhandler'\nSUGGESTION_LIST_URL_PREFIX = '/suggestionlisthandler'\nSUGGESTION_URL_PREFIX = '/suggestionhandler'\nSUBSCRIBE_URL_PREFIX = '/subscribehandler'\nSUBTOPIC_PAGE_EDITOR_DATA_URL_PREFIX = '/subtopic_page_editor_handler/data'\nTOPIC_VIEWER_URL_PREFIX = '/topic'\nTOPIC_DATA_HANDLER = '/topic_data_handler'\nTOPIC_EDITOR_DATA_URL_PREFIX = '/topic_editor_handler/data'\nTOPIC_EDITOR_URL_PREFIX = '/topic_editor'\nTOPIC_MANAGER_RIGHTS_URL_PREFIX = '/rightshandler/assign_topic_manager'\nTOPIC_RIGHTS_URL_PREFIX = '/rightshandler/get_topic_rights'\nTOPIC_SEND_MAIL_URL_PREFIX = '/rightshandler/send_topic_publish_mail'\nTOPIC_STATUS_URL_PREFIX = '/rightshandler/change_topic_status'\nTOPICS_AND_SKILLS_DASHBOARD_DATA_URL = '/topics_and_skills_dashboard/data'\nTOPICS_AND_SKILLS_DASHBOARD_URL = '/topics_and_skills_dashboard'\nUNSUBSCRIBE_URL_PREFIX = '/unsubscribehandler'\nUPLOAD_EXPLORATION_URL = '/contributehandler/upload'\nUSER_EXPLORATION_EMAILS_PREFIX = '/createhandler/notificationpreferences'\nUSERNAME_CHECK_DATA_URL = '/usernamehandler/data'\nVOICEOVER_DATA_PREFIX = '/createhandler/voiceover'\n\n# Event types.\nEVENT_TYPE_ALL_STATS = 'all_stats'\nEVENT_TYPE_STATE_HIT = 'state_hit'\nEVENT_TYPE_STATE_COMPLETED = 'state_complete'\nEVENT_TYPE_ANSWER_SUBMITTED = 'answer_submitted'\nEVENT_TYPE_DEFAULT_ANSWER_RESOLVED = 'default_answer_resolved'\nEVENT_TYPE_NEW_THREAD_CREATED = 'feedback_thread_created'\nEVENT_TYPE_THREAD_STATUS_CHANGED = 'feedback_thread_status_changed'\nEVENT_TYPE_RATE_EXPLORATION = 'rate_exploration'\nEVENT_TYPE_SOLUTION_HIT = 'solution_hit'\nEVENT_TYPE_LEAVE_FOR_REFRESHER_EXP = 'leave_for_refresher_exp'\n# The values for these event types should be left as-is for backwards\n# compatibility.\nEVENT_TYPE_START_EXPLORATION = 'start'\nEVENT_TYPE_ACTUAL_START_EXPLORATION = 'actual_start'\nEVENT_TYPE_MAYBE_LEAVE_EXPLORATION = 'leave'\nEVENT_TYPE_COMPLETE_EXPLORATION = 'complete'\n\n# Play type constants.\nPLAY_TYPE_PLAYTEST = 'playtest'\nPLAY_TYPE_NORMAL = 'normal'\n\n# Predefined commit messages.\nCOMMIT_MESSAGE_EXPLORATION_DELETED = 'Exploration deleted.'\nCOMMIT_MESSAGE_COLLECTION_DELETED = 'Collection deleted.'\nCOMMIT_MESSAGE_QUESTION_DELETED = 'Question deleted.'\nCOMMIT_MESSAGE_SKILL_DELETED = 'Skill deleted.'\nCOMMIT_MESSAGE_STORY_DELETED = 'Story deleted.'\nCOMMIT_MESSAGE_SUBTOPIC_PAGE_DELETED = 'Subtopic page deleted.'\nCOMMIT_MESSAGE_TOPIC_DELETED = 'Topic deleted.'\n\n# Max number of playthroughs 
for an issue.\nMAX_PLAYTHROUGHS_FOR_ISSUE = 5\n\n# Unfinished features.\nSHOW_TRAINABLE_UNRESOLVED_ANSWERS = False\n# Number of unresolved answers to be displayed in the dashboard for each\n# exploration.\nTOP_UNRESOLVED_ANSWERS_COUNT_DASHBOARD = 3\n# Number of open feedback to be displayed in the dashboard for each exploration.\nOPEN_FEEDBACK_COUNT_DASHBOARD = 3\n# NOTE TO DEVELOPERS: This should be synchronized with App.js.\nENABLE_ML_CLASSIFIERS = False\nSHOW_COLLECTION_NAVIGATION_TAB_HISTORY = False\nSHOW_COLLECTION_NAVIGATION_TAB_STATS = False\n\n# The regular expression used to identify whether a string contains float value.\n# The regex must match with regex that is stored in vmconf.py file of Oppia-ml.\n# If this regex needs to be modified then first of all shutdown Oppia-ml VM.\n# Then update the regex constant in here and Oppia both.\n# Run any migration job that is required to migrate existing trained models\n# before starting Oppia-ml again.\nFLOAT_VERIFIER_REGEX = (\n '^([-+]?\\\\d*\\\\.\\\\d+)$|^([-+]?(\\\\d*\\\\.?\\\\d+|\\\\d+\\\\.?\\\\d*)e[-+]?\\\\d*)$')\n\n# Current event models schema version. All event models with an\n# event_schema_version of 1 are the events collected before the rework of the\n# statistics framework which brought about the recording of new event models;\n# these models include all models recorded before Feb 2018.\nCURRENT_EVENT_MODELS_SCHEMA_VERSION = 2\n\n# Output formats of downloaded explorations.\nOUTPUT_FORMAT_JSON = 'json'\nOUTPUT_FORMAT_ZIP = 'zip'\n\n# Types of updates shown in the 'recent updates' table in the dashboard page.\nUPDATE_TYPE_EXPLORATION_COMMIT = 'exploration_commit'\nUPDATE_TYPE_COLLECTION_COMMIT = 'collection_commit'\nUPDATE_TYPE_FEEDBACK_MESSAGE = 'feedback_thread'\n\n# Possible values for user query status.\n# Valid status transitions are: processing --> completed --> archived\n# or processing --> failed.\nUSER_QUERY_STATUS_PROCESSING = 'processing'\nUSER_QUERY_STATUS_COMPLETED = 'completed'\nUSER_QUERY_STATUS_ARCHIVED = 'archived'\nUSER_QUERY_STATUS_FAILED = 'failed'\n\n# The time difference between which to consider two login events \"close\". This\n# is taken to be 12 hours.\nPROXIMAL_TIMEDELTA_SECS = 12 * 60 * 60\n\n# The i18n id for the header of the \"Featured Activities\" category in the\n# library index page.\nLIBRARY_CATEGORY_FEATURED_ACTIVITIES = 'I18N_LIBRARY_GROUPS_FEATURED_ACTIVITIES'\n# The i18n id for the header of the \"Top Rated Explorations\" category in the\n# library index page.\nLIBRARY_CATEGORY_TOP_RATED_EXPLORATIONS = (\n 'I18N_LIBRARY_GROUPS_TOP_RATED_EXPLORATIONS')\n# The i18n id for the header of the \"Recently Published\" category in the\n# library index page.\nLIBRARY_CATEGORY_RECENTLY_PUBLISHED = 'I18N_LIBRARY_GROUPS_RECENTLY_PUBLISHED'\n\n# The group name that appears at the end of the url for the recently published\n# page.\nLIBRARY_GROUP_RECENTLY_PUBLISHED = 'recently_published'\n# The group name that appears at the end of the url for the top rated page.\nLIBRARY_GROUP_TOP_RATED = 'top_rated'\n\n# Defaults for topic similarities.\nDEFAULT_TOPIC_SIMILARITY = 0.5\nSAME_TOPIC_SIMILARITY = 1.0\n\n# The following are all page descriptions for the meta tag.\nABOUT_PAGE_DESCRIPTION = (\n 'Oppia is an open source learning platform that connects a community of '\n 'teachers and learners. 
You can use this site to create 1-1 learning '\n 'scenarios for others.')\nGET_STARTED_PAGE_DESCRIPTION = (\n 'Learn how to get started using Oppia.')\nCONTACT_PAGE_DESCRIPTION = (\n 'Contact the Oppia team, submit feedback, and learn how to get involved '\n 'with the Oppia project.')\nCREATE_PAGE_DESCRIPTION = (\n 'Help others learn new things. Create lessons through explorations and '\n 'share your knowledge with the community.')\nCREATOR_DASHBOARD_PAGE_DESCRIPTION = (\n 'Keep track of the lessons you have created, as well as feedback from '\n 'learners.')\nDONATE_PAGE_DESCRIPTION = (\n 'Donate to The Oppia Foundation.')\nLIBRARY_GROUP_PAGE_DESCRIPTION = (\n 'Discover top-rated or recently-published explorations on Oppia. Learn '\n 'from these explorations or help improve an existing one for the '\n 'community.')\nLIBRARY_PAGE_DESCRIPTION = (\n 'Looking to learn something new? Find explorations created by professors, '\n 'teachers and Oppia users in a subject you\\'re interested in, and start '\n 'exploring!')\nPREFERENCES_PAGE_DESCRIPTION = (\n 'Change your Oppia profile settings and preferences')\nSEARCH_PAGE_DESCRIPTION = (\n 'Discover a new exploration to learn from, or help improve an existing '\n 'one for the community.')\nSIGNUP_PAGE_DESCRIPTION = (\n 'Sign up for Oppia and begin exploring a new subject.')\nSPLASH_PAGE_DESCRIPTION = (\n 'Oppia is a free site for sharing knowledge via interactive lessons '\n 'called \\'explorations\\'. Learn from user-created explorations, or teach '\n 'and create your own.')\nTEACH_PAGE_DESCRIPTION = (\n 'The Oppia library is full of user-created lessons called \\'explorations\\'.'\n ' Read about how to participate in the community and begin creating '\n 'explorations.')\nTERMS_PAGE_DESCRIPTION = (\n 'Oppia is a 501(c)(3) registered non-profit open-source e-learning '\n 'platform. Learn about our terms and conditions for creating and '\n 'distributing learning material.')\nTHANKS_PAGE_DESCRIPTION = (\n 'Thank you for donating to The Oppia Foundation.')\n\n# The type of the response returned by a handler when an exception is raised.\nHANDLER_TYPE_HTML = 'html'\nHANDLER_TYPE_JSON = 'json'\nHANDLER_TYPE_DOWNLOADABLE = 'downloadable'\n\n# Following are the constants for the role IDs.\nROLE_ID_GUEST = 'GUEST'\nROLE_ID_BANNED_USER = 'BANNED_USER'\nROLE_ID_EXPLORATION_EDITOR = 'EXPLORATION_EDITOR'\nROLE_ID_COLLECTION_EDITOR = 'COLLECTION_EDITOR'\nROLE_ID_TOPIC_MANAGER = 'TOPIC_MANAGER'\nROLE_ID_MODERATOR = 'MODERATOR'\nROLE_ID_ADMIN = 'ADMIN'\n\n# Intent of the User making query to role structure via admin interface. 
Used\n# to store audit data regarding queries to role IDs.\nROLE_ACTION_UPDATE = 'update'\nROLE_ACTION_VIEW_BY_USERNAME = 'view_by_username'\nROLE_ACTION_VIEW_BY_ROLE = 'view_by_role'\n\nVIEW_METHOD_ROLE = 'role'\nVIEW_METHOD_USERNAME = 'username'\n\nQUESTION_BATCH_SIZE = 10\n\nSTATE_ANSWER_STATS_MIN_FREQUENCY = 2\n\nRTE_FORMAT_TEXTANGULAR = 'text-angular'\n\nRTE_FORMAT_CKEDITOR = 'ck-editor'\n\n# RTE content specifications according to the type of the editor.\nRTE_CONTENT_SPEC = {\n 'RTE_TYPE_TEXTANGULAR': {\n # Valid parent-child relation in TextAngular.\n 'ALLOWED_PARENT_LIST': {\n 'p': ['blockquote', 'div', 'pre', '[document]', 'ol', 'ul', 'li'],\n 'b': ['i', 'li', 'p', 'pre'],\n 'br': ['b', 'i', 'li', 'p'],\n 'i': ['b', 'li', 'p', 'pre'],\n 'li': ['ol', 'ul'],\n 'ol': ['ol', 'ul', 'blockquote', 'li', 'pre', 'div', '[document]'],\n 'ul': ['ol', 'ul', 'blockquote', 'li', 'pre', 'div', '[document]'],\n 'pre': ['ol', 'ul', 'blockquote', '[document]'],\n 'blockquote': ['blockquote', '[document]'],\n 'oppia-noninteractive-link': ['b', 'i', 'li', 'p', 'pre'],\n 'oppia-noninteractive-math': ['b', 'i', 'li', 'p', 'pre'],\n 'oppia-noninteractive-image': ['b', 'i', 'li', 'p', 'pre'],\n 'oppia-noninteractive-collapsible': ['b', 'i', 'li', 'p', 'pre'],\n 'oppia-noninteractive-video': ['b', 'i', 'li', 'p', 'pre'],\n 'oppia-noninteractive-tabs': ['b', 'i', 'li', 'p', 'pre']\n },\n # Valid html tags in TextAngular.\n 'ALLOWED_TAG_LIST': [\n 'p',\n 'b',\n 'br',\n 'i',\n 'li',\n 'ol',\n 'ul',\n 'pre',\n 'blockquote',\n 'oppia-noninteractive-link',\n 'oppia-noninteractive-math',\n 'oppia-noninteractive-image',\n 'oppia-noninteractive-collapsible',\n 'oppia-noninteractive-video',\n 'oppia-noninteractive-tabs'\n ]\n },\n 'RTE_TYPE_CKEDITOR': {\n # Valid parent-child relation in CKEditor.\n 'ALLOWED_PARENT_LIST': {\n 'p': ['blockquote', '[document]', 'li'],\n 'strong': ['em', 'li', 'p', 'pre'],\n 'em': ['strong', 'li', 'p', 'pre'],\n 'br': ['strong', 'em', 'li', 'p'],\n 'li': ['ol', 'ul'],\n 'ol': ['li', 'blockquote', 'pre', '[document]'],\n 'ul': ['li', 'blockquote', 'pre', '[document]'],\n 'pre': ['ol', 'ul', 'blockquote', 'li', '[document]'],\n 'blockquote': ['blockquote', '[document]'],\n 'oppia-noninteractive-link': ['strong', 'em', 'li', 'p', 'pre'],\n 'oppia-noninteractive-math': ['strong', 'em', 'li', 'p', 'pre'],\n 'oppia-noninteractive-image': ['blockquote', 'li', '[document]'],\n 'oppia-noninteractive-collapsible': [\n 'blockquote', 'li', '[document]'\n ],\n 'oppia-noninteractive-video': ['blockquote', 'li', '[document]'],\n 'oppia-noninteractive-tabs': ['blockquote', 'li', '[document]']\n },\n # Valid html tags in CKEditor.\n 'ALLOWED_TAG_LIST': [\n 'p',\n 'strong',\n 'br',\n 'em',\n 'li',\n 'ol',\n 'ul',\n 'pre',\n 'blockquote',\n 'oppia-noninteractive-link',\n 'oppia-noninteractive-math',\n 'oppia-noninteractive-image',\n 'oppia-noninteractive-collapsible',\n 'oppia-noninteractive-video',\n 'oppia-noninteractive-tabs'\n ]\n\n }\n}\n\n# A dict representing available landing pages, having subject as a key and list\n# of topics as the value.\n# Note: This dict needs to be keep in sync with frontend TOPIC_LANDING_PAGE_DATA\n# oppia constant defined in\n# core/templates/dev/head/pages/landing-pages/TopicLandingPage.js file.\nAVAILABLE_LANDING_PAGES = {\n 'maths': ['fractions', 'negative-numbers', 'ratios']\n}\n",
"path": "feconf.py"
}
] | diff --git a/assets/images/landing/maths/negative-numbers/negative_1.png b/assets/images/landing/maths/negative-numbers/negative_1.png
new file mode 100644
index 0000000000000..1dfc7b286dbad
Binary files /dev/null and b/assets/images/landing/maths/negative-numbers/negative_1.png differ
diff --git a/assets/images/landing/maths/negative-numbers/negative_2.png b/assets/images/landing/maths/negative-numbers/negative_2.png
new file mode 100644
index 0000000000000..f196d5d55d6ad
Binary files /dev/null and b/assets/images/landing/maths/negative-numbers/negative_2.png differ
diff --git a/assets/videos/landing/maths/negative-numbers/negative-numbers_video.mp4 b/assets/videos/landing/maths/negative-numbers/negative-numbers_video.mp4
new file mode 100644
index 0000000000000..aac65e3e68b59
Binary files /dev/null and b/assets/videos/landing/maths/negative-numbers/negative-numbers_video.mp4 differ
diff --git a/core/templates/dev/head/pages/landing-pages/topic-landing-page/topic-landing-page.controller.ts b/core/templates/dev/head/pages/landing-pages/topic-landing-page/topic-landing-page.controller.ts
index 66b5fbdef1187..3e7d78cea5f83 100644
--- a/core/templates/dev/head/pages/landing-pages/topic-landing-page/topic-landing-page.controller.ts
+++ b/core/templates/dev/head/pages/landing-pages/topic-landing-page/topic-landing-page.controller.ts
@@ -16,8 +16,11 @@
* @fileoverview Controller for landing page.
*/
+require(
+ 'components/common-layout-directives/common-elements/' +
+ 'background-banner.directive.ts');
+
require('domain/utilities/UrlInterpolationService.ts');
-require('filters/string-utility-filters/capitalize.filter.ts');
require(
'pages/landing-pages/topic-landing-page/topic-landing-page.controller.ts');
require('services/PageTitleService.ts');
@@ -28,26 +31,72 @@ require('services/SiteAnalyticsService.ts');
oppia.constant('TOPIC_LANDING_PAGE_DATA', {
maths: {
fractions: {
+ topic_title: 'Fractions',
collection_id: '4UgTQUc1tala',
page_data: {
- image_1: 'matthew_paper.png',
- image_2: 'matthew_fractions.png',
+ image_1: {
+ file_name: 'matthew_paper.png',
+ alt: 'Matthew showing parts of fractions written on a card.'
+ },
+ image_2: {
+ file_name: 'matthew_fractions.png',
+ alt: 'Matthew solving problems on Oppia.'
+ },
video: 'fractions_video.mp4',
+ lessons: [
+ 'What is a Fraction?',
+ 'Comparing Fractions',
+ 'The Meaning of "Equal Parts"',
+ 'Adding & Subtracting Fractions'
+ ]
+ }
+ },
+ 'negative-numbers': {
+ topic_title: 'Negative Numbers',
+ collection_id: 'GdYIgsfRZwG7',
+ page_data: {
+ image_1: {
+ file_name: 'negative_1.png',
+ alt: 'A boy showing 3 + -24 written on a slate.'
+ },
+ image_2: {
+ file_name: 'negative_2.png',
+ alt: 'A boy smiling and solving negative-number problems on Oppia.'
+ },
+ video: 'negative-numbers_video.mp4',
+ lessons: [
+ 'The Number Line',
+ 'What is a Negative Number?',
+ 'Adding & Subtracting Negative Numbers',
+ 'Multiplying & Dividing Negative Numbers'
+ ]
}
},
ratios: {
+ topic_title: 'Ratios',
collection_id: '53gXGLIR044l',
page_data: {
- image_1: 'ratios_James.png',
- image_2: 'ratios_question.png',
+ image_1: {
+ file_name: 'ratios_James.png',
+ alt: 'A boy showing 2 is to 3 ratio on a card.'
+ },
+ image_2: {
+ file_name: 'ratios_question.png',
+            alt: 'A smoothie shop and a card having question "What does a ratio ' +
+            'tell us?" with options.'
+ },
video: 'ratios_video.mp4',
+ lessons: [
+ 'What is a Ratio?',
+ 'Equivalent Ratios',
+ 'Ratios & Proportional Reasoning',
+ 'Writing Ratios in Simplest Form'
+ ]
}
}
}
});
-
-
oppia.controller('TopicLandingPage', [
'$filter', '$scope', '$timeout', '$window', 'PageTitleService',
'SiteAnalyticsService', 'UrlInterpolationService', 'TOPIC_LANDING_PAGE_DATA',
@@ -56,36 +105,42 @@ oppia.controller('TopicLandingPage', [
SiteAnalyticsService, UrlInterpolationService, TOPIC_LANDING_PAGE_DATA) {
var pathArray = $window.location.pathname.split('/');
$scope.subject = pathArray[2];
- $scope.topic = pathArray[3];
- var landingPageData = (
- TOPIC_LANDING_PAGE_DATA[$scope.subject][$scope.topic].page_data);
+ var topic = pathArray[3];
+ var topicData = TOPIC_LANDING_PAGE_DATA[$scope.subject][topic];
+ var landingPageData = topicData.page_data;
var assetsPathFormat = '/landing/<subject>/<topic>/<file_name>';
-
- var capitalizedTopic = $filter('capitalize')($scope.topic);
- var pageTitle = 'Learn ' + capitalizedTopic + ' - Oppia';
+ $scope.topicTitle = topicData.topic_title;
+ $scope.lessons = landingPageData.lessons;
+ var pageTitle = 'Learn ' + $scope.topicTitle + ' - Oppia';
PageTitleService.setPageTitle(pageTitle);
+ $scope.bookImageUrl = UrlInterpolationService.getStaticImageUrl(
+ '/splash/books.svg');
- $scope.getRowImageUrl = function(index) {
+ var getImageData = function(index) {
var imageKey = 'image_' + index;
if (landingPageData[imageKey]) {
var imagePath = UrlInterpolationService.interpolateUrl(
angular.copy(assetsPathFormat), {
subject: $scope.subject,
- topic: $scope.topic,
- file_name: landingPageData[imageKey]
+ topic: topic,
+ file_name: landingPageData[imageKey].file_name
});
- return UrlInterpolationService.getStaticImageUrl(imagePath);
- } else {
- throw Error('page_data does not have ' + imageKey + ' key.');
+ return {
+ src: UrlInterpolationService.getStaticImageUrl(imagePath),
+ alt: landingPageData[imageKey].alt
+ };
}
};
+ $scope.image1 = getImageData(1);
+ $scope.image2 = getImageData(2);
+
$scope.getVideoUrl = function() {
if (landingPageData.video) {
var videoPath = UrlInterpolationService.interpolateUrl(
angular.copy(assetsPathFormat), {
subject: $scope.subject,
- topic: $scope.topic,
+ topic: topic,
file_name: landingPageData.video
});
return UrlInterpolationService.getStaticVideoUrl(videoPath);
@@ -94,14 +149,8 @@ oppia.controller('TopicLandingPage', [
}
};
- $scope.getStaticSubjectImageUrl = function(subjectName) {
- return UrlInterpolationService.getStaticImageUrl(
- '/subjects/' + subjectName + '.svg');
- };
-
$scope.onClickGetStartedButton = function() {
- var collectionId = (
- TOPIC_LANDING_PAGE_DATA[$scope.subject][$scope.topic].collection_id);
+ var collectionId = topicData.collection_id;
SiteAnalyticsService.registerOpenCollectionFromLandingPageEvent(
collectionId);
$timeout(function() {
diff --git a/core/templates/dev/head/pages/landing-pages/topic-landing-page/topic-landing-page.mainpage.html b/core/templates/dev/head/pages/landing-pages/topic-landing-page/topic-landing-page.mainpage.html
index 3d9162c73f56b..20e8fa0ef8322 100644
--- a/core/templates/dev/head/pages/landing-pages/topic-landing-page/topic-landing-page.mainpage.html
+++ b/core/templates/dev/head/pages/landing-pages/topic-landing-page/topic-landing-page.mainpage.html
@@ -1,135 +1,39 @@
{% extends 'dist/base.html' %}
{% block content %}
- <div ng-controller="TopicLandingPage">
+ <div ng-controller="TopicLandingPage" class="oppia-landing-page">
<div class="oppia-landing-section text-center" style="background-color: #afd2eb">
<div class="oppia-landing-section-inner">
<div class="container-fluid">
- <div class="row">
- <div class="col-sm-6 col-sm-push-6 oppia-landing-image-div" style="height: auto">
- <img ng-src="<[getRowImageUrl(1)]>" class="oppia-landing-image" style="width: 60%;" alt="">
+ <background-banner class="oppia-landing-background-image"></background-banner>
+ <div class="row oppia-landing-section-row">
+ <div class="col-sm-6 col-sm-push-6 oppia-landing-image-div">
+ <img ng-src="<[image1.src]>" class="oppia-landing-image" alt="<[image1.alt]>">
</div>
- <div class="col-sm-6 col-sm-pull-6" style="z-index: 20">
- <div class="oppia-landing-text-box-0">
- <h1 class="oppia-landing-h1"><[topic | capitalize]> just got easier</h1>
- <h2 class="oppia-landing-h2" style="padding-right: 15px;">Get your students and kids started with our free, effective <[subject]> lessons.</h2>
+ <div class="col-sm-6 col-sm-pull-6">
+ <div class="oppia-landing-text-box-1">
+ <h1 class="oppia-landing-h1 oppia-text-color-green"><[topicTitle]> just got easier</h1>
+ <h2 class="oppia-landing-h2 oppia-text-color-green">Get your students and kids started with our free, effective <[subject]> lessons.</h2>
</div>
- <button class="btn oppia-landing-get-started" ng-click="onClickGetStartedButton('teacher')">Get Started</button>
- <button class="btn oppia-landing-learn-more" ng-click="onClickLearnMoreButton()">Learn More</button>
+ <button class="btn oppia-landing-page-button" ng-click="onClickGetStartedButton('teacher')">Get Started</button>
+ <button class="btn oppia-landing-page-button oppia-make-button-transparent" ng-click="onClickLearnMoreButton()">Learn More</button>
</div>
</div>
</div>
- <div style="position: absolute; top: 115px">
- <div class="oppia-landing-background-icon-row" style="margin-top: 20px">
- <img ng-src="<[getStaticSubjectImageUrl('Humor')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Combinatorics')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Cooking')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Government')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Architecture')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('History')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Microbiology')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Engineering')]>" class="oppia-landing-background-icon" alt="">
-
- <img ng-src="<[getStaticSubjectImageUrl('Algorithms')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Economics')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Computing')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Reading')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Art')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Creativity')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Physics')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Language')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Arithmetic')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Chess')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Astronomy')]>" class="oppia-landing-background-icon" alt="">
-
- <img ng-src="<[getStaticSubjectImageUrl('Religion')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Mathematics')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Philosophy')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Humor')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Combinatorics')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Cooking')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Government')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Architecture')]>" class="oppia-landing-background-icon" alt="">
- </div>
-
- <div class="oppia-landing-background-icon-row">
- <img ng-src="<[getStaticSubjectImageUrl('Genetics')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Space')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Algebra')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Music')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Chemistry')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Poetry')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Puzzles')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Calculus')]>" class="oppia-landing-background-icon" alt="">
-
- <img ng-src="<[getStaticSubjectImageUrl('Business')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Geography')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Biology')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Genetics')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Space')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Algebra')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Music')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Chemistry')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Poetry')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Puzzles')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Calculus')]>" class="oppia-landing-background-icon" alt="">
-
- <img ng-src="<[getStaticSubjectImageUrl('Business')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Geography')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Biology')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Genetics')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Space')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Algebra')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Music')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Chemistry')]>" class="oppia-landing-background-icon" alt="">
- </div>
-
- <div class="oppia-landing-background-icon-row">
- <img ng-src="<[getStaticSubjectImageUrl('Economics')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Algorithms')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Creativity')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Astronomy')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Chess')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Arithmetic')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Language')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Physics')]>" class="oppia-landing-background-icon" alt="">
-
- <img ng-src="<[getStaticSubjectImageUrl('Combinatorics')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Humor')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Philosophy')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Mathematics')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Religion')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Cooking')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Engineering')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Microbiology')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('History')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Architecture')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Government')]>" class="oppia-landing-background-icon" alt="">
-
- <img ng-src="<[getStaticSubjectImageUrl('Art')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Reading')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Computing')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Economics')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Algorithms')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Creativity')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Astronomy')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Chess')]>" class="oppia-landing-background-icon" alt="">
- </div>
- </div>
</div>
</div>
<div class="oppia-landing-section text-center" style="background-color: #e8e7e3">
<div class="oppia-landing-section-inner">
<div class="container-fluid">
- <div class="row">
+ <div class="row oppia-landing-section-row">
<div class="col-sm-6 col-sm-push-6 oppia-landing-image-div">
- <img ng-src="<[getRowImageUrl(2)]>" class="oppia-landing-image" alt="">
+ <img ng-src="<[image2.src]>" class="oppia-landing-image" alt="<[image2.alt]>">
</div>
<div class="col-sm-6 col-sm-pull-6">
<div class="oppia-landing-text-box-1">
- <h1 class="oppia-landing-h1" style="color: #242424">Fun storytelling for all</h1>
- <h2 class="oppia-landing-h2" style="color: #242424">Students are guided through explorations with targeted feedback and immersive storytelling.
+ <h1 class="oppia-landing-h1">Fun storytelling for all</h1>
+ <h2 class="oppia-landing-h2">Students are guided through explorations with targeted feedback and immersive storytelling.
<br>
<br>
Oppia guides students step-by-step with helpful hints, so they can complete the lessons on their own.
@@ -144,14 +48,10 @@ <h2 class="oppia-landing-h2" style="color: #242424">Students are guided through
<div class="oppia-landing-section text-center" style="background-color: #429488; ">
<div class="oppia-landing-section-inner" style="margin-bottom: 2%;">
<div class="container-fluid">
- <div class="row">
+ <div class="row oppia-landing-section-row">
<div class="col-sm-6 oppia-landing-image-div">
<div class="oppia-landing-video-frame">
- <video controls type="video/mp4" ng-src="<[getVideoUrl()]>" class="oppia-landing-image
- oppia-landing-image-desktop" onclick="this.paused ? this.play() : this.pause();">Sorry, your browser doesn't support embedded videos.
- </video>
- <video controls type="video/mp4" ng-src="<[getVideoUrl()]>" class="oppia-landing-image
- oppia-landing-image-mobile" onclick="this.paused ? this.play() : this.pause();">Sorry, your browser doesn't support embedded videos.
+ <video controls type="video/mp4" ng-src="<[getVideoUrl()]>" onclick="this.paused ? this.play() : this.pause();">Sorry, your browser doesn't support embedded videos.
</video>
</div>
</div>
@@ -168,82 +68,62 @@ <h2 class="oppia-landing-h2" style="color: #FFFFFF;">By working through lessons
</div>
</div>
- <div class="oppia-landing-section text-center" style="background-color: #afd2eb">
+ <div class="oppia-landing-section text-center" style="background-color: #e8e7e3">
<div class="oppia-landing-section-inner">
- <h1 class="oppia-landing-h1" style="margin-left: 10%; margin-right: 10%; padding-left: 0px; padding-bottom: 10px; text-align: center;white-space: pre-line;">Imagine what your students could learn today!</h1>
- <button class="btn oppia-landing-get-started oppia-landing-get-started-mobile" style="margin-top: 2%;" ng-click="onClickGetStartedButton('teacher')">Get Started</button>
- <h2 class="oppia-landing-h2 oppia-landing-centered-h2 library-text" style="margin-top: 2.5%; text-align: center; width: 55%;">To see high quality lessons on subjects other than <[topic | capitalize]>, visit our Library.</h2>
- <button class="btn oppia-landing-explore-lessons oppia-landing-explore-lessons-mobile" ng-click="onClickExploreLessonsButton()">Explore Lessons</button>
- <div style="position: absolute; top: 40%">
- <div class="oppia-landing-background-icon-row">
- <img ng-src="<[getStaticSubjectImageUrl('Humor')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Combinatorics')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Cooking')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Government')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Architecture')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('History')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Microbiology')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Engineering')]>" class="oppia-landing-background-icon" alt="">
-
- <img ng-src="<[getStaticSubjectImageUrl('Algorithms')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Economics')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Computing')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Reading')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Art')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Creativity')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Physics')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Language')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Arithmetic')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Chess')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Astronomy')]>" class="oppia-landing-background-icon" alt="">
-
- <img ng-src="<[getStaticSubjectImageUrl('Religion')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Mathematics')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Philosophy')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Humor')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Combinatorics')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Cooking')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Government')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Architecture')]>" class="oppia-landing-background-icon" alt="">
+ <div class="container-fluid">
+ <div class="row oppia-landing-section-row">
+ <div class="col-sm-6 col-sm-push-6 oppia-landing-image-div">
+ <img ng-src="<[bookImageUrl]>" class="oppia-landing-image" alt="">
+ </div>
+ <div class="col-sm-6 col-sm-pull-6">
+ <div class="oppia-landing-text-box-1">
+ <h1 class="oppia-landing-h1 text-center">Topics covered in this lesson</h1>
+ <br>
+ <h2 class="oppia-landing-h2 oppia-lessons-title oppia-text-color-green" ng-repeat="lessonTitle in lessons"><[lessonTitle]></h2>
+ <h2 class="oppia-landing-h2 oppia-lessons-title oppia-text-color-black">... and more!</h2>
+ </div>
+ </div>
</div>
+ </div>
+ </div>
+ </div>
- <div class="oppia-landing-background-icon-row">
- <img ng-src="<[getStaticSubjectImageUrl('Genetics')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Space')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Algebra')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Music')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Chemistry')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Poetry')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Puzzles')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Calculus')]>" class="oppia-landing-background-icon" alt="">
-
- <img ng-src="<[getStaticSubjectImageUrl('Business')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Geography')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Biology')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Genetics')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Space')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Algebra')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Music')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Chemistry')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Poetry')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Puzzles')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Calculus')]>" class="oppia-landing-background-icon" alt="">
-
- <img ng-src="<[getStaticSubjectImageUrl('Business')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Geography')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Biology')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Genetics')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Space')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Algebra')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Music')]>" class="oppia-landing-background-icon" alt="">
- <img ng-src="<[getStaticSubjectImageUrl('Chemistry')]>" class="oppia-landing-background-icon" alt="">
+ <div class="oppia-landing-section text-center" style="background-color: #afd2eb">
+ <div class="oppia-landing-section-inner">
+ <div class="container-fluid">
+ <background-banner class="oppia-landing-background-image"></background-banner>
+ <div class="row oppia-landing-section-row">
+ <div class="col-sm-12">
+ <h1 class="oppia-landing-h1 oppia-text-centered oppia-text-color-green" style="padding: 0">Imagine what your students could learn today!</h1>
+ </div>
+ </div>
+ <div class="row oppia-landing-section-row">
+ <div class="col-sm-12">
+ <button class="btn oppia-landing-page-button" ng-click="onClickGetStartedButton('teacher')">Get Started</button>
+ </div>
+ </div>
+ <div class="row oppia-landing-section-row">
+ <div class="col-sm-12">
+ <h2 class="oppia-landing-h2 oppia-text-centered oppia-text-color-green">
+ To see high quality lessons on subjects other than <[topicTitle]>, visit our Library.
+ </h2>
+ </div>
+ </div>
+ <div class="row oppia-landing-section-row oppia-text-color-green">
+ <div class="col-sm-12">
+ <button class="btn oppia-landing-page-button" ng-click="onClickExploreLessonsButton()">Explore Lessons</button>
+ </div>
</div>
</div>
</div>
+
</div>
</div>
<style>
+ .oppia-landing-page h1, h2, button {
+ font-family: "Capriola", "Roboto", Arial, sans-serif;
+ }
.oppia-landing-section {
height: auto;
margin-left: auto;
@@ -251,87 +131,72 @@ <h2 class="oppia-landing-h2 oppia-landing-centered-h2 library-text" style="margi
overflow: hidden;
position: relative;
}
+ .oppia-landing-section-row {
+ display: flex;
+ align-items: center;
+ }
+ .oppia-landing-background-image {
+ zoom: 2;
+ }
.oppia-landing-h1 {
font-size: 2.8vw;
margin: 0;
text-align: left;
- top: 160px;
white-space: pre;
}
- .oppia-landing-section-inner {
- height: auto;
- margin-left: auto;
- margin-right: auto;
- padding-bottom: 3%;
- padding-top: 8%;
- position: relative;
- }
.oppia-landing-h2 {
font-size: 1.95vmax;
line-height: 1.6em;
margin-top: 5%;
- position: relative;
text-align: left;
width: 80%;
- z-index: 20;
}
- .oppia-landing-h1, .oppia-landing-h2 {
- color: #005c5e;
- font-family: "Capriola", "Roboto", Arial, sans-serif;
+ .oppia-text-centered {
+ padding: 0 25%;
+ margin: 2% 0;
+ text-align: center;
+ width: 100%;
}
-
- .oppia-landing-learn-more,
- .oppia-landing-get-started,
- .oppia-landing-explore-lessons {
- border-radius: 0;
- font-family: "Capriola", "Roboto", Arial, sans-serif;
- font-size: 1.9vmax;
- height: 56px;
- position: relative;
+ .oppia-landing-section-inner {
+ height: auto;
+ margin-left: auto;
+ margin-right: auto;
+ padding-bottom: 8%;
+ padding-top: 6%;
+ }
+ .oppia-lessons-title {
+ margin: 10px;
text-align: center;
- text-transform: uppercase;
- width: 38%;
- z-index: 20;
+ width: 100%;
}
- .oppia-landing-get-started,
- .oppia-landing-explore-lessons {
+ .oppia-landing-page-button {
background-color: #015c53;
+ border-radius: 0;
+ border: 4px solid #265a53;
color: #fff;
- left: 0;
+ font-size: 1.9vmax;
margin-right: 15px;
+ text-transform: uppercase;
+ width: 40%;
}
- .oppia-landing-learn-more {
+ .oppia-make-button-transparent {
background-color: transparent;
- border: 4px solid #265a53;
- box-sizing: border-box;
color: #265a53;
- margin-right: 15px;
}
- .oppia-landing-get-started:hover,
- .oppia-landing-get-started:focus,
- .oppia-landing-get-started:active,
- .oppia-landing-explore-lessons:hover,
- .oppia-landing-explore-lessons:focus,
- .oppia-landing-explore-lessons:active {
+ .oppia-landing-page-button:hover,
+ .oppia-landing-page-button:focus,
+ .oppia-landing-page-button:active {
background-color: #05beb2;
+ border-color: #05beb2;
color: #fff;
}
- .oppia-landing-learn-more:hover,
- .oppia-landing-learn-more:focus,
- .oppia-landing-learn-more:active {
+ .oppia-make-button-transparent:hover,
+ .oppia-make-button-transparent:focus,
+ .oppia-make-button-transparent:active {
+ background-color: transparent;
border-color: #05beb2;
color: #05beb2;
}
- .oppia-landing-centered-h2 {
- left: 0px;
- margin-left: 25%;
- margin-right: 25%;
- padding-left: 0px;
- }
- .library-text {
- margin-left: 22%;
- }
- .oppia-landing-text-box-0,
.oppia-landing-text-box-1,
.oppia-landing-text-box-2 {
margin-left: 60px;
@@ -341,35 +206,15 @@ <h2 class="oppia-landing-h2 oppia-landing-centered-h2 library-text" style="margi
margin-top: 100px;
padding-right: 40px;
}
- .oppia-landing-background-icon-row {
- margin-top: 0;
- margin-bottom: 0;
- margin-left: -webkit-calc((100% - 2700px) / 2);
- margin-left: -moz-calc((100% - 2700px) / 2);
- margin-left: -o-calc((100% - 2700px) / 2);
- margin-left: calc((100% - 2700px) / 2);
- margin-right: -webkit-calc((100% - 2700px) / 2);
- margin-right: -moz-calc((100% - 2700px) / 2);
- margin-right: -o-calc((100% - 2700px) / 2);
- margin-right: calc((100% - 2700px) / 2);
- opacity: 0.4;
- position: relative;
- text-align: center;
- width: 2700px;
- }
- .oppia-landing-background-icon {
- margin: -1px;
- max-width: 96px;
- width: 10%;
- }
.oppia-landing-image {
max-width: 100%;
- max-height: 500px;
- position: relative;
- z-index: 20;
+ max-height: 400px;
}
- .oppia-landing-image-mobile {
- display: none;
+ .oppia-text-color-black {
+ color: #242424;
+ }
+ .oppia-text-color-green {
+ color: #005c5e;
}
.oppia-landing-video-frame {
border: 11px black solid;
@@ -377,7 +222,7 @@ <h2 class="oppia-landing-h2 oppia-landing-centered-h2 library-text" style="margi
border-bottom-width: 50px;
border-radius: 36px;
background: #000000;
- height: 600px;
+ min-height: 500px;
margin: auto;
position: relative;
width: 270px;
@@ -420,60 +265,43 @@ <h2 class="oppia-landing-h2 oppia-landing-centered-h2 library-text" style="margi
.oppia-landing-section {
height: auto;
}
+ .oppia-landing-section-row {
+ display: block;
+ }
.oppia-landing-h1 {
font-size: 1.6em;
- margin-left: 10%;
- margin-right: 20%;
- padding-left: 0;
- padding-right: 0;
padding-top: 40px;
text-align: center;
- width: 80%;
+ white-space: pre-line;
}
.oppia-landing-h2 {
font-size: 1em;
- left: 10%;
+ padding: 0 10%;
text-align: center;
- width: 80%;
+ width: 100%;
+ }
+ .oppia-landing-background-image {
+ zoom: 1.3;
+ }
+ .oppia-landing-image {
+ max-height: 250px;
}
- .oppia-landing-text-box-0,
.oppia-landing-text-box-1,
.oppia-landing-text-box-2 {
margin-left: auto;
}
- .oppia-landing-learn-more,
- .oppia-landing-get-started,
- .oppia-landing-explore-lessons {
+ .oppia-landing-page-button {
font-size: 2.3vmax;
margin-bottom: 30px;
width: auto;
}
- .oppia-landing-get-started-mobile {
- left: 9.5px;
- }
- .oppia-landing-explore-lessons-mobile {
- left: 10.5px;
- }
- .oppia-landing-image-mobile {
- display: block;
- margin: auto;
- }
- .oppia-landing-image-desktop {
- display: none;
- }
- .oppia-landing-centered-h2 {
- left: 0;
- margin-right: 10%;
- }
.oppia-landing-text-box-2 {
margin-top: 0;
padding-right: inherit;
}
}
@media screen and (max-width: 320px) {
- .oppia-landing-learn-more,
- .oppia-landing-get-started,
- .oppia-landing-explore-lessons {
+ .oppia-landing-page-button {
font-size: 2.8vmax;
}
.oppia-landing-h1 {
diff --git a/feconf.py b/feconf.py
index 4542194789af3..34a549dbc8e87 100644
--- a/feconf.py
+++ b/feconf.py
@@ -951,5 +951,5 @@ def get_empty_ratings():
# oppia constant defined in
# core/templates/dev/head/pages/landing-pages/TopicLandingPage.js file.
AVAILABLE_LANDING_PAGES = {
- 'maths': ['fractions', 'ratios']
+ 'maths': ['fractions', 'negative-numbers', 'ratios']
}
|
networkx__networkx-4326 | Use a utf8 friendly latex backend
The current Sphinx configuration in doc/conf.py defaults to pdflatex. This is causing problems on #4169, which introduces API-level doctests with Unicode characters in them. I tried several iterations of lualatex and xelatex to try to get it to work, but LaTeX errors are never the most helpful.
I will open a PR to resolve this shortly.
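
For context, the change under consideration is a one-line Sphinx setting; a minimal sketch of what that could look like in doc/conf.py, assuming xelatex ends up being the engine of choice:

```python
# Sketch only: point Sphinx's LaTeX builder at a Unicode-aware engine so
# docstrings and doctests containing non-ASCII characters build cleanly.
# lualatex would be configured the same way if it proves more reliable.
latex_engine = "xelatex"
```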
| [
{
"content": "from datetime import date\nfrom sphinx_gallery.sorting import ExplicitOrder\nimport sphinx_rtd_theme\nfrom warnings import filterwarnings\n\nfilterwarnings(\n \"ignore\", message=\"Matplotlib is currently using agg\", category=UserWarning\n)\n\n# General configuration\n# ---------------------\n\n# Add any Sphinx extension module names here, as strings. They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = [\n \"sphinx.ext.autosummary\",\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.coverage\",\n \"sphinx.ext.doctest\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.mathjax\",\n \"sphinx.ext.napoleon\",\n \"sphinx.ext.todo\",\n \"sphinx.ext.viewcode\",\n \"sphinx_gallery.gen_gallery\",\n \"nb2plots\",\n \"texext\",\n]\n\n# https://github.com/sphinx-gallery/sphinx-gallery\nsphinx_gallery_conf = {\n # path to your examples scripts\n \"examples_dirs\": \"../examples\",\n \"subsection_order\": ExplicitOrder(\n [\n \"../examples/basic\",\n \"../examples/drawing\",\n \"../examples/graph\",\n \"../examples/algorithms\",\n \"../examples/advanced\",\n \"../examples/3d_drawing\",\n \"../examples/pygraphviz\",\n \"../examples/geospatial\",\n \"../examples/javascript\",\n \"../examples/jit\",\n \"../examples/applications\",\n \"../examples/subclass\",\n ]\n ),\n # path where to save gallery generated examples\n \"gallery_dirs\": \"auto_examples\",\n \"backreferences_dir\": \"modules/generated\",\n}\n\n# generate autosummary pages\nautosummary_generate = True\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\nsuppress_warnings = [\"ref.citation\", \"ref.footnote\"]\n\n# The suffix of source filenames.\nsource_suffix = \".rst\"\n\n# The encoding of source files.\nsource_encoding = \"utf-8\"\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# Do not include release announcement template\nexclude_patterns = [\"release/release_template.rst\"]\n\n# General substitutions.\nproject = \"NetworkX\"\ncopyright = f\"2004-{date.today().year}, NetworkX Developers\"\n\n# The default replacements for |version| and |release|, also used in various\n# other places throughout the built documents.\n#\n# The short X.Y version.\nimport networkx\n\nversion = networkx.__version__\n# The full version, including dev info\nrelease = networkx.__version__.replace(\"_\", \"\")\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n# today = ''\n# Else, today_fmt is used as the format for a strftime call.\n# today_fmt = '%B %d, %Y'\n\n# List of documents that shouldn't be included in the build.\n# unused_docs = ['']\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n# add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\nadd_module_names = False\n\n# show_authors = True\n\n# The name of the Pygments (syntax highlighting) style to use.\n# pygments_style = 'friendly'\npygments_style = \"sphinx\"\n\n# A list of prefixs that are ignored when creating the module index. 
(new in Sphinx 0.6)\nmodindex_common_prefix = [\"networkx.\"]\n\ndoctest_global_setup = \"import networkx as nx\"\n\n# treat ``x, y : type`` as vars x and y instead of default ``y(x,) : type``\nnapoleon_use_param = False\n\n# Options for HTML output\n# -----------------------\n\n\nhtml_theme = \"sphinx_rtd_theme\"\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\nhtml_theme_options = {\n \"canonical_url\": \"https://networkx.org/documentation/stable/\",\n \"navigation_depth\": 3,\n \"logo_only\": True,\n}\n\nhtml_logo = \"_static/networkx_logo.svg\"\n\n# The style sheet to use for HTML and HTML Help pages. A file of that name\n# must exist either in Sphinx' static/ path, or in one of the custom paths\n# given in html_static_path.\n# html_style = ''\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\nhtml_last_updated_fmt = \"%b %d, %Y\"\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n# html_use_smartypants = True\n\n# Content template for the index page.\n# html_index = 'index.html'\n\n# Custom sidebar templates, maps page names to templates.\n# html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# templates.\n# html_additional_pages = {'': ''}\n\n# If true, the reST sources are included in the HTML build as _sources/<name>.\nhtml_copy_source = False\n\nhtml_use_opensearch = \"https://networkx.org\"\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"NetworkX\"\n\n# Options for LaTeX output\n# ------------------------\n\n# The paper size ('letter' or 'a4').\nlatex_paper_size = \"letter\"\n\n# The font size ('10pt', '11pt' or '12pt').\n# latex_font_size = '10pt'\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title, author, document class [howto/manual]).\nlatex_documents = [\n (\n \"reference/index\",\n \"networkx_reference.tex\",\n \"NetworkX Reference\",\n \"Aric Hagberg, Dan Schult, Pieter Swart\",\n \"manual\",\n 1,\n )\n]\n\nlatex_appendices = [\"tutorial\"]\n\n# Intersphinx mapping\nintersphinx_mapping = {\n \"https://docs.python.org/3/\": None,\n \"https://numpy.org/doc/stable/\": None,\n}\n\n# The reST default role (used for this markup: `text`) to use for all\n# documents.\ndefault_role = \"obj\"\n\nnumpydoc_show_class_members = False\n\n\ndef setup(app):\n app.add_css_file(\"custom.css\")\n app.add_js_file(\"copybutton.js\")\n",
"path": "doc/conf.py"
}
] | [
{
"content": "from datetime import date\nfrom sphinx_gallery.sorting import ExplicitOrder\nimport sphinx_rtd_theme\nfrom warnings import filterwarnings\n\nfilterwarnings(\n \"ignore\", message=\"Matplotlib is currently using agg\", category=UserWarning\n)\n\n# General configuration\n# ---------------------\n\n# Add any Sphinx extension module names here, as strings. They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = [\n \"sphinx.ext.autosummary\",\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.coverage\",\n \"sphinx.ext.doctest\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.mathjax\",\n \"sphinx.ext.napoleon\",\n \"sphinx.ext.todo\",\n \"sphinx.ext.viewcode\",\n \"sphinx_gallery.gen_gallery\",\n \"nb2plots\",\n \"texext\",\n]\n\n# https://github.com/sphinx-gallery/sphinx-gallery\nsphinx_gallery_conf = {\n # path to your examples scripts\n \"examples_dirs\": \"../examples\",\n \"subsection_order\": ExplicitOrder(\n [\n \"../examples/basic\",\n \"../examples/drawing\",\n \"../examples/graph\",\n \"../examples/algorithms\",\n \"../examples/advanced\",\n \"../examples/3d_drawing\",\n \"../examples/pygraphviz\",\n \"../examples/geospatial\",\n \"../examples/javascript\",\n \"../examples/jit\",\n \"../examples/applications\",\n \"../examples/subclass\",\n ]\n ),\n # path where to save gallery generated examples\n \"gallery_dirs\": \"auto_examples\",\n \"backreferences_dir\": \"modules/generated\",\n}\n\n# generate autosummary pages\nautosummary_generate = True\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\nsuppress_warnings = [\"ref.citation\", \"ref.footnote\"]\n\n# The suffix of source filenames.\nsource_suffix = \".rst\"\n\n# The encoding of source files.\nsource_encoding = \"utf-8\"\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# Do not include release announcement template\nexclude_patterns = [\"release/release_template.rst\"]\n\n# General substitutions.\nproject = \"NetworkX\"\ncopyright = f\"2004-{date.today().year}, NetworkX Developers\"\n\n# The default replacements for |version| and |release|, also used in various\n# other places throughout the built documents.\n#\n# The short X.Y version.\nimport networkx\n\nversion = networkx.__version__\n# The full version, including dev info\nrelease = networkx.__version__.replace(\"_\", \"\")\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n# today = ''\n# Else, today_fmt is used as the format for a strftime call.\n# today_fmt = '%B %d, %Y'\n\n# List of documents that shouldn't be included in the build.\n# unused_docs = ['']\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n# add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\nadd_module_names = False\n\n# show_authors = True\n\n# The name of the Pygments (syntax highlighting) style to use.\n# pygments_style = 'friendly'\npygments_style = \"sphinx\"\n\n# A list of prefixs that are ignored when creating the module index. 
(new in Sphinx 0.6)\nmodindex_common_prefix = [\"networkx.\"]\n\ndoctest_global_setup = \"import networkx as nx\"\n\n# treat ``x, y : type`` as vars x and y instead of default ``y(x,) : type``\nnapoleon_use_param = False\n\n# Options for HTML output\n# -----------------------\n\n\nhtml_theme = \"sphinx_rtd_theme\"\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\nhtml_theme_options = {\n \"canonical_url\": \"https://networkx.org/documentation/stable/\",\n \"navigation_depth\": 3,\n \"logo_only\": True,\n}\n\nhtml_logo = \"_static/networkx_logo.svg\"\n\n# The style sheet to use for HTML and HTML Help pages. A file of that name\n# must exist either in Sphinx' static/ path, or in one of the custom paths\n# given in html_static_path.\n# html_style = ''\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\nhtml_last_updated_fmt = \"%b %d, %Y\"\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n# html_use_smartypants = True\n\n# Content template for the index page.\n# html_index = 'index.html'\n\n# Custom sidebar templates, maps page names to templates.\n# html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# templates.\n# html_additional_pages = {'': ''}\n\n# If true, the reST sources are included in the HTML build as _sources/<name>.\nhtml_copy_source = False\n\nhtml_use_opensearch = \"https://networkx.org\"\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"NetworkX\"\n\n# Options for LaTeX output\n# ------------------------\n\n# Use a latex engine that allows for unicode characters in docstrings\nlatex_engine = \"xelatex\"\n# The paper size ('letter' or 'a4').\nlatex_paper_size = \"letter\"\n\n# The font size ('10pt', '11pt' or '12pt').\n# latex_font_size = '10pt'\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title, author, document class [howto/manual]).\nlatex_documents = [\n (\n \"reference/index\",\n \"networkx_reference.tex\",\n \"NetworkX Reference\",\n \"Aric Hagberg, Dan Schult, Pieter Swart\",\n \"manual\",\n 1,\n )\n]\n\nlatex_appendices = [\"tutorial\"]\n\n# Intersphinx mapping\nintersphinx_mapping = {\n \"https://docs.python.org/3/\": None,\n \"https://numpy.org/doc/stable/\": None,\n}\n\n# The reST default role (used for this markup: `text`) to use for all\n# documents.\ndefault_role = \"obj\"\n\nnumpydoc_show_class_members = False\n\n\ndef setup(app):\n app.add_css_file(\"custom.css\")\n app.add_js_file(\"copybutton.js\")\n",
"path": "doc/conf.py"
}
] | diff --git a/.circleci/config.yml b/.circleci/config.yml
index 8ba6d421622..a7ba4d54654 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -23,7 +23,7 @@ jobs:
- run:
name: Install TeX
command: |
- sudo apt-get install texlive texlive-latex-extra latexmk
+ sudo apt-get install texlive texlive-latex-extra latexmk texlive-xetex fonts-freefont-otf xindy
- run:
name: Install cartopy dependencies
diff --git a/.github/workflows/deploy-docs.yml b/.github/workflows/deploy-docs.yml
index ae24a8f8b03..8abffa754ad 100644
--- a/.github/workflows/deploy-docs.yml
+++ b/.github/workflows/deploy-docs.yml
@@ -21,7 +21,8 @@ jobs:
run: |
sudo apt-get update
sudo apt-get install libgdal-dev graphviz graphviz-dev
- sudo apt-get install texlive texlive-latex-extra latexmk
+ sudo apt-get install texlive texlive-latex-extra latexmk texlive-xetex
+ sudo apt-get install fonts-freefont-otf xindy
sudo apt-get install libgeos-dev libproj-dev
sudo apt-get install libspatialindex-dev
python3 -m venv ~/venv
diff --git a/.travis.yml b/.travis.yml
index e7bb6ba6cd8..ff15808ae8a 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -20,6 +20,9 @@ matrix:
- texlive
- texlive-latex-extra
- latexmk
+ - texlive-xetex
+ - fonts-freefont-otf
+ - xindy
- libgeos-dev
- libproj-dev
- libspatialindex-dev
diff --git a/doc/conf.py b/doc/conf.py
index b58f9aaea6d..12998e33980 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -170,6 +170,8 @@
# Options for LaTeX output
# ------------------------
+# Use a latex engine that allows for unicode characters in docstrings
+latex_engine = "xelatex"
# The paper size ('letter' or 'a4').
latex_paper_size = "letter"
|
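A minimal sketch of the Sphinx settings behind the change above (the `latex_engine` line is what the diff adds; the `latex_elements` block is an illustrative assumption, not part of the PR). XeLaTeX handles the Unicode characters that pdflatex rejects in docstrings, which is why the CI jobs also gain `texlive-xetex`, `fonts-freefont-otf`, and `xindy`:
```
# doc/conf.py (sketch): switch the LaTeX builder to a Unicode-capable engine
latex_engine = "xelatex"

# Hypothetical extra, shown only for context: current Sphinx prefers
# latex_elements over the legacy latex_paper_size option used above.
latex_elements = {
    "papersize": "letterpaper",
}
```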
sosreport__sos-3483 | Obtain CNI files for containerd
Containerd uses the CNI configuration present in the defined folders by the configuration
```
[plugins."io.containerd.grpc.v1.cri".cni]
conf_dir = "/etc/cni/net.d
```
It will be very useful to obtain the cni configurations present on the folder for debugging networking related problems
https://github.com/sosreport/sos/blob/b94ced8370824bd62f3c7573ae33fcb96c5da531/sos/report/plugins/containerd.py#L12-L28
| [
{
"content": "# This file is part of the sos project: https://github.com/sosreport/sos\n#\n# This copyrighted material is made available to anyone wishing to use,\n# modify, copy, or redistribute it subject to the terms and conditions of\n# version 2 of the GNU General Public License.\n#\n# See the LICENSE file in the source distribution for further information.\n\nfrom sos.report.plugins import (Plugin, RedHatPlugin, UbuntuPlugin, CosPlugin)\n\n\nclass Containerd(Plugin, RedHatPlugin, UbuntuPlugin, CosPlugin):\n\n short_desc = 'Containerd containers'\n plugin_name = 'containerd'\n profiles = ('container',)\n packages = ('containerd', 'containerd.io',)\n\n def setup(self):\n self.add_copy_spec([\n \"/etc/containerd/\",\n ])\n\n self.add_cmd_output('containerd config dump')\n\n # collect the containerd logs.\n self.add_journal(units='containerd')\n\n# vim: set et ts=4 sw=4 :\n",
"path": "sos/report/plugins/containerd.py"
}
] | [
{
"content": "# This file is part of the sos project: https://github.com/sosreport/sos\n#\n# This copyrighted material is made available to anyone wishing to use,\n# modify, copy, or redistribute it subject to the terms and conditions of\n# version 2 of the GNU General Public License.\n#\n# See the LICENSE file in the source distribution for further information.\n\nfrom sos.report.plugins import (Plugin, RedHatPlugin, UbuntuPlugin, CosPlugin)\n\n\nclass Containerd(Plugin, RedHatPlugin, UbuntuPlugin, CosPlugin):\n\n short_desc = 'Containerd containers'\n plugin_name = 'containerd'\n profiles = ('container',)\n packages = ('containerd', 'containerd.io',)\n\n def setup(self):\n self.add_copy_spec([\n \"/etc/containerd/\",\n \"/etc/cni/net.d/\",\n ])\n\n self.add_cmd_output('containerd config dump')\n\n # collect the containerd logs.\n self.add_journal(units='containerd')\n\n# vim: set et ts=4 sw=4 :\n",
"path": "sos/report/plugins/containerd.py"
}
] | diff --git a/sos/report/plugins/containerd.py b/sos/report/plugins/containerd.py
index 33231da1ab..ecba988721 100644
--- a/sos/report/plugins/containerd.py
+++ b/sos/report/plugins/containerd.py
@@ -19,6 +19,7 @@ class Containerd(Plugin, RedHatPlugin, UbuntuPlugin, CosPlugin):
def setup(self):
self.add_copy_spec([
"/etc/containerd/",
+ "/etc/cni/net.d/",
])
self.add_cmd_output('containerd config dump')
|
bokeh__bokeh-4542 | clustering app example needs updates for recent changes
Fails because `theme.yaml` tries to set `title_text_font_size` on `Plot` This bypasses the (python) property that deprecates this former `Plot` property, and tries to set a (Bokeh) property with that name directly on the plot. This fails, because of the work to make `Title` its own model.
Will fix up the `theme.yaml` and note this problem in migration guide. Since we barely demonstrated and not discussed the theming, hopefully this will not bite many people at all.
| [
{
"content": "import numpy as np\nnp.random.seed(0)\n\nfrom bokeh.io import curdoc\nfrom bokeh.models import ColumnDataSource, VBox, HBox, Select, Slider\nfrom bokeh.plotting import Figure\nfrom bokeh.palettes import Spectral6\n\nfrom sklearn import cluster, datasets\nfrom sklearn.neighbors import kneighbors_graph\nfrom sklearn.preprocessing import StandardScaler\n\n# define some helper functions\ndef clustering(X, algorithm, n_clusters):\n # normalize dataset for easier parameter selection\n X = StandardScaler().fit_transform(X)\n\n # estimate bandwidth for mean shift\n bandwidth = cluster.estimate_bandwidth(X, quantile=0.3)\n\n # connectivity matrix for structured Ward\n connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)\n\n # make connectivity symmetric\n connectivity = 0.5 * (connectivity + connectivity.T)\n\n # Generate the new colors:\n if algorithm=='MiniBatchKMeans':\n model = cluster.MiniBatchKMeans(n_clusters=n_clusters)\n\n elif algorithm=='Birch':\n model = cluster.Birch(n_clusters=n_clusters)\n\n elif algorithm=='DBSCAN':\n model = cluster.DBSCAN(eps=.2)\n\n elif algorithm=='AffinityPropagation':\n model = cluster.AffinityPropagation(damping=.9,\n preference=-200)\n\n elif algorithm=='MeanShift':\n model = cluster.MeanShift(bandwidth=bandwidth,\n bin_seeding=True)\n\n elif algorithm=='SpectralClustering':\n model = cluster.SpectralClustering(n_clusters=n_clusters,\n eigen_solver='arpack',\n affinity=\"nearest_neighbors\")\n\n elif algorithm=='Ward':\n model = cluster.AgglomerativeClustering(n_clusters=n_clusters,\n linkage='ward',\n connectivity=connectivity)\n\n elif algorithm=='AgglomerativeClustering':\n model = cluster.AgglomerativeClustering(linkage=\"average\",\n affinity=\"cityblock\",\n n_clusters=n_clusters,\n connectivity=connectivity)\n\n model.fit(X)\n\n if hasattr(model, 'labels_'):\n y_pred = model.labels_.astype(np.int)\n else:\n y_pred = model.predict(X)\n\n return X, y_pred\n\ndef get_dataset(dataset, n_samples):\n if dataset == 'Noisy Circles':\n return datasets.make_circles(n_samples=n_samples,\n factor=0.5,\n noise=0.05)\n\n elif dataset == 'Noisy Moons':\n return datasets.make_moons(n_samples=n_samples,\n noise=0.05)\n\n elif dataset == 'Blobs':\n return datasets.make_blobs(n_samples=n_samples,\n random_state=8)\n\n elif dataset == \"No Structure\":\n return np.random.rand(n_samples, 2), None\n\n# set up initial data\nn_samples = 1500\nn_clusters = 2\nalgorithm = 'MiniBatchKMeans'\ndataset = 'Noisy Circles'\n\nX, y = get_dataset(dataset, n_samples)\nX, y_pred = clustering(X, algorithm, n_clusters)\nspectral = np.hstack([Spectral6] * 20)\ncolors = [spectral[i] for i in y]\n\n# set up plot (styling in theme.yaml)\nplot = Figure(toolbar_location=None, title=algorithm)\nsource = ColumnDataSource(data=dict(x=X[:, 0], y=X[:, 1], colors=colors))\nplot.circle('x', 'y', fill_color='colors', line_color=None, source=source)\n\n# set up widgets\nclustering_algorithms= [\n 'MiniBatchKMeans',\n 'AffinityPropagation',\n 'MeanShift',\n 'SpectralClustering',\n 'Ward',\n 'AgglomerativeClustering',\n 'DBSCAN',\n 'Birch'\n]\n\ndatasets_names = [\n 'Noisy Circles',\n 'Noisy Moons',\n 'Blobs',\n 'No Structure'\n]\n\nalgorithm_select = Select(value='MiniBatchKMeans',\n title='Select algorithm:',\n options=clustering_algorithms)\n\ndataset_select = Select(value='Noisy Circles',\n title='Select dataset:',\n options=datasets_names)\n\nsamples_slider = Slider(title=\"Number of samples\",\n value=1500.0,\n start=1000.0,\n end=3000.0,\n step=100)\n\nclusters_slider = 
Slider(title=\"Number of clusters\",\n value=2.0,\n start=2.0,\n end=10.0,\n step=1)\n\n# set up callbacks\ndef update_algorithm_or_clusters(attrname, old, new):\n global X\n\n algorithm = algorithm_select.value\n n_clusters = int(clusters_slider.value)\n\n X, y_pred = clustering(X, algorithm, n_clusters)\n colors = [spectral[i] for i in y_pred]\n\n source.data['colors'] = colors\n source.data['x'] = X[:, 0]\n source.data['y'] = X[:, 1]\n\n plot.title = algorithm\n\ndef update_samples_or_dataset(attrname, old, new):\n global X, y\n\n dataset = dataset_select.value\n algorithm = algorithm_select.value\n n_clusters = int(clusters_slider.value)\n n_samples = int(samples_slider.value)\n\n X, y = get_dataset(dataset, n_samples)\n X, y_pred = clustering(X, algorithm, n_clusters)\n colors = [spectral[i] for i in y_pred]\n\n source.data['x'] = X[:, 0]\n source.data['y'] = X[:, 1]\n source.data['colors'] = colors\n\nalgorithm_select.on_change('value', update_algorithm_or_clusters)\nclusters_slider.on_change('value', update_algorithm_or_clusters)\n\ndataset_select.on_change('value', update_samples_or_dataset)\nsamples_slider.on_change('value', update_samples_or_dataset)\n\n# set up layout\nselects = HBox(dataset_select, algorithm_select)\ninputs = VBox(samples_slider, clusters_slider, selects)\n\n# add to document\ncurdoc().add_root(HBox(inputs, plot, width=800))\n",
"path": "examples/app/clustering/main.py"
}
] | [
{
"content": "import numpy as np\nnp.random.seed(0)\n\nfrom bokeh.io import curdoc\nfrom bokeh.models import ColumnDataSource, VBox, HBox, Select, Slider\nfrom bokeh.plotting import Figure\nfrom bokeh.palettes import Spectral6\n\nfrom sklearn import cluster, datasets\nfrom sklearn.neighbors import kneighbors_graph\nfrom sklearn.preprocessing import StandardScaler\n\n# define some helper functions\ndef clustering(X, algorithm, n_clusters):\n # normalize dataset for easier parameter selection\n X = StandardScaler().fit_transform(X)\n\n # estimate bandwidth for mean shift\n bandwidth = cluster.estimate_bandwidth(X, quantile=0.3)\n\n # connectivity matrix for structured Ward\n connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)\n\n # make connectivity symmetric\n connectivity = 0.5 * (connectivity + connectivity.T)\n\n # Generate the new colors:\n if algorithm=='MiniBatchKMeans':\n model = cluster.MiniBatchKMeans(n_clusters=n_clusters)\n\n elif algorithm=='Birch':\n model = cluster.Birch(n_clusters=n_clusters)\n\n elif algorithm=='DBSCAN':\n model = cluster.DBSCAN(eps=.2)\n\n elif algorithm=='AffinityPropagation':\n model = cluster.AffinityPropagation(damping=.9,\n preference=-200)\n\n elif algorithm=='MeanShift':\n model = cluster.MeanShift(bandwidth=bandwidth,\n bin_seeding=True)\n\n elif algorithm=='SpectralClustering':\n model = cluster.SpectralClustering(n_clusters=n_clusters,\n eigen_solver='arpack',\n affinity=\"nearest_neighbors\")\n\n elif algorithm=='Ward':\n model = cluster.AgglomerativeClustering(n_clusters=n_clusters,\n linkage='ward',\n connectivity=connectivity)\n\n elif algorithm=='AgglomerativeClustering':\n model = cluster.AgglomerativeClustering(linkage=\"average\",\n affinity=\"cityblock\",\n n_clusters=n_clusters,\n connectivity=connectivity)\n\n model.fit(X)\n\n if hasattr(model, 'labels_'):\n y_pred = model.labels_.astype(np.int)\n else:\n y_pred = model.predict(X)\n\n return X, y_pred\n\ndef get_dataset(dataset, n_samples):\n if dataset == 'Noisy Circles':\n return datasets.make_circles(n_samples=n_samples,\n factor=0.5,\n noise=0.05)\n\n elif dataset == 'Noisy Moons':\n return datasets.make_moons(n_samples=n_samples,\n noise=0.05)\n\n elif dataset == 'Blobs':\n return datasets.make_blobs(n_samples=n_samples,\n random_state=8)\n\n elif dataset == \"No Structure\":\n return np.random.rand(n_samples, 2), None\n\n# set up initial data\nn_samples = 1500\nn_clusters = 2\nalgorithm = 'MiniBatchKMeans'\ndataset = 'Noisy Circles'\n\nX, y = get_dataset(dataset, n_samples)\nX, y_pred = clustering(X, algorithm, n_clusters)\nspectral = np.hstack([Spectral6] * 20)\ncolors = [spectral[i] for i in y]\n\n# set up plot (styling in theme.yaml)\nplot = Figure(toolbar_location=None, title=algorithm)\nsource = ColumnDataSource(data=dict(x=X[:, 0], y=X[:, 1], colors=colors))\nplot.circle('x', 'y', fill_color='colors', line_color=None, source=source)\n\n# set up widgets\nclustering_algorithms= [\n 'MiniBatchKMeans',\n 'AffinityPropagation',\n 'MeanShift',\n 'SpectralClustering',\n 'Ward',\n 'AgglomerativeClustering',\n 'DBSCAN',\n 'Birch'\n]\n\ndatasets_names = [\n 'Noisy Circles',\n 'Noisy Moons',\n 'Blobs',\n 'No Structure'\n]\n\nalgorithm_select = Select(value='MiniBatchKMeans',\n title='Select algorithm:',\n options=clustering_algorithms)\n\ndataset_select = Select(value='Noisy Circles',\n title='Select dataset:',\n options=datasets_names)\n\nsamples_slider = Slider(title=\"Number of samples\",\n value=1500.0,\n start=1000.0,\n end=3000.0,\n step=100)\n\nclusters_slider = 
Slider(title=\"Number of clusters\",\n value=2.0,\n start=2.0,\n end=10.0,\n step=1)\n\n# set up callbacks\ndef update_algorithm_or_clusters(attrname, old, new):\n global X\n\n algorithm = algorithm_select.value\n n_clusters = int(clusters_slider.value)\n\n X, y_pred = clustering(X, algorithm, n_clusters)\n colors = [spectral[i] for i in y_pred]\n\n source.data['colors'] = colors\n source.data['x'] = X[:, 0]\n source.data['y'] = X[:, 1]\n\n plot.title.text = algorithm\n\ndef update_samples_or_dataset(attrname, old, new):\n global X, y\n\n dataset = dataset_select.value\n algorithm = algorithm_select.value\n n_clusters = int(clusters_slider.value)\n n_samples = int(samples_slider.value)\n\n X, y = get_dataset(dataset, n_samples)\n X, y_pred = clustering(X, algorithm, n_clusters)\n colors = [spectral[i] for i in y_pred]\n\n source.data['x'] = X[:, 0]\n source.data['y'] = X[:, 1]\n source.data['colors'] = colors\n\nalgorithm_select.on_change('value', update_algorithm_or_clusters)\nclusters_slider.on_change('value', update_algorithm_or_clusters)\n\ndataset_select.on_change('value', update_samples_or_dataset)\nsamples_slider.on_change('value', update_samples_or_dataset)\n\n# set up layout\nselects = HBox(dataset_select, algorithm_select)\ninputs = VBox(samples_slider, clusters_slider, selects)\n\n# add to document\ncurdoc().add_root(HBox(inputs, plot, width=800))\n",
"path": "examples/app/clustering/main.py"
}
] | diff --git a/examples/app/clustering/main.py b/examples/app/clustering/main.py
index 1e4ad5e663d..360fb62937a 100644
--- a/examples/app/clustering/main.py
+++ b/examples/app/clustering/main.py
@@ -153,7 +153,7 @@ def update_algorithm_or_clusters(attrname, old, new):
source.data['x'] = X[:, 0]
source.data['y'] = X[:, 1]
- plot.title = algorithm
+ plot.title.text = algorithm
def update_samples_or_dataset(attrname, old, new):
global X, y
diff --git a/examples/app/clustering/theme.yaml b/examples/app/clustering/theme.yaml
index 804befe1d52..ab939448def 100644
--- a/examples/app/clustering/theme.yaml
+++ b/examples/app/clustering/theme.yaml
@@ -3,9 +3,11 @@ attrs:
Figure:
plot_width: 400
plot_height: 400
- title_text_font_size: '10pt'
background_fill_color: 'lightgrey'
background_fill_alpha: 0.2
Grid:
grid_line_color: null
+
+ Title:
+ text_font_size: '10pt'
|
pytorch__tnt-85 | PyTorch 0.4 test errors
Need to fix these:
```
..........
----------------------------------------------------------------------
Ran 10 tests in 0.015s
OK
E.../Users/szagoruyko/anaconda3/lib/python3.6/site-packages/numpy/core/_methods.py:135: RuntimeWarning: Degrees of freedom <= 0 for slice
keepdims=keepdims)
/Users/szagoruyko/anaconda3/lib/python3.6/site-packages/numpy/core/_methods.py:127: RuntimeWarning: invalid value encountered in double_scalars
ret = ret.dtype.type(ret / rcount)
.....E
======================================================================
ERROR: testAPMeter (__main__.TestMeters)
----------------------------------------------------------------------
Traceback (most recent call last):
File "test_meters.py", line 208, in testAPMeter
ap = mtr.value()
File "/Users/szagoruyko/anaconda3/lib/python3.6/site-packages/torchnet/meter/apmeter.py", line 137, in value
ap[k] = precision[truth.byte()].sum() / max(truth.sum(), 1)
RuntimeError: Expected object of type torch.FloatTensor but found type torch.LongTensor for argument #2 'other'
======================================================================
ERROR: testmAPMeter (__main__.TestMeters)
----------------------------------------------------------------------
Traceback (most recent call last):
File "test_meters.py", line 329, in testmAPMeter
ap = mtr.value()
File "/Users/szagoruyko/anaconda3/lib/python3.6/site-packages/torchnet/meter/mapmeter.py", line 30, in value
return self.apmeter.value().mean()
File "/Users/szagoruyko/anaconda3/lib/python3.6/site-packages/torchnet/meter/apmeter.py", line 137, in value
ap[k] = precision[truth.byte()].sum() / max(truth.sum(), 1)
RuntimeError: Expected object of type torch.FloatTensor but found type torch.LongTensor for argument #2 'other'
----------------------------------------------------------------------
Ran 10 tests in 0.118s
FAILED (errors=2)
```
| [
{
"content": "import math\nfrom . import meter\nimport torch\n\n\nclass APMeter(meter.Meter):\n \"\"\"\n The APMeter measures the average precision per class.\n\n The APMeter is designed to operate on `NxK` Tensors `output` and\n `target`, and optionally a `Nx1` Tensor weight where (1) the `output`\n contains model output scores for `N` examples and `K` classes that ought to\n be higher when the model is more convinced that the example should be\n positively labeled, and smaller when the model believes the example should\n be negatively labeled (for instance, the output of a sigmoid function); (2)\n the `target` contains only values 0 (for negative examples) and 1\n (for positive examples); and (3) the `weight` ( > 0) represents weight for\n each sample.\n \"\"\"\n\n def __init__(self):\n super(APMeter, self).__init__()\n self.reset()\n\n def reset(self):\n \"\"\"Resets the meter with empty member variables\"\"\"\n self.scores = torch.FloatTensor(torch.FloatStorage())\n self.targets = torch.LongTensor(torch.LongStorage())\n self.weights = torch.FloatTensor(torch.FloatStorage())\n\n def add(self, output, target, weight=None):\n \"\"\"\n Args:\n output (Tensor): NxK tensor that for each of the N examples\n indicates the probability of the example belonging to each of\n the K classes, according to the model. The probabilities should\n sum to one over all classes\n target (Tensor): binary NxK tensort that encodes which of the K\n classes are associated with the N-th input\n (eg: a row [0, 1, 0, 1] indicates that the example is\n associated with classes 2 and 4)\n weight (optional, Tensor): Nx1 tensor representing the weight for\n each example (each weight > 0)\n \"\"\"\n if not torch.is_tensor(output):\n output = torch.from_numpy(output)\n if not torch.is_tensor(target):\n target = torch.from_numpy(target)\n\n if weight is not None:\n if not torch.is_tensor(weight):\n weight = torch.from_numpy(weight)\n weight = weight.squeeze()\n if output.dim() == 1:\n output = output.view(-1, 1)\n else:\n assert output.dim() == 2, \\\n 'wrong output size (should be 1D or 2D with one column \\\n per class)'\n if target.dim() == 1:\n target = target.view(-1, 1)\n else:\n assert target.dim() == 2, \\\n 'wrong target size (should be 1D or 2D with one column \\\n per class)'\n if weight is not None:\n assert weight.dim() == 1, 'Weight dimension should be 1'\n assert weight.numel() == target.size(0), \\\n 'Weight dimension 1 should be the same as that of target'\n assert torch.min(weight) >= 0, 'Weight should be non-negative only'\n assert torch.equal(target**2, target), \\\n 'targets should be binary (0 or 1)'\n if self.scores.numel() > 0:\n assert target.size(1) == self.targets.size(1), \\\n 'dimensions for output should match previously added examples.'\n\n # make sure storage is of sufficient size\n if self.scores.storage().size() < self.scores.numel() + output.numel():\n new_size = math.ceil(self.scores.storage().size() * 1.5)\n new_weight_size = math.ceil(self.weights.storage().size() * 1.5)\n self.scores.storage().resize_(int(new_size + output.numel()))\n self.targets.storage().resize_(int(new_size + output.numel()))\n if weight is not None:\n self.weights.storage().resize_(int(new_weight_size + output.size(0)))\n\n # store scores and targets\n offset = self.scores.size(0) if self.scores.dim() > 0 else 0\n self.scores.resize_(offset + output.size(0), output.size(1))\n self.targets.resize_(offset + target.size(0), target.size(1))\n self.scores.narrow(0, offset, output.size(0)).copy_(output)\n 
self.targets.narrow(0, offset, target.size(0)).copy_(target)\n\n if weight is not None:\n self.weights.resize_(offset + weight.size(0))\n self.weights.narrow(0, offset, weight.size(0)).copy_(weight)\n\n def value(self):\n \"\"\"Returns the model's average precision for each class\n\n Return:\n ap (FloatTensor): 1xK tensor, with avg precision for each class k\n \"\"\"\n\n if self.scores.numel() == 0:\n return 0\n ap = torch.zeros(self.scores.size(1))\n if hasattr(torch, \"arange\"):\n rg = torch.arange(1, self.scores.size(0) + 1).float()\n else:\n rg = torch.range(1, self.scores.size(0)).float()\n if self.weights.numel() > 0:\n weight = self.weights.new(self.weights.size())\n weighted_truth = self.weights.new(self.weights.size())\n\n # compute average precision for each class\n for k in range(self.scores.size(1)):\n # sort scores\n scores = self.scores[:, k]\n targets = self.targets[:, k]\n _, sortind = torch.sort(scores, 0, True)\n truth = targets[sortind]\n if self.weights.numel() > 0:\n weight = self.weights[sortind]\n weighted_truth = truth.float() * weight\n rg = weight.cumsum(0)\n\n # compute true positive sums\n if self.weights.numel() > 0:\n tp = weighted_truth.cumsum(0)\n else:\n tp = truth.float().cumsum(0)\n\n # compute precision curve\n precision = tp.div(rg)\n\n # compute average precision\n ap[k] = precision[truth.byte()].sum() / max(truth.sum(), 1)\n return ap\n",
"path": "torchnet/meter/apmeter.py"
}
] | [
{
"content": "import math\nfrom . import meter\nimport torch\n\n\nclass APMeter(meter.Meter):\n \"\"\"\n The APMeter measures the average precision per class.\n\n The APMeter is designed to operate on `NxK` Tensors `output` and\n `target`, and optionally a `Nx1` Tensor weight where (1) the `output`\n contains model output scores for `N` examples and `K` classes that ought to\n be higher when the model is more convinced that the example should be\n positively labeled, and smaller when the model believes the example should\n be negatively labeled (for instance, the output of a sigmoid function); (2)\n the `target` contains only values 0 (for negative examples) and 1\n (for positive examples); and (3) the `weight` ( > 0) represents weight for\n each sample.\n \"\"\"\n\n def __init__(self):\n super(APMeter, self).__init__()\n self.reset()\n\n def reset(self):\n \"\"\"Resets the meter with empty member variables\"\"\"\n self.scores = torch.FloatTensor(torch.FloatStorage())\n self.targets = torch.LongTensor(torch.LongStorage())\n self.weights = torch.FloatTensor(torch.FloatStorage())\n\n def add(self, output, target, weight=None):\n \"\"\"\n Args:\n output (Tensor): NxK tensor that for each of the N examples\n indicates the probability of the example belonging to each of\n the K classes, according to the model. The probabilities should\n sum to one over all classes\n target (Tensor): binary NxK tensort that encodes which of the K\n classes are associated with the N-th input\n (eg: a row [0, 1, 0, 1] indicates that the example is\n associated with classes 2 and 4)\n weight (optional, Tensor): Nx1 tensor representing the weight for\n each example (each weight > 0)\n \"\"\"\n if not torch.is_tensor(output):\n output = torch.from_numpy(output)\n if not torch.is_tensor(target):\n target = torch.from_numpy(target)\n\n if weight is not None:\n if not torch.is_tensor(weight):\n weight = torch.from_numpy(weight)\n weight = weight.squeeze()\n if output.dim() == 1:\n output = output.view(-1, 1)\n else:\n assert output.dim() == 2, \\\n 'wrong output size (should be 1D or 2D with one column \\\n per class)'\n if target.dim() == 1:\n target = target.view(-1, 1)\n else:\n assert target.dim() == 2, \\\n 'wrong target size (should be 1D or 2D with one column \\\n per class)'\n if weight is not None:\n assert weight.dim() == 1, 'Weight dimension should be 1'\n assert weight.numel() == target.size(0), \\\n 'Weight dimension 1 should be the same as that of target'\n assert torch.min(weight) >= 0, 'Weight should be non-negative only'\n assert torch.equal(target**2, target), \\\n 'targets should be binary (0 or 1)'\n if self.scores.numel() > 0:\n assert target.size(1) == self.targets.size(1), \\\n 'dimensions for output should match previously added examples.'\n\n # make sure storage is of sufficient size\n if self.scores.storage().size() < self.scores.numel() + output.numel():\n new_size = math.ceil(self.scores.storage().size() * 1.5)\n new_weight_size = math.ceil(self.weights.storage().size() * 1.5)\n self.scores.storage().resize_(int(new_size + output.numel()))\n self.targets.storage().resize_(int(new_size + output.numel()))\n if weight is not None:\n self.weights.storage().resize_(int(new_weight_size + output.size(0)))\n\n # store scores and targets\n offset = self.scores.size(0) if self.scores.dim() > 0 else 0\n self.scores.resize_(offset + output.size(0), output.size(1))\n self.targets.resize_(offset + target.size(0), target.size(1))\n self.scores.narrow(0, offset, output.size(0)).copy_(output)\n 
self.targets.narrow(0, offset, target.size(0)).copy_(target)\n\n if weight is not None:\n self.weights.resize_(offset + weight.size(0))\n self.weights.narrow(0, offset, weight.size(0)).copy_(weight)\n\n def value(self):\n \"\"\"Returns the model's average precision for each class\n\n Return:\n ap (FloatTensor): 1xK tensor, with avg precision for each class k\n \"\"\"\n\n if self.scores.numel() == 0:\n return 0\n ap = torch.zeros(self.scores.size(1))\n if hasattr(torch, \"arange\"):\n rg = torch.arange(1, self.scores.size(0) + 1).float()\n else:\n rg = torch.range(1, self.scores.size(0)).float()\n if self.weights.numel() > 0:\n weight = self.weights.new(self.weights.size())\n weighted_truth = self.weights.new(self.weights.size())\n\n # compute average precision for each class\n for k in range(self.scores.size(1)):\n # sort scores\n scores = self.scores[:, k]\n targets = self.targets[:, k]\n _, sortind = torch.sort(scores, 0, True)\n truth = targets[sortind]\n if self.weights.numel() > 0:\n weight = self.weights[sortind]\n weighted_truth = truth.float() * weight\n rg = weight.cumsum(0)\n\n # compute true positive sums\n if self.weights.numel() > 0:\n tp = weighted_truth.cumsum(0)\n else:\n tp = truth.float().cumsum(0)\n\n # compute precision curve\n precision = tp.div(rg)\n\n # compute average precision\n ap[k] = precision[truth.byte()].sum() / max(float(truth.sum()), 1)\n return ap\n",
"path": "torchnet/meter/apmeter.py"
}
] | diff --git a/torchnet/meter/apmeter.py b/torchnet/meter/apmeter.py
index 5058e29e43..57991d1241 100644
--- a/torchnet/meter/apmeter.py
+++ b/torchnet/meter/apmeter.py
@@ -134,5 +134,5 @@ def value(self):
precision = tp.div(rg)
# compute average precision
- ap[k] = precision[truth.byte()].sum() / max(truth.sum(), 1)
+ ap[k] = precision[truth.byte()].sum() / max(float(truth.sum()), 1)
return ap
|
conda__conda-3257 | Zsh.exe not supported on MSYS2
The following error is reported in a MSYS2 zsh shell:
```
➜ dotfiles git:(master) ✗ source activate py35_32
Traceback (most recent call last):
File "C:\Miniconda3\Scripts\conda-script.py", line 5, in <module>
sys.exit(main())
File "C:\Miniconda3\lib\site-packages\conda\cli\main.py", line 48, in main
activate.main()
File "C:\Miniconda3\lib\site-packages\conda\cli\activate.py", line 105, in main
shelldict = shells[shell]
KeyError: 'zsh.exe'
```
| [
{
"content": "from __future__ import print_function, division, absolute_import\n\nimport collections\nimport errno\nimport hashlib\nimport logging\nimport os\nimport re\nimport sys\nimport time\nimport threading\nfrom functools import partial\nfrom os.path import isdir, join, basename, exists\n# conda build import\nfrom .common.url import path_to_url\nlog = logging.getLogger(__name__)\nstderrlog = logging.getLogger('stderrlog')\n\non_win = bool(sys.platform == \"win32\")\n\n\nclass memoized(object):\n \"\"\"Decorator. Caches a function's return value each time it is called.\n If called later with the same arguments, the cached value is returned\n (not reevaluated).\n \"\"\"\n def __init__(self, func):\n self.func = func\n self.cache = {}\n self.lock = threading.Lock()\n\n def __call__(self, *args, **kw):\n newargs = []\n for arg in args:\n if isinstance(arg, list):\n newargs.append(tuple(arg))\n elif not isinstance(arg, collections.Hashable):\n # uncacheable. a list, for instance.\n # better to not cache than blow up.\n return self.func(*args, **kw)\n else:\n newargs.append(arg)\n newargs = tuple(newargs)\n key = (newargs, frozenset(sorted(kw.items())))\n with self.lock:\n if key in self.cache:\n return self.cache[key]\n else:\n value = self.func(*args, **kw)\n self.cache[key] = value\n return value\n\n\n# For instance methods only\nclass memoize(object): # 577452\n def __init__(self, func):\n self.func = func\n\n def __get__(self, obj, objtype=None):\n if obj is None:\n return self.func\n return partial(self, obj)\n\n def __call__(self, *args, **kw):\n obj = args[0]\n try:\n cache = obj.__cache\n except AttributeError:\n cache = obj.__cache = {}\n key = (self.func, args[1:], frozenset(sorted(kw.items())))\n try:\n res = cache[key]\n except KeyError:\n res = cache[key] = self.func(*args, **kw)\n return res\n\n@memoized\ndef gnu_get_libc_version():\n \"\"\"\n If on linux, get installed version of glibc, otherwise return None\n \"\"\"\n\n if not sys.platform.startswith('linux'):\n return None\n\n from ctypes import CDLL, cdll, c_char_p\n\n cdll.LoadLibrary('libc.so.6')\n libc = CDLL('libc.so.6')\n f = libc.gnu_get_libc_version\n f.restype = c_char_p\n return f()\n\n\ndef try_write(dir_path, heavy=False):\n \"\"\"Test write access to a directory.\n\n Args:\n dir_path (str): directory to test write access\n heavy (bool): Actually create and delete a file, or do a faster os.access test.\n https://docs.python.org/dev/library/os.html?highlight=xattr#os.access\n\n Returns:\n bool\n\n \"\"\"\n if not isdir(dir_path):\n return False\n if on_win or heavy:\n # try to create a file to see if `dir_path` is writable, see #2151\n temp_filename = join(dir_path, '.conda-try-write-%d' % os.getpid())\n try:\n with open(temp_filename, mode='wb') as fo:\n fo.write(b'This is a test file.\\n')\n backoff_unlink(temp_filename)\n return True\n except (IOError, OSError):\n return False\n finally:\n backoff_unlink(temp_filename)\n else:\n return os.access(dir_path, os.W_OK)\n\n\ndef backoff_unlink(path):\n try:\n exp_backoff_fn(lambda f: exists(f) and os.unlink(f), path)\n except (IOError, OSError) as e:\n if e.errno not in (errno.ENOENT,):\n # errno.ENOENT File not found error / No such file or directory\n raise\n\n\ndef hashsum_file(path, mode='md5'):\n h = hashlib.new(mode)\n with open(path, 'rb') as fi:\n while True:\n chunk = fi.read(262144) # process chunks of 256KB\n if not chunk:\n break\n h.update(chunk)\n return h.hexdigest()\n\n\ndef md5_file(path):\n return hashsum_file(path, 'md5')\n\n\ndef 
path_identity(path):\n \"\"\"Used as a dummy path converter where no conversion necessary\"\"\"\n return path\n\n\ndef win_path_to_unix(path, root_prefix=\"\"):\n \"\"\"Convert a path or ;-separated string of paths into a unix representation\n\n Does not add cygdrive. If you need that, set root_prefix to \"/cygdrive\"\n \"\"\"\n path_re = '(?<![:/^a-zA-Z])([a-zA-Z]:[\\/\\\\\\\\]+(?:[^:*?\"<>|]+[\\/\\\\\\\\]+)*[^:*?\"<>|;\\/\\\\\\\\]+?(?![a-zA-Z]:))' # noqa\n\n def _translation(found_path):\n found = found_path.group(1).replace(\"\\\\\", \"/\").replace(\":\", \"\").replace(\"//\", \"/\")\n return root_prefix + \"/\" + found\n path = re.sub(path_re, _translation, path).replace(\";/\", \":/\")\n return path\n\n\ndef unix_path_to_win(path, root_prefix=\"\"):\n \"\"\"Convert a path or :-separated string of paths into a Windows representation\n\n Does not add cygdrive. If you need that, set root_prefix to \"/cygdrive\"\n \"\"\"\n if len(path) > 1 and (\";\" in path or (path[1] == \":\" and path.count(\":\") == 1)):\n # already a windows path\n return path.replace(\"/\", \"\\\\\")\n path_re = root_prefix + r'(/[a-zA-Z]/(?:(?![:\\s]/)[^:*?\"<>])*)'\n\n def _translation(found_path):\n group = found_path.group(0)\n return \"{0}:{1}\".format(group[len(root_prefix)+1],\n group[len(root_prefix)+2:].replace(\"/\", \"\\\\\"))\n translation = re.sub(path_re, _translation, path)\n translation = re.sub(\":([a-zA-Z]):\\\\\\\\\",\n lambda match: \";\" + match.group(0)[1] + \":\\\\\",\n translation)\n return translation\n\n\n# curry cygwin functions\ndef win_path_to_cygwin(path):\n return win_path_to_unix(path, \"/cygdrive\")\n\n\ndef cygwin_path_to_win(path):\n return unix_path_to_win(path, \"/cygdrive\")\n\n\ndef translate_stream(stream, translator):\n return \"\\n\".join(translator(line) for line in stream.split(\"\\n\"))\n\n\ndef human_bytes(n):\n \"\"\"\n Return the number of bytes n in more human readable form.\n \"\"\"\n if n < 1024:\n return '%d B' % n\n k = n/1024\n if k < 1024:\n return '%d KB' % round(k)\n m = k/1024\n if m < 1024:\n return '%.1f MB' % m\n g = m/1024\n return '%.2f GB' % g\n\n\n# TODO: this should be done in a more extensible way\n# (like files for each shell, with some registration mechanism.)\n\n# defaults for unix shells. 
Note: missing \"exe\" entry, which should be set to\n# either an executable on PATH, or a full path to an executable for a shell\nunix_shell_base = dict(\n binpath=\"/bin/\", # mind the trailing slash.\n echo=\"echo\",\n env_script_suffix=\".sh\",\n nul='2>/dev/null',\n path_from=path_identity,\n path_to=path_identity,\n pathsep=\":\",\n printdefaultenv='echo $CONDA_DEFAULT_ENV',\n printpath=\"echo $PATH\",\n printps1='echo $PS1',\n promptvar='PS1',\n sep=\"/\",\n set_var='export ',\n shell_args=[\"-l\", \"-c\"],\n shell_suffix=\"\",\n slash_convert=(\"\\\\\", \"/\"),\n source_setup=\"source\",\n test_echo_extra=\"\",\n var_format=\"${}\",\n)\n\nmsys2_shell_base = dict(\n unix_shell_base,\n path_from=unix_path_to_win,\n path_to=win_path_to_unix,\n binpath=\"/Scripts/\", # mind the trailing slash.\n)\n\nif on_win:\n shells = {\n # \"powershell.exe\": dict(\n # echo=\"echo\",\n # test_echo_extra=\" .\",\n # var_format=\"${var}\",\n # binpath=\"/bin/\", # mind the trailing slash.\n # source_setup=\"source\",\n # nul='2>/dev/null',\n # set_var='export ',\n # shell_suffix=\".ps\",\n # env_script_suffix=\".ps\",\n # printps1='echo $PS1',\n # printdefaultenv='echo $CONDA_DEFAULT_ENV',\n # printpath=\"echo %PATH%\",\n # exe=\"powershell.exe\",\n # path_from=path_identity,\n # path_to=path_identity,\n # slash_convert = (\"/\", \"\\\\\"),\n # ),\n \"cmd.exe\": dict(\n echo=\"@echo\",\n var_format=\"%{}%\",\n binpath=\"\\\\Scripts\\\\\", # mind the trailing slash.\n source_setup=\"call\",\n test_echo_extra=\"\",\n nul='1>NUL 2>&1',\n set_var='set ',\n shell_suffix=\".bat\",\n env_script_suffix=\".bat\",\n printps1=\"@echo %PROMPT%\",\n promptvar=\"PROMPT\",\n # parens mismatched intentionally. See http://stackoverflow.com/questions/20691060/how-do-i-echo-a-blank-empty-line-to-the-console-from-a-windows-batch-file # NOQA\n printdefaultenv='IF NOT \"%CONDA_DEFAULT_ENV%\" == \"\" (\\n'\n 'echo %CONDA_DEFAULT_ENV% ) ELSE (\\n'\n 'echo()',\n printpath=\"@echo %PATH%\",\n exe=\"cmd.exe\",\n shell_args=[\"/d\", \"/c\"],\n path_from=path_identity,\n path_to=path_identity,\n slash_convert=(\"/\", \"\\\\\"),\n sep=\"\\\\\",\n pathsep=\";\",\n ),\n \"cygwin\": dict(\n unix_shell_base,\n exe=\"bash.exe\",\n binpath=\"/Scripts/\", # mind the trailing slash.\n path_from=cygwin_path_to_win,\n path_to=win_path_to_cygwin\n ),\n # bash is whichever bash is on PATH. If using Cygwin, you should use the cygwin\n # entry instead. 
The only major difference is that it handle's cygwin's /cygdrive\n # filesystem root.\n \"bash.exe\": dict(\n msys2_shell_base, exe=\"bash.exe\",\n ),\n \"bash\": dict(\n msys2_shell_base, exe=\"bash\",\n ),\n \"sh.exe\": dict(\n msys2_shell_base, exe=\"sh.exe\",\n ),\n }\n\nelse:\n shells = {\n \"bash\": dict(\n unix_shell_base, exe=\"bash\",\n ),\n \"zsh\": dict(\n unix_shell_base, exe=\"zsh\",\n ),\n \"fish\": dict(\n unix_shell_base, exe=\"fish\",\n pathsep=\" \",\n ),\n }\n\n\ndef exp_backoff_fn(fn, *args):\n \"\"\"Mostly for retrying file operations that fail on Windows due to virus scanners\"\"\"\n if not on_win:\n return fn(*args)\n\n import random\n # with max_tries = 6, max total time ~= 3.2 sec\n # with max_tries = 7, max total time ~= 6.5 sec\n max_tries = 7\n for n in range(max_tries):\n try:\n result = fn(*args)\n except (OSError, IOError) as e:\n log.debug(repr(e))\n if e.errno in (errno.EPERM, errno.EACCES):\n if n == max_tries-1:\n raise\n sleep_time = ((2 ** n) + random.random()) * 0.1\n caller_frame = sys._getframe(1)\n log.debug(\"retrying %s/%s %s() in %g sec\",\n basename(caller_frame.f_code.co_filename),\n caller_frame.f_lineno, fn.__name__,\n sleep_time)\n time.sleep(sleep_time)\n elif e.errno in (errno.ENOENT,):\n # errno.ENOENT File not found error / No such file or directory\n raise\n else:\n log.error(\"Uncaught backoff with errno %d\", e.errno)\n raise\n else:\n return result\n\n# put back because of conda build\nurlpath = url_path = path_to_url\n",
"path": "conda/utils.py"
}
] | [
{
"content": "from __future__ import print_function, division, absolute_import\n\nimport collections\nimport errno\nimport hashlib\nimport logging\nimport os\nimport re\nimport sys\nimport time\nimport threading\nfrom functools import partial\nfrom os.path import isdir, join, basename, exists\n# conda build import\nfrom .common.url import path_to_url\nlog = logging.getLogger(__name__)\nstderrlog = logging.getLogger('stderrlog')\n\non_win = bool(sys.platform == \"win32\")\n\n\nclass memoized(object):\n \"\"\"Decorator. Caches a function's return value each time it is called.\n If called later with the same arguments, the cached value is returned\n (not reevaluated).\n \"\"\"\n def __init__(self, func):\n self.func = func\n self.cache = {}\n self.lock = threading.Lock()\n\n def __call__(self, *args, **kw):\n newargs = []\n for arg in args:\n if isinstance(arg, list):\n newargs.append(tuple(arg))\n elif not isinstance(arg, collections.Hashable):\n # uncacheable. a list, for instance.\n # better to not cache than blow up.\n return self.func(*args, **kw)\n else:\n newargs.append(arg)\n newargs = tuple(newargs)\n key = (newargs, frozenset(sorted(kw.items())))\n with self.lock:\n if key in self.cache:\n return self.cache[key]\n else:\n value = self.func(*args, **kw)\n self.cache[key] = value\n return value\n\n\n# For instance methods only\nclass memoize(object): # 577452\n def __init__(self, func):\n self.func = func\n\n def __get__(self, obj, objtype=None):\n if obj is None:\n return self.func\n return partial(self, obj)\n\n def __call__(self, *args, **kw):\n obj = args[0]\n try:\n cache = obj.__cache\n except AttributeError:\n cache = obj.__cache = {}\n key = (self.func, args[1:], frozenset(sorted(kw.items())))\n try:\n res = cache[key]\n except KeyError:\n res = cache[key] = self.func(*args, **kw)\n return res\n\n@memoized\ndef gnu_get_libc_version():\n \"\"\"\n If on linux, get installed version of glibc, otherwise return None\n \"\"\"\n\n if not sys.platform.startswith('linux'):\n return None\n\n from ctypes import CDLL, cdll, c_char_p\n\n cdll.LoadLibrary('libc.so.6')\n libc = CDLL('libc.so.6')\n f = libc.gnu_get_libc_version\n f.restype = c_char_p\n return f()\n\n\ndef try_write(dir_path, heavy=False):\n \"\"\"Test write access to a directory.\n\n Args:\n dir_path (str): directory to test write access\n heavy (bool): Actually create and delete a file, or do a faster os.access test.\n https://docs.python.org/dev/library/os.html?highlight=xattr#os.access\n\n Returns:\n bool\n\n \"\"\"\n if not isdir(dir_path):\n return False\n if on_win or heavy:\n # try to create a file to see if `dir_path` is writable, see #2151\n temp_filename = join(dir_path, '.conda-try-write-%d' % os.getpid())\n try:\n with open(temp_filename, mode='wb') as fo:\n fo.write(b'This is a test file.\\n')\n backoff_unlink(temp_filename)\n return True\n except (IOError, OSError):\n return False\n finally:\n backoff_unlink(temp_filename)\n else:\n return os.access(dir_path, os.W_OK)\n\n\ndef backoff_unlink(path):\n try:\n exp_backoff_fn(lambda f: exists(f) and os.unlink(f), path)\n except (IOError, OSError) as e:\n if e.errno not in (errno.ENOENT,):\n # errno.ENOENT File not found error / No such file or directory\n raise\n\n\ndef hashsum_file(path, mode='md5'):\n h = hashlib.new(mode)\n with open(path, 'rb') as fi:\n while True:\n chunk = fi.read(262144) # process chunks of 256KB\n if not chunk:\n break\n h.update(chunk)\n return h.hexdigest()\n\n\ndef md5_file(path):\n return hashsum_file(path, 'md5')\n\n\ndef 
path_identity(path):\n \"\"\"Used as a dummy path converter where no conversion necessary\"\"\"\n return path\n\n\ndef win_path_to_unix(path, root_prefix=\"\"):\n \"\"\"Convert a path or ;-separated string of paths into a unix representation\n\n Does not add cygdrive. If you need that, set root_prefix to \"/cygdrive\"\n \"\"\"\n path_re = '(?<![:/^a-zA-Z])([a-zA-Z]:[\\/\\\\\\\\]+(?:[^:*?\"<>|]+[\\/\\\\\\\\]+)*[^:*?\"<>|;\\/\\\\\\\\]+?(?![a-zA-Z]:))' # noqa\n\n def _translation(found_path):\n found = found_path.group(1).replace(\"\\\\\", \"/\").replace(\":\", \"\").replace(\"//\", \"/\")\n return root_prefix + \"/\" + found\n path = re.sub(path_re, _translation, path).replace(\";/\", \":/\")\n return path\n\n\ndef unix_path_to_win(path, root_prefix=\"\"):\n \"\"\"Convert a path or :-separated string of paths into a Windows representation\n\n Does not add cygdrive. If you need that, set root_prefix to \"/cygdrive\"\n \"\"\"\n if len(path) > 1 and (\";\" in path or (path[1] == \":\" and path.count(\":\") == 1)):\n # already a windows path\n return path.replace(\"/\", \"\\\\\")\n path_re = root_prefix + r'(/[a-zA-Z]/(?:(?![:\\s]/)[^:*?\"<>])*)'\n\n def _translation(found_path):\n group = found_path.group(0)\n return \"{0}:{1}\".format(group[len(root_prefix)+1],\n group[len(root_prefix)+2:].replace(\"/\", \"\\\\\"))\n translation = re.sub(path_re, _translation, path)\n translation = re.sub(\":([a-zA-Z]):\\\\\\\\\",\n lambda match: \";\" + match.group(0)[1] + \":\\\\\",\n translation)\n return translation\n\n\n# curry cygwin functions\ndef win_path_to_cygwin(path):\n return win_path_to_unix(path, \"/cygdrive\")\n\n\ndef cygwin_path_to_win(path):\n return unix_path_to_win(path, \"/cygdrive\")\n\n\ndef translate_stream(stream, translator):\n return \"\\n\".join(translator(line) for line in stream.split(\"\\n\"))\n\n\ndef human_bytes(n):\n \"\"\"\n Return the number of bytes n in more human readable form.\n \"\"\"\n if n < 1024:\n return '%d B' % n\n k = n/1024\n if k < 1024:\n return '%d KB' % round(k)\n m = k/1024\n if m < 1024:\n return '%.1f MB' % m\n g = m/1024\n return '%.2f GB' % g\n\n\n# TODO: this should be done in a more extensible way\n# (like files for each shell, with some registration mechanism.)\n\n# defaults for unix shells. 
Note: missing \"exe\" entry, which should be set to\n# either an executable on PATH, or a full path to an executable for a shell\nunix_shell_base = dict(\n binpath=\"/bin/\", # mind the trailing slash.\n echo=\"echo\",\n env_script_suffix=\".sh\",\n nul='2>/dev/null',\n path_from=path_identity,\n path_to=path_identity,\n pathsep=\":\",\n printdefaultenv='echo $CONDA_DEFAULT_ENV',\n printpath=\"echo $PATH\",\n printps1='echo $PS1',\n promptvar='PS1',\n sep=\"/\",\n set_var='export ',\n shell_args=[\"-l\", \"-c\"],\n shell_suffix=\"\",\n slash_convert=(\"\\\\\", \"/\"),\n source_setup=\"source\",\n test_echo_extra=\"\",\n var_format=\"${}\",\n)\n\nmsys2_shell_base = dict(\n unix_shell_base,\n path_from=unix_path_to_win,\n path_to=win_path_to_unix,\n binpath=\"/Scripts/\", # mind the trailing slash.\n)\n\nif on_win:\n shells = {\n # \"powershell.exe\": dict(\n # echo=\"echo\",\n # test_echo_extra=\" .\",\n # var_format=\"${var}\",\n # binpath=\"/bin/\", # mind the trailing slash.\n # source_setup=\"source\",\n # nul='2>/dev/null',\n # set_var='export ',\n # shell_suffix=\".ps\",\n # env_script_suffix=\".ps\",\n # printps1='echo $PS1',\n # printdefaultenv='echo $CONDA_DEFAULT_ENV',\n # printpath=\"echo %PATH%\",\n # exe=\"powershell.exe\",\n # path_from=path_identity,\n # path_to=path_identity,\n # slash_convert = (\"/\", \"\\\\\"),\n # ),\n \"cmd.exe\": dict(\n echo=\"@echo\",\n var_format=\"%{}%\",\n binpath=\"\\\\Scripts\\\\\", # mind the trailing slash.\n source_setup=\"call\",\n test_echo_extra=\"\",\n nul='1>NUL 2>&1',\n set_var='set ',\n shell_suffix=\".bat\",\n env_script_suffix=\".bat\",\n printps1=\"@echo %PROMPT%\",\n promptvar=\"PROMPT\",\n # parens mismatched intentionally. See http://stackoverflow.com/questions/20691060/how-do-i-echo-a-blank-empty-line-to-the-console-from-a-windows-batch-file # NOQA\n printdefaultenv='IF NOT \"%CONDA_DEFAULT_ENV%\" == \"\" (\\n'\n 'echo %CONDA_DEFAULT_ENV% ) ELSE (\\n'\n 'echo()',\n printpath=\"@echo %PATH%\",\n exe=\"cmd.exe\",\n shell_args=[\"/d\", \"/c\"],\n path_from=path_identity,\n path_to=path_identity,\n slash_convert=(\"/\", \"\\\\\"),\n sep=\"\\\\\",\n pathsep=\";\",\n ),\n \"cygwin\": dict(\n unix_shell_base,\n exe=\"bash.exe\",\n binpath=\"/Scripts/\", # mind the trailing slash.\n path_from=cygwin_path_to_win,\n path_to=win_path_to_cygwin\n ),\n # bash is whichever bash is on PATH. If using Cygwin, you should use the cygwin\n # entry instead. 
The only major difference is that it handle's cygwin's /cygdrive\n # filesystem root.\n \"bash.exe\": dict(\n msys2_shell_base, exe=\"bash.exe\",\n ),\n \"bash\": dict(\n msys2_shell_base, exe=\"bash\",\n ),\n \"sh.exe\": dict(\n msys2_shell_base, exe=\"sh.exe\",\n ),\n \"zsh.exe\": dict(\n msys2_shell_base, exe=\"zsh.exe\",\n ),\n \"zsh\": dict(\n msys2_shell_base, exe=\"zsh\",\n ),\n }\n\nelse:\n shells = {\n \"bash\": dict(\n unix_shell_base, exe=\"bash\",\n ),\n \"zsh\": dict(\n unix_shell_base, exe=\"zsh\",\n ),\n \"fish\": dict(\n unix_shell_base, exe=\"fish\",\n pathsep=\" \",\n ),\n }\n\n\ndef exp_backoff_fn(fn, *args):\n \"\"\"Mostly for retrying file operations that fail on Windows due to virus scanners\"\"\"\n if not on_win:\n return fn(*args)\n\n import random\n # with max_tries = 6, max total time ~= 3.2 sec\n # with max_tries = 7, max total time ~= 6.5 sec\n max_tries = 7\n for n in range(max_tries):\n try:\n result = fn(*args)\n except (OSError, IOError) as e:\n log.debug(repr(e))\n if e.errno in (errno.EPERM, errno.EACCES):\n if n == max_tries-1:\n raise\n sleep_time = ((2 ** n) + random.random()) * 0.1\n caller_frame = sys._getframe(1)\n log.debug(\"retrying %s/%s %s() in %g sec\",\n basename(caller_frame.f_code.co_filename),\n caller_frame.f_lineno, fn.__name__,\n sleep_time)\n time.sleep(sleep_time)\n elif e.errno in (errno.ENOENT,):\n # errno.ENOENT File not found error / No such file or directory\n raise\n else:\n log.error(\"Uncaught backoff with errno %d\", e.errno)\n raise\n else:\n return result\n\n# put back because of conda build\nurlpath = url_path = path_to_url\n",
"path": "conda/utils.py"
}
] | diff --git a/conda/utils.py b/conda/utils.py
index 90b328b85ba..9c8a6f82859 100644
--- a/conda/utils.py
+++ b/conda/utils.py
@@ -313,6 +313,12 @@ def human_bytes(n):
"sh.exe": dict(
msys2_shell_base, exe="sh.exe",
),
+ "zsh.exe": dict(
+ msys2_shell_base, exe="zsh.exe",
+ ),
+ "zsh": dict(
+ msys2_shell_base, exe="zsh",
+ ),
}
else:
|
dask__dask-533 | ProgressBar is not visible in the notebook
The `ProgressBar` doesn't update itself during execution while in the notebook. Afterwards the full bar will pop up but it doesn't give you any cues during execution.
| [
{
"content": "from __future__ import division\nimport sys\nimport threading\nimport time\nfrom timeit import default_timer\n\nfrom ..core import istask\nfrom .core import Diagnostic\n\n\ndef format_time(t):\n \"\"\"Format seconds into a human readable form.\n\n >>> format_time(10.4)\n '10.4s'\n >>> format_time(1000.4)\n '16min 40.4s'\n \"\"\"\n m, s = divmod(t, 60)\n h, m = divmod(m, 60)\n if h:\n return '{0:2.0f}hr {1:2.0f}min {2:4.1f}s'.format(h, m, s)\n elif m:\n return '{0:2.0f}min {1:4.1f}s'.format(m, s)\n else:\n return '{0:4.1f}s'.format(s)\n\n\nclass ProgressBar(Diagnostic):\n \"\"\"A progress bar for dask.\n\n Can be used as a context manager around dask computations.\n\n Examples\n --------\n >>> with ProgressBar(): # doctest: +SKIP\n ... out = res.compute()\n [########################################] | 100% Completed | 10.4 s\n \"\"\"\n\n def __init__(self, width=40, dt=0.1):\n self._width = width\n self._dt = dt\n\n def _start(self, dsk, state):\n self._ntasks = len([k for (k, v) in dsk.items() if istask(v)])\n self._ndone = 0\n self._update_rate = max(1, self._ntasks // self._width)\n self._start_time = default_timer()\n # Start background thread\n self._running = True\n self._timer = threading.Thread(target=self._timer_func)\n self._timer.start()\n\n def _posttask(self, key, value, dsk, state, id):\n self._ndone += 1\n\n def _finish(self, dsk, state, errored):\n self._running = False\n self._timer.join()\n self._finalize_bar()\n\n def _timer_func(self):\n \"\"\"Background thread for updating the progress bar\"\"\"\n while self._running:\n self._update_bar()\n time.sleep(self._dt)\n\n def _update_bar(self):\n tics = int(self._ndone * self._width / self._ntasks)\n bar = '#' * tics\n percent = (100 * self._ndone) // self._ntasks\n elapsed = format_time(default_timer() - self._start_time)\n msg = '\\r[{0:<{1}}] | {2}% Completed | {3}'.format(bar, self._width,\n percent, elapsed)\n sys.stdout.write(msg)\n sys.stdout.flush()\n\n def _finalize_bar(self):\n self._update_bar()\n sys.stdout.write('\\n')\n sys.stdout.flush()\n",
"path": "dask/diagnostics/progress.py"
}
] | [
{
"content": "from __future__ import division\nimport sys\nimport threading\nimport time\nfrom timeit import default_timer\n\nfrom ..core import istask\nfrom .core import Diagnostic\n\n\ndef format_time(t):\n \"\"\"Format seconds into a human readable form.\n\n >>> format_time(10.4)\n '10.4s'\n >>> format_time(1000.4)\n '16min 40.4s'\n \"\"\"\n m, s = divmod(t, 60)\n h, m = divmod(m, 60)\n if h:\n return '{0:2.0f}hr {1:2.0f}min {2:4.1f}s'.format(h, m, s)\n elif m:\n return '{0:2.0f}min {1:4.1f}s'.format(m, s)\n else:\n return '{0:4.1f}s'.format(s)\n\n\nclass ProgressBar(Diagnostic):\n \"\"\"A progress bar for dask.\n\n Can be used as a context manager around dask computations.\n\n Examples\n --------\n >>> with ProgressBar(): # doctest: +SKIP\n ... out = res.compute()\n [########################################] | 100% Completed | 10.4 s\n \"\"\"\n\n def __init__(self, width=40, dt=0.1):\n self._width = width\n self._dt = dt\n\n def _start(self, dsk, state):\n self._ntasks = len([k for (k, v) in dsk.items() if istask(v)])\n self._ndone = 0\n self._update_rate = max(1, self._ntasks // self._width)\n self._start_time = default_timer()\n # Start background thread\n self._running = True\n self._timer = threading.Thread(target=self._timer_func)\n self._timer.start()\n\n def _posttask(self, key, value, dsk, state, id):\n self._ndone += 1\n sys.stdout.flush()\n\n def _finish(self, dsk, state, errored):\n self._running = False\n self._timer.join()\n self._finalize_bar()\n\n def _timer_func(self):\n \"\"\"Background thread for updating the progress bar\"\"\"\n while self._running:\n self._update_bar()\n time.sleep(self._dt)\n\n def _update_bar(self):\n tics = int(self._ndone * self._width / self._ntasks)\n bar = '#' * tics\n percent = (100 * self._ndone) // self._ntasks\n elapsed = format_time(default_timer() - self._start_time)\n msg = '\\r[{0:<{1}}] | {2}% Completed | {3}'.format(bar, self._width,\n percent, elapsed)\n sys.stdout.write(msg)\n sys.stdout.flush()\n\n def _finalize_bar(self):\n self._update_bar()\n sys.stdout.write('\\n')\n sys.stdout.flush()\n",
"path": "dask/diagnostics/progress.py"
}
] | diff --git a/dask/diagnostics/progress.py b/dask/diagnostics/progress.py
index d79f5476300..08a37459bb0 100644
--- a/dask/diagnostics/progress.py
+++ b/dask/diagnostics/progress.py
@@ -54,6 +54,7 @@ def _start(self, dsk, state):
def _posttask(self, key, value, dsk, state, id):
self._ndone += 1
+ sys.stdout.flush()
def _finish(self, dsk, state, errored):
self._running = False
|
cupy__cupy-5225 | [info] NumPy/SciPy new version pinning recommendation
See:
- https://github.com/numpy/numpy/pull/18505
- scipy/scipy#12862
The most important takeaway is that NumPy/SciPy now recommend that downstream distributions pin an upper bound on the version when NumPy/SciPy are runtime dependencies. (For example, if the latest NumPy available is 1.20, one should pin to `<1.23`; the `<1.xx+3.0` notation used in the docs is a bit confusing, see the clarification in https://github.com/scipy/scipy/pull/12862#discussion_r575790007.) There are other suggestions too, but I think this is potentially the most impactful one.
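As a concrete illustration, here is a sketch of how the recommendation translates into a `setup.py` dependency specification. The package name is hypothetical; the bounds simply follow the "three minor versions ahead" rule and match the pins applied in the change below.

```python
# Illustrative setup.py fragment: if the newest NumPy at release time is 1.20,
# the recommended upper bound is three minor versions ahead, i.e. <1.23.
from setuptools import setup

setup(
    name="example-package",  # hypothetical name, not part of the CuPy change
    install_requires=[
        "numpy>=1.17,<1.23",  # runtime dependency with a pinned upper bound
        "scipy>=1.4,<1.9",    # the same rule applied to SciPy
    ],
)
```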
| [
{
"content": "#!/usr/bin/env python\n\nimport glob\nimport os\nfrom setuptools import setup, find_packages\nimport sys\n\nimport cupy_setup_build\n\n\nfor submodule in ('cupy/_core/include/cupy/cub/',\n 'cupy/_core/include/cupy/jitify'):\n if len(os.listdir(submodule)) == 0:\n msg = '''\n The folder %s is a git submodule but is\n currently empty. Please use the command\n\n git submodule update --init\n\n to populate the folder before building from source.\n ''' % submodule\n print(msg, file=sys.stderr)\n sys.exit(1)\n\n\nrequirements = {\n # TODO(kmaehashi): migrate to pyproject.toml (see #4727, #4619)\n 'setup': [\n 'Cython>=0.29.22',\n 'fastrlock>=0.5',\n ],\n\n 'install': [\n 'numpy>=1.17',\n 'fastrlock>=0.5',\n ],\n 'all': [\n 'scipy>=1.4',\n 'optuna>=2.0',\n ],\n\n 'stylecheck': [\n 'autopep8==1.5.5',\n 'flake8==3.8.4',\n 'pbr==5.5.1',\n 'pycodestyle==2.6.0',\n ],\n 'test': [\n # 4.2 <= pytest < 6.2 is slow collecting tests and times out on CI.\n 'pytest>=6.2',\n ],\n 'jenkins': [\n '-r test',\n 'pytest-timeout',\n 'pytest-cov',\n 'coveralls',\n 'codecov',\n 'coverage<5', # Otherwise, Python must be built with sqlite\n ],\n}\n\n\ndef reduce_requirements(key):\n # Resolve recursive requirements notation (-r)\n reqs = requirements[key]\n resolved_reqs = []\n for req in reqs:\n if req.startswith('-r'):\n depend_key = req[2:].lstrip()\n reduce_requirements(depend_key)\n resolved_reqs += requirements[depend_key]\n else:\n resolved_reqs.append(req)\n requirements[key] = resolved_reqs\n\n\nfor k in requirements.keys():\n reduce_requirements(k)\n\n\nextras_require = {k: v for k, v in requirements.items() if k != 'install'}\n\n\nsetup_requires = requirements['setup']\ninstall_requires = requirements['install']\ntests_require = requirements['test']\n\n# List of files that needs to be in the distribution (sdist/wheel).\n# Notes:\n# - Files only needed in sdist should be added to `MANIFEST.in`.\n# - The following glob (`**`) ignores items starting with `.`.\ncupy_package_data = [\n 'cupy/cuda/cupy_thrust.cu',\n 'cupy/cuda/cupy_cub.cu',\n 'cupy/cuda/cupy_cufftXt.cu', # for cuFFT callback\n 'cupy/cuda/cupy_cufftXt.h', # for cuFFT callback\n 'cupy/cuda/cupy_cufft.h', # for cuFFT callback\n 'cupy/cuda/cufft.pxd', # for cuFFT callback\n 'cupy/cuda/cufft.pyx', # for cuFFT callback\n 'cupy/random/cupy_distributions.cu',\n 'cupy/random/cupy_distributions.cuh',\n] + [\n x for x in glob.glob('cupy/_core/include/cupy/**', recursive=True)\n if os.path.isfile(x)\n]\n\npackage_data = {\n 'cupy': [\n os.path.relpath(x, 'cupy') for x in cupy_package_data\n ],\n}\n\npackage_data['cupy'] += cupy_setup_build.prepare_wheel_libs()\n\npackage_name = cupy_setup_build.get_package_name()\nlong_description = cupy_setup_build.get_long_description()\next_modules = cupy_setup_build.get_ext_modules()\nbuild_ext = cupy_setup_build.custom_build_ext\n\nhere = os.path.abspath(os.path.dirname(__file__))\n# Get __version__ variable\nwith open(os.path.join(here, 'cupy', '_version.py')) as f:\n exec(f.read())\n\nCLASSIFIERS = \"\"\"\\\nDevelopment Status :: 5 - Production/Stable\nIntended Audience :: Science/Research\nIntended Audience :: Developers\nLicense :: OSI Approved :: MIT License\nProgramming Language :: Python\nProgramming Language :: Python :: 3\nProgramming Language :: Python :: 3.6\nProgramming Language :: Python :: 3.7\nProgramming Language :: Python :: 3.8\nProgramming Language :: Python :: 3.9\nProgramming Language :: Python :: 3 :: Only\nProgramming Language :: Cython\nTopic :: Software Development\nTopic :: 
Scientific/Engineering\nOperating System :: POSIX\nOperating System :: Microsoft :: Windows\n\"\"\"\n\n\nsetup(\n name=package_name,\n version=__version__, # NOQA\n description='CuPy: A NumPy-compatible array library accelerated by CUDA',\n long_description=long_description,\n author='Seiya Tokui',\n author_email='[email protected]',\n url='https://cupy.dev/',\n license='MIT License',\n project_urls={\n \"Bug Tracker\": \"https://github.com/cupy/cupy/issues\",\n \"Documentation\": \"https://docs.cupy.dev/\",\n \"Source Code\": \"https://github.com/cupy/cupy\",\n },\n classifiers=[_f for _f in CLASSIFIERS.split('\\n') if _f],\n packages=find_packages(exclude=['install', 'tests']),\n package_data=package_data,\n zip_safe=False,\n python_requires='>=3.6.0',\n setup_requires=setup_requires,\n install_requires=install_requires,\n tests_require=tests_require,\n extras_require=extras_require,\n ext_modules=ext_modules,\n cmdclass={'build_ext': build_ext},\n)\n",
"path": "setup.py"
}
] | [
{
"content": "#!/usr/bin/env python\n\nimport glob\nimport os\nfrom setuptools import setup, find_packages\nimport sys\n\nimport cupy_setup_build\n\n\nfor submodule in ('cupy/_core/include/cupy/cub/',\n 'cupy/_core/include/cupy/jitify'):\n if len(os.listdir(submodule)) == 0:\n msg = '''\n The folder %s is a git submodule but is\n currently empty. Please use the command\n\n git submodule update --init\n\n to populate the folder before building from source.\n ''' % submodule\n print(msg, file=sys.stderr)\n sys.exit(1)\n\n\nrequirements = {\n # TODO(kmaehashi): migrate to pyproject.toml (see #4727, #4619)\n 'setup': [\n 'Cython>=0.29.22',\n 'fastrlock>=0.5',\n ],\n\n 'install': [\n 'numpy>=1.17,<1.23', # see #4773\n 'fastrlock>=0.5',\n ],\n 'all': [\n 'scipy>=1.4,<1.9', # see #4773\n 'optuna>=2.0',\n ],\n\n 'stylecheck': [\n 'autopep8==1.5.5',\n 'flake8==3.8.4',\n 'pbr==5.5.1',\n 'pycodestyle==2.6.0',\n ],\n 'test': [\n # 4.2 <= pytest < 6.2 is slow collecting tests and times out on CI.\n 'pytest>=6.2',\n ],\n 'jenkins': [\n '-r test',\n 'pytest-timeout',\n 'pytest-cov',\n 'coveralls',\n 'codecov',\n 'coverage<5', # Otherwise, Python must be built with sqlite\n ],\n}\n\n\ndef reduce_requirements(key):\n # Resolve recursive requirements notation (-r)\n reqs = requirements[key]\n resolved_reqs = []\n for req in reqs:\n if req.startswith('-r'):\n depend_key = req[2:].lstrip()\n reduce_requirements(depend_key)\n resolved_reqs += requirements[depend_key]\n else:\n resolved_reqs.append(req)\n requirements[key] = resolved_reqs\n\n\nfor k in requirements.keys():\n reduce_requirements(k)\n\n\nextras_require = {k: v for k, v in requirements.items() if k != 'install'}\n\n\nsetup_requires = requirements['setup']\ninstall_requires = requirements['install']\ntests_require = requirements['test']\n\n# List of files that needs to be in the distribution (sdist/wheel).\n# Notes:\n# - Files only needed in sdist should be added to `MANIFEST.in`.\n# - The following glob (`**`) ignores items starting with `.`.\ncupy_package_data = [\n 'cupy/cuda/cupy_thrust.cu',\n 'cupy/cuda/cupy_cub.cu',\n 'cupy/cuda/cupy_cufftXt.cu', # for cuFFT callback\n 'cupy/cuda/cupy_cufftXt.h', # for cuFFT callback\n 'cupy/cuda/cupy_cufft.h', # for cuFFT callback\n 'cupy/cuda/cufft.pxd', # for cuFFT callback\n 'cupy/cuda/cufft.pyx', # for cuFFT callback\n 'cupy/random/cupy_distributions.cu',\n 'cupy/random/cupy_distributions.cuh',\n] + [\n x for x in glob.glob('cupy/_core/include/cupy/**', recursive=True)\n if os.path.isfile(x)\n]\n\npackage_data = {\n 'cupy': [\n os.path.relpath(x, 'cupy') for x in cupy_package_data\n ],\n}\n\npackage_data['cupy'] += cupy_setup_build.prepare_wheel_libs()\n\npackage_name = cupy_setup_build.get_package_name()\nlong_description = cupy_setup_build.get_long_description()\next_modules = cupy_setup_build.get_ext_modules()\nbuild_ext = cupy_setup_build.custom_build_ext\n\nhere = os.path.abspath(os.path.dirname(__file__))\n# Get __version__ variable\nwith open(os.path.join(here, 'cupy', '_version.py')) as f:\n exec(f.read())\n\nCLASSIFIERS = \"\"\"\\\nDevelopment Status :: 5 - Production/Stable\nIntended Audience :: Science/Research\nIntended Audience :: Developers\nLicense :: OSI Approved :: MIT License\nProgramming Language :: Python\nProgramming Language :: Python :: 3\nProgramming Language :: Python :: 3.6\nProgramming Language :: Python :: 3.7\nProgramming Language :: Python :: 3.8\nProgramming Language :: Python :: 3.9\nProgramming Language :: Python :: 3 :: Only\nProgramming Language :: Cython\nTopic :: 
Software Development\nTopic :: Scientific/Engineering\nOperating System :: POSIX\nOperating System :: Microsoft :: Windows\n\"\"\"\n\n\nsetup(\n name=package_name,\n version=__version__, # NOQA\n description='CuPy: A NumPy-compatible array library accelerated by CUDA',\n long_description=long_description,\n author='Seiya Tokui',\n author_email='[email protected]',\n url='https://cupy.dev/',\n license='MIT License',\n project_urls={\n \"Bug Tracker\": \"https://github.com/cupy/cupy/issues\",\n \"Documentation\": \"https://docs.cupy.dev/\",\n \"Source Code\": \"https://github.com/cupy/cupy\",\n },\n classifiers=[_f for _f in CLASSIFIERS.split('\\n') if _f],\n packages=find_packages(exclude=['install', 'tests']),\n package_data=package_data,\n zip_safe=False,\n python_requires='>=3.6.0',\n setup_requires=setup_requires,\n install_requires=install_requires,\n tests_require=tests_require,\n extras_require=extras_require,\n ext_modules=ext_modules,\n cmdclass={'build_ext': build_ext},\n)\n",
"path": "setup.py"
}
] | diff --git a/setup.py b/setup.py
index 200fa0da730..9ba1a739d44 100644
--- a/setup.py
+++ b/setup.py
@@ -31,11 +31,11 @@
],
'install': [
- 'numpy>=1.17',
+ 'numpy>=1.17,<1.23', # see #4773
'fastrlock>=0.5',
],
'all': [
- 'scipy>=1.4',
+ 'scipy>=1.4,<1.9', # see #4773
'optuna>=2.0',
],
|
feast-dev__feast-3966 | Bump the cryptography version to 42
**Is your feature request related to a problem? Please describe.**
The `cryptography<42` package has some medium-severity vulnerabilities. Example: https://scout.docker.com/vulnerabilities/id/CVE-2023-50782?s=github&n=cryptography&t=pypi&vr=%3C42.0.0&utm_source=desktop&utm_medium=ExternalLink
starlette and fastapi had some high-severity vulnerabilities, but those were removed thanks to recent version bumps.
**Describe the solution you'd like**
Bump the cryptography package to >=42. Nice to have: also bump other compatible packages.
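As a side note, a small sketch (an assumption, not part of the requested change) for checking whether an installed environment is still on a version in the flagged range; it relies on the stdlib `importlib.metadata` (Python 3.8+) and the third-party `packaging` module.

```python
# Hedged helper sketch: flag cryptography installs below 42.0.0,
# the range affected by CVE-2023-50782 referenced above.
from importlib.metadata import version
from packaging.version import Version

installed = Version(version("cryptography"))
if installed < Version("42.0.0"):
    print(f"cryptography {installed} is below 42.0.0; consider upgrading")
else:
    print(f"cryptography {installed} is already >= 42.0.0")
```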
| [
{
"content": "# Copyright 2019 The Feast Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport glob\nimport os\nimport pathlib\nimport re\nimport shutil\nimport subprocess\nimport sys\nfrom distutils.cmd import Command\nfrom pathlib import Path\n\nfrom setuptools import find_packages\n\ntry:\n from setuptools import setup\n from setuptools.command.build_ext import build_ext as _build_ext\n from setuptools.command.build_py import build_py\n from setuptools.command.develop import develop\n from setuptools.command.install import install\n\nexcept ImportError:\n from distutils.command.build_py import build_py\n from distutils.core import setup\n\nNAME = \"feast\"\nDESCRIPTION = \"Python SDK for Feast\"\nURL = \"https://github.com/feast-dev/feast\"\nAUTHOR = \"Feast\"\nREQUIRES_PYTHON = \">=3.8.0\"\n\nREQUIRED = [\n \"click>=7.0.0,<9.0.0\",\n \"colorama>=0.3.9,<1\",\n \"dill~=0.3.0\",\n \"fastavro>=1.1.0,<2\",\n \"grpcio>=1.56.2,<2\",\n \"grpcio-tools>=1.56.2,<2\",\n \"grpcio-reflection>=1.56.2,<2\",\n \"grpcio-health-checking>=1.56.2,<2\",\n \"mypy-protobuf==3.1\",\n \"Jinja2>=2,<4\",\n \"jsonschema\",\n \"mmh3\",\n \"numpy>=1.22,<1.25\",\n \"pandas>=1.4.3,<2\",\n # For some reason pandavro higher than 1.5.* only support pandas less than 1.3.\n \"pandavro~=1.5.0\",\n # Higher than 4.23.4 seems to cause a seg fault\n \"protobuf<4.23.4,>3.20\",\n \"proto-plus>=1.20.0,<2\",\n \"pyarrow>=4\",\n \"pydantic>=2.0.0\",\n \"pygments>=2.12.0,<3\",\n \"PyYAML>=5.4.0,<7\",\n \"requests\",\n \"SQLAlchemy[mypy]>1,<2\",\n \"tabulate>=0.8.0,<1\",\n \"tenacity>=7,<9\",\n \"toml>=0.10.0,<1\",\n \"tqdm>=4,<5\",\n \"typeguard>=4.0.0\",\n \"fastapi>=0.68.0\",\n \"uvicorn[standard]>=0.14.0,<1\",\n \"gunicorn\",\n \"dask>=2021.1.0\",\n \"bowler\", # Needed for automatic repo upgrades\n # FastAPI does not correctly pull starlette dependency on httpx see thread(https://github.com/tiangolo/fastapi/issues/5656).\n \"httpx>=0.23.3\",\n \"importlib-resources>=6.0.0,<7\",\n \"importlib_metadata>=6.8.0,<7\",\n]\n\nGCP_REQUIRED = [\n \"google-api-core>=1.23.0,<3\",\n \"googleapis-common-protos>=1.52.0,<2\",\n \"google-cloud-bigquery[pandas]>=2,<3.13.0\",\n \"google-cloud-bigquery-storage >= 2.0.0,<3\",\n \"google-cloud-datastore>=2.1.0,<3\",\n \"google-cloud-storage>=1.34.0,<3\",\n \"google-cloud-bigtable>=2.11.0,<3\",\n \"fsspec<=2024.1.0\",\n]\n\nREDIS_REQUIRED = [\n \"redis>=4.2.2,<5\",\n \"hiredis>=2.0.0,<3\",\n]\n\nAWS_REQUIRED = [\"boto3>=1.17.0,<2\", \"docker>=5.0.2\", \"fsspec<=2024.1.0\"]\n\nBYTEWAX_REQUIRED = [\"bytewax==0.15.1\", \"docker>=5.0.2\", \"kubernetes<=20.13.0\"]\n\nSNOWFLAKE_REQUIRED = [\n \"snowflake-connector-python[pandas]>=3,<4\",\n]\n\nSPARK_REQUIRED = [\n \"pyspark>=3.0.0,<4\",\n]\n\nTRINO_REQUIRED = [\"trino>=0.305.0,<0.400.0\", \"regex\"]\n\nPOSTGRES_REQUIRED = [\n \"psycopg2-binary>=2.8.3,<3\",\n]\n\nMYSQL_REQUIRED = [\"pymysql\", \"types-PyMySQL\"]\n\nHBASE_REQUIRED = [\n \"happybase>=1.2.0,<3\",\n]\n\nCASSANDRA_REQUIRED = [\n \"cassandra-driver>=3.24.0,<4\",\n]\n\nGE_REQUIRED = 
[\"great_expectations>=0.15.41\"]\n\nAZURE_REQUIRED = [\n \"azure-storage-blob>=0.37.0\",\n \"azure-identity>=1.6.1\",\n \"SQLAlchemy>=1.4.19\",\n \"pyodbc>=4.0.30\",\n \"pymssql\",\n]\n\nROCKSET_REQUIRED = [\n \"rockset>=1.0.3\",\n]\n\nHAZELCAST_REQUIRED = [\n \"hazelcast-python-client>=5.1\",\n]\n\nCI_REQUIRED = (\n [\n \"build\",\n \"virtualenv==20.23.0\",\n \"cryptography>=35.0,<42\",\n \"flake8>=6.0.0,<6.1.0\",\n \"black>=22.6.0,<23\",\n \"isort>=5,<6\",\n \"grpcio-testing>=1.56.2,<2\",\n \"minio==7.1.0\",\n \"mock==2.0.0\",\n \"moto<5\",\n \"mypy>=1.4.1\",\n \"avro==1.10.0\",\n \"urllib3>=1.25.4,<3\",\n \"psutil==5.9.0\",\n \"py>=1.11.0\", # https://github.com/pytest-dev/pytest/issues/10420\n \"pytest>=6.0.0,<8\",\n \"pytest-cov\",\n \"pytest-xdist\",\n \"pytest-benchmark>=3.4.1,<4\",\n \"pytest-lazy-fixture==0.6.3\",\n \"pytest-timeout==1.4.2\",\n \"pytest-ordering~=0.6.0\",\n \"pytest-mock==1.10.4\",\n \"Sphinx>4.0.0,<7\",\n \"testcontainers>=3.5,<4\",\n \"firebase-admin>=5.2.0,<6\",\n \"pre-commit<3.3.2\",\n \"assertpy==1.1\",\n \"pip-tools\",\n \"pybindgen\",\n \"types-protobuf~=3.19.22\",\n \"types-python-dateutil\",\n \"types-pytz\",\n \"types-PyYAML\",\n \"types-redis\",\n \"types-requests<2.31.0\",\n \"types-setuptools\",\n \"types-tabulate\",\n \"virtualenv<20.24.2\",\n ]\n + GCP_REQUIRED\n + REDIS_REQUIRED\n + AWS_REQUIRED\n + BYTEWAX_REQUIRED\n + SNOWFLAKE_REQUIRED\n + SPARK_REQUIRED\n + POSTGRES_REQUIRED\n + MYSQL_REQUIRED\n + TRINO_REQUIRED\n + GE_REQUIRED\n + HBASE_REQUIRED\n + CASSANDRA_REQUIRED\n + AZURE_REQUIRED\n + ROCKSET_REQUIRED\n + HAZELCAST_REQUIRED\n)\n\n\n# rtd builds fail because of mysql not being installed in their environment.\n# We can add mysql there, but it's not strictly needed. This will be faster for builds.\nDOCS_REQUIRED = CI_REQUIRED.copy()\nfor _r in MYSQL_REQUIRED:\n DOCS_REQUIRED.remove(_r)\n\nDEV_REQUIRED = [\"mypy-protobuf==3.1\", \"grpcio-testing~=1.0\"] + CI_REQUIRED\n\n# Get git repo root directory\nrepo_root = str(pathlib.Path(__file__).resolve().parent)\n\n# README file from Feast repo root directory\nREADME_FILE = os.path.join(repo_root, \"README.md\")\nwith open(README_FILE, \"r\", encoding=\"utf8\") as f:\n LONG_DESCRIPTION = f.read()\n\n# Add Support for parsing tags that have a prefix containing '/' (ie 'sdk/go') to setuptools_scm.\n# Regex modified from default tag regex in:\n# https://github.com/pypa/setuptools_scm/blob/2a1b46d38fb2b8aeac09853e660bcd0d7c1bc7be/src/setuptools_scm/config.py#L9\nTAG_REGEX = re.compile(\n r\"^(?:[\\/\\w-]+)?(?P<version>[vV]?\\d+(?:\\.\\d+){0,2}[^\\+]*)(?:\\+.*)?$\"\n)\n\n# Only set use_scm_version if git executable exists (setting this variable causes pip to use git under the hood)\nif shutil.which(\"git\"):\n use_scm_version = {\"root\": \".\", \"relative_to\": __file__, \"tag_regex\": TAG_REGEX}\nelse:\n use_scm_version = None\n\nPROTO_SUBDIRS = [\"core\", \"registry\", \"serving\", \"types\", \"storage\"]\nPYTHON_CODE_PREFIX = \"sdk/python\"\n\n\nclass BuildPythonProtosCommand(Command):\n description = \"Builds the proto files into Python files.\"\n user_options = [\n (\"inplace\", \"i\", \"Write generated proto files to source directory.\"),\n ]\n\n def initialize_options(self):\n self.python_protoc = [\n sys.executable,\n \"-m\",\n \"grpc_tools.protoc\",\n ] # find_executable(\"protoc\")\n self.proto_folder = os.path.join(repo_root, \"protos\")\n self.sub_folders = PROTO_SUBDIRS\n self.build_lib = None\n self.inplace = 0\n\n def finalize_options(self):\n self.set_undefined_options(\"build\", 
(\"build_lib\", \"build_lib\"))\n\n @property\n def python_folder(self):\n if self.inplace:\n return os.path.join(\n os.path.dirname(__file__) or os.getcwd(), \"sdk/python/feast/protos\"\n )\n\n return os.path.join(self.build_lib, \"feast/protos\")\n\n def _generate_python_protos(self, path: str):\n proto_files = glob.glob(os.path.join(self.proto_folder, path))\n Path(self.python_folder).mkdir(parents=True, exist_ok=True)\n subprocess.check_call(\n self.python_protoc\n + [\n \"-I\",\n self.proto_folder,\n \"--python_out\",\n self.python_folder,\n \"--grpc_python_out\",\n self.python_folder,\n \"--mypy_out\",\n self.python_folder,\n ]\n + proto_files\n )\n\n def run(self):\n for sub_folder in self.sub_folders:\n self._generate_python_protos(f\"feast/{sub_folder}/*.proto\")\n # We need the __init__ files for each of the generated subdirs\n # so that they are regular packages, and don't need the `--namespace-packages` flags\n # when being typechecked using mypy.\n with open(f\"{self.python_folder}/feast/{sub_folder}/__init__.py\", \"w\"):\n pass\n\n with open(f\"{self.python_folder}/__init__.py\", \"w\"):\n pass\n with open(f\"{self.python_folder}/feast/__init__.py\", \"w\"):\n pass\n\n for path in Path(self.python_folder).rglob(\"*.py\"):\n for folder in self.sub_folders:\n # Read in the file\n with open(path, \"r\") as file:\n filedata = file.read()\n\n # Replace the target string\n filedata = filedata.replace(\n f\"from feast.{folder}\", f\"from feast.protos.feast.{folder}\"\n )\n\n # Write the file out again\n with open(path, \"w\") as file:\n file.write(filedata)\n\n\nclass BuildCommand(build_py):\n \"\"\"Custom build command.\"\"\"\n\n def run(self):\n self.run_command(\"build_python_protos\")\n\n self.run_command(\"build_ext\")\n build_py.run(self)\n\n\nclass DevelopCommand(develop):\n \"\"\"Custom develop command.\"\"\"\n\n def run(self):\n self.reinitialize_command(\"build_python_protos\", inplace=1)\n self.run_command(\"build_python_protos\")\n\n develop.run(self)\n\n\nsetup(\n name=NAME,\n author=AUTHOR,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n long_description_content_type=\"text/markdown\",\n python_requires=REQUIRES_PYTHON,\n url=URL,\n packages=find_packages(\n where=PYTHON_CODE_PREFIX, exclude=(\"java\", \"infra\", \"sdk/python/tests\", \"ui\")\n ),\n package_dir={\"\": PYTHON_CODE_PREFIX},\n install_requires=REQUIRED,\n # https://stackoverflow.com/questions/28509965/setuptools-development-requirements\n # Install dev requirements with: pip install -e .[dev]\n extras_require={\n \"dev\": DEV_REQUIRED,\n \"ci\": CI_REQUIRED,\n \"gcp\": GCP_REQUIRED,\n \"aws\": AWS_REQUIRED,\n \"bytewax\": BYTEWAX_REQUIRED,\n \"redis\": REDIS_REQUIRED,\n \"snowflake\": SNOWFLAKE_REQUIRED,\n \"spark\": SPARK_REQUIRED,\n \"trino\": TRINO_REQUIRED,\n \"postgres\": POSTGRES_REQUIRED,\n \"azure\": AZURE_REQUIRED,\n \"mysql\": MYSQL_REQUIRED,\n \"ge\": GE_REQUIRED,\n \"hbase\": HBASE_REQUIRED,\n \"docs\": DOCS_REQUIRED,\n \"cassandra\": CASSANDRA_REQUIRED,\n \"hazelcast\": HAZELCAST_REQUIRED,\n \"rockset\": ROCKSET_REQUIRED,\n },\n include_package_data=True,\n license=\"Apache\",\n classifiers=[\n # Trove classifiers\n # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n ],\n entry_points={\"console_scripts\": [\"feast=feast.cli:cli\"]},\n use_scm_version=use_scm_version,\n 
setup_requires=[\n \"setuptools_scm\",\n \"grpcio>=1.56.2,<2\",\n \"grpcio-tools>=1.56.2,<2\",\n \"mypy-protobuf==3.1\",\n \"pybindgen==0.22.0\",\n ],\n cmdclass={\n \"build_python_protos\": BuildPythonProtosCommand,\n \"build_py\": BuildCommand,\n \"develop\": DevelopCommand,\n },\n)\n",
"path": "setup.py"
}
] | [
{
"content": "# Copyright 2019 The Feast Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport glob\nimport os\nimport pathlib\nimport re\nimport shutil\nimport subprocess\nimport sys\nfrom distutils.cmd import Command\nfrom pathlib import Path\n\nfrom setuptools import find_packages\n\ntry:\n from setuptools import setup\n from setuptools.command.build_ext import build_ext as _build_ext\n from setuptools.command.build_py import build_py\n from setuptools.command.develop import develop\n from setuptools.command.install import install\n\nexcept ImportError:\n from distutils.command.build_py import build_py\n from distutils.core import setup\n\nNAME = \"feast\"\nDESCRIPTION = \"Python SDK for Feast\"\nURL = \"https://github.com/feast-dev/feast\"\nAUTHOR = \"Feast\"\nREQUIRES_PYTHON = \">=3.8.0\"\n\nREQUIRED = [\n \"click>=7.0.0,<9.0.0\",\n \"colorama>=0.3.9,<1\",\n \"dill~=0.3.0\",\n \"fastavro>=1.1.0,<2\",\n \"grpcio>=1.56.2,<2\",\n \"grpcio-tools>=1.56.2,<2\",\n \"grpcio-reflection>=1.56.2,<2\",\n \"grpcio-health-checking>=1.56.2,<2\",\n \"mypy-protobuf==3.1\",\n \"Jinja2>=2,<4\",\n \"jsonschema\",\n \"mmh3\",\n \"numpy>=1.22,<1.25\",\n \"pandas>=1.4.3,<2\",\n # For some reason pandavro higher than 1.5.* only support pandas less than 1.3.\n \"pandavro~=1.5.0\",\n # Higher than 4.23.4 seems to cause a seg fault\n \"protobuf<4.23.4,>3.20\",\n \"proto-plus>=1.20.0,<2\",\n \"pyarrow>=4\",\n \"pydantic>=2.0.0\",\n \"pygments>=2.12.0,<3\",\n \"PyYAML>=5.4.0,<7\",\n \"requests\",\n \"SQLAlchemy[mypy]>1,<2\",\n \"tabulate>=0.8.0,<1\",\n \"tenacity>=7,<9\",\n \"toml>=0.10.0,<1\",\n \"tqdm>=4,<5\",\n \"typeguard>=4.0.0\",\n \"fastapi>=0.68.0\",\n \"uvicorn[standard]>=0.14.0,<1\",\n \"gunicorn\",\n \"dask>=2021.1.0\",\n \"bowler\", # Needed for automatic repo upgrades\n # FastAPI does not correctly pull starlette dependency on httpx see thread(https://github.com/tiangolo/fastapi/issues/5656).\n \"httpx>=0.23.3\",\n \"importlib-resources>=6.0.0,<7\",\n \"importlib_metadata>=6.8.0,<7\",\n]\n\nGCP_REQUIRED = [\n \"google-api-core>=1.23.0,<3\",\n \"googleapis-common-protos>=1.52.0,<2\",\n \"google-cloud-bigquery[pandas]>=2,<3.13.0\",\n \"google-cloud-bigquery-storage >= 2.0.0,<3\",\n \"google-cloud-datastore>=2.1.0,<3\",\n \"google-cloud-storage>=1.34.0,<3\",\n \"google-cloud-bigtable>=2.11.0,<3\",\n \"fsspec<=2024.1.0\",\n]\n\nREDIS_REQUIRED = [\n \"redis>=4.2.2,<5\",\n \"hiredis>=2.0.0,<3\",\n]\n\nAWS_REQUIRED = [\"boto3>=1.17.0,<2\", \"docker>=5.0.2\", \"fsspec<=2024.1.0\"]\n\nBYTEWAX_REQUIRED = [\"bytewax==0.15.1\", \"docker>=5.0.2\", \"kubernetes<=20.13.0\"]\n\nSNOWFLAKE_REQUIRED = [\n \"snowflake-connector-python[pandas]>=3,<4\",\n]\n\nSPARK_REQUIRED = [\n \"pyspark>=3.0.0,<4\",\n]\n\nTRINO_REQUIRED = [\"trino>=0.305.0,<0.400.0\", \"regex\"]\n\nPOSTGRES_REQUIRED = [\n \"psycopg2-binary>=2.8.3,<3\",\n]\n\nMYSQL_REQUIRED = [\"pymysql\", \"types-PyMySQL\"]\n\nHBASE_REQUIRED = [\n \"happybase>=1.2.0,<3\",\n]\n\nCASSANDRA_REQUIRED = [\n \"cassandra-driver>=3.24.0,<4\",\n]\n\nGE_REQUIRED = 
[\"great_expectations>=0.15.41\"]\n\nAZURE_REQUIRED = [\n \"azure-storage-blob>=0.37.0\",\n \"azure-identity>=1.6.1\",\n \"SQLAlchemy>=1.4.19\",\n \"pyodbc>=4.0.30\",\n \"pymssql\",\n]\n\nROCKSET_REQUIRED = [\n \"rockset>=1.0.3\",\n]\n\nHAZELCAST_REQUIRED = [\n \"hazelcast-python-client>=5.1\",\n]\n\nCI_REQUIRED = (\n [\n \"build\",\n \"virtualenv==20.23.0\",\n \"cryptography>=35.0,<43\",\n \"flake8>=6.0.0,<6.1.0\",\n \"black>=22.6.0,<23\",\n \"isort>=5,<6\",\n \"grpcio-testing>=1.56.2,<2\",\n \"minio==7.1.0\",\n \"mock==2.0.0\",\n \"moto<5\",\n \"mypy>=1.4.1\",\n \"avro==1.10.0\",\n \"urllib3>=1.25.4,<3\",\n \"psutil==5.9.0\",\n \"py>=1.11.0\", # https://github.com/pytest-dev/pytest/issues/10420\n \"pytest>=6.0.0,<8\",\n \"pytest-cov\",\n \"pytest-xdist\",\n \"pytest-benchmark>=3.4.1,<4\",\n \"pytest-lazy-fixture==0.6.3\",\n \"pytest-timeout==1.4.2\",\n \"pytest-ordering~=0.6.0\",\n \"pytest-mock==1.10.4\",\n \"Sphinx>4.0.0,<7\",\n \"testcontainers>=3.5,<4\",\n \"firebase-admin>=5.2.0,<6\",\n \"pre-commit<3.3.2\",\n \"assertpy==1.1\",\n \"pip-tools\",\n \"pybindgen\",\n \"types-protobuf~=3.19.22\",\n \"types-python-dateutil\",\n \"types-pytz\",\n \"types-PyYAML\",\n \"types-redis\",\n \"types-requests<2.31.0\",\n \"types-setuptools\",\n \"types-tabulate\",\n \"virtualenv<20.24.2\",\n ]\n + GCP_REQUIRED\n + REDIS_REQUIRED\n + AWS_REQUIRED\n + BYTEWAX_REQUIRED\n + SNOWFLAKE_REQUIRED\n + SPARK_REQUIRED\n + POSTGRES_REQUIRED\n + MYSQL_REQUIRED\n + TRINO_REQUIRED\n + GE_REQUIRED\n + HBASE_REQUIRED\n + CASSANDRA_REQUIRED\n + AZURE_REQUIRED\n + ROCKSET_REQUIRED\n + HAZELCAST_REQUIRED\n)\n\n\n# rtd builds fail because of mysql not being installed in their environment.\n# We can add mysql there, but it's not strictly needed. This will be faster for builds.\nDOCS_REQUIRED = CI_REQUIRED.copy()\nfor _r in MYSQL_REQUIRED:\n DOCS_REQUIRED.remove(_r)\n\nDEV_REQUIRED = [\"mypy-protobuf==3.1\", \"grpcio-testing~=1.0\"] + CI_REQUIRED\n\n# Get git repo root directory\nrepo_root = str(pathlib.Path(__file__).resolve().parent)\n\n# README file from Feast repo root directory\nREADME_FILE = os.path.join(repo_root, \"README.md\")\nwith open(README_FILE, \"r\", encoding=\"utf8\") as f:\n LONG_DESCRIPTION = f.read()\n\n# Add Support for parsing tags that have a prefix containing '/' (ie 'sdk/go') to setuptools_scm.\n# Regex modified from default tag regex in:\n# https://github.com/pypa/setuptools_scm/blob/2a1b46d38fb2b8aeac09853e660bcd0d7c1bc7be/src/setuptools_scm/config.py#L9\nTAG_REGEX = re.compile(\n r\"^(?:[\\/\\w-]+)?(?P<version>[vV]?\\d+(?:\\.\\d+){0,2}[^\\+]*)(?:\\+.*)?$\"\n)\n\n# Only set use_scm_version if git executable exists (setting this variable causes pip to use git under the hood)\nif shutil.which(\"git\"):\n use_scm_version = {\"root\": \".\", \"relative_to\": __file__, \"tag_regex\": TAG_REGEX}\nelse:\n use_scm_version = None\n\nPROTO_SUBDIRS = [\"core\", \"registry\", \"serving\", \"types\", \"storage\"]\nPYTHON_CODE_PREFIX = \"sdk/python\"\n\n\nclass BuildPythonProtosCommand(Command):\n description = \"Builds the proto files into Python files.\"\n user_options = [\n (\"inplace\", \"i\", \"Write generated proto files to source directory.\"),\n ]\n\n def initialize_options(self):\n self.python_protoc = [\n sys.executable,\n \"-m\",\n \"grpc_tools.protoc\",\n ] # find_executable(\"protoc\")\n self.proto_folder = os.path.join(repo_root, \"protos\")\n self.sub_folders = PROTO_SUBDIRS\n self.build_lib = None\n self.inplace = 0\n\n def finalize_options(self):\n self.set_undefined_options(\"build\", 
(\"build_lib\", \"build_lib\"))\n\n @property\n def python_folder(self):\n if self.inplace:\n return os.path.join(\n os.path.dirname(__file__) or os.getcwd(), \"sdk/python/feast/protos\"\n )\n\n return os.path.join(self.build_lib, \"feast/protos\")\n\n def _generate_python_protos(self, path: str):\n proto_files = glob.glob(os.path.join(self.proto_folder, path))\n Path(self.python_folder).mkdir(parents=True, exist_ok=True)\n subprocess.check_call(\n self.python_protoc\n + [\n \"-I\",\n self.proto_folder,\n \"--python_out\",\n self.python_folder,\n \"--grpc_python_out\",\n self.python_folder,\n \"--mypy_out\",\n self.python_folder,\n ]\n + proto_files\n )\n\n def run(self):\n for sub_folder in self.sub_folders:\n self._generate_python_protos(f\"feast/{sub_folder}/*.proto\")\n # We need the __init__ files for each of the generated subdirs\n # so that they are regular packages, and don't need the `--namespace-packages` flags\n # when being typechecked using mypy.\n with open(f\"{self.python_folder}/feast/{sub_folder}/__init__.py\", \"w\"):\n pass\n\n with open(f\"{self.python_folder}/__init__.py\", \"w\"):\n pass\n with open(f\"{self.python_folder}/feast/__init__.py\", \"w\"):\n pass\n\n for path in Path(self.python_folder).rglob(\"*.py\"):\n for folder in self.sub_folders:\n # Read in the file\n with open(path, \"r\") as file:\n filedata = file.read()\n\n # Replace the target string\n filedata = filedata.replace(\n f\"from feast.{folder}\", f\"from feast.protos.feast.{folder}\"\n )\n\n # Write the file out again\n with open(path, \"w\") as file:\n file.write(filedata)\n\n\nclass BuildCommand(build_py):\n \"\"\"Custom build command.\"\"\"\n\n def run(self):\n self.run_command(\"build_python_protos\")\n\n self.run_command(\"build_ext\")\n build_py.run(self)\n\n\nclass DevelopCommand(develop):\n \"\"\"Custom develop command.\"\"\"\n\n def run(self):\n self.reinitialize_command(\"build_python_protos\", inplace=1)\n self.run_command(\"build_python_protos\")\n\n develop.run(self)\n\n\nsetup(\n name=NAME,\n author=AUTHOR,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n long_description_content_type=\"text/markdown\",\n python_requires=REQUIRES_PYTHON,\n url=URL,\n packages=find_packages(\n where=PYTHON_CODE_PREFIX, exclude=(\"java\", \"infra\", \"sdk/python/tests\", \"ui\")\n ),\n package_dir={\"\": PYTHON_CODE_PREFIX},\n install_requires=REQUIRED,\n # https://stackoverflow.com/questions/28509965/setuptools-development-requirements\n # Install dev requirements with: pip install -e .[dev]\n extras_require={\n \"dev\": DEV_REQUIRED,\n \"ci\": CI_REQUIRED,\n \"gcp\": GCP_REQUIRED,\n \"aws\": AWS_REQUIRED,\n \"bytewax\": BYTEWAX_REQUIRED,\n \"redis\": REDIS_REQUIRED,\n \"snowflake\": SNOWFLAKE_REQUIRED,\n \"spark\": SPARK_REQUIRED,\n \"trino\": TRINO_REQUIRED,\n \"postgres\": POSTGRES_REQUIRED,\n \"azure\": AZURE_REQUIRED,\n \"mysql\": MYSQL_REQUIRED,\n \"ge\": GE_REQUIRED,\n \"hbase\": HBASE_REQUIRED,\n \"docs\": DOCS_REQUIRED,\n \"cassandra\": CASSANDRA_REQUIRED,\n \"hazelcast\": HAZELCAST_REQUIRED,\n \"rockset\": ROCKSET_REQUIRED,\n },\n include_package_data=True,\n license=\"Apache\",\n classifiers=[\n # Trove classifiers\n # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n ],\n entry_points={\"console_scripts\": [\"feast=feast.cli:cli\"]},\n use_scm_version=use_scm_version,\n 
setup_requires=[\n \"setuptools_scm\",\n \"grpcio>=1.56.2,<2\",\n \"grpcio-tools>=1.56.2,<2\",\n \"mypy-protobuf==3.1\",\n \"pybindgen==0.22.0\",\n ],\n cmdclass={\n \"build_python_protos\": BuildPythonProtosCommand,\n \"build_py\": BuildCommand,\n \"develop\": DevelopCommand,\n },\n)\n",
"path": "setup.py"
}
] | diff --git a/sdk/python/requirements/py3.10-ci-requirements.txt b/sdk/python/requirements/py3.10-ci-requirements.txt
index f20bc05df90..051eee0ad1c 100644
--- a/sdk/python/requirements/py3.10-ci-requirements.txt
+++ b/sdk/python/requirements/py3.10-ci-requirements.txt
@@ -124,7 +124,7 @@ comm==0.2.1
# ipywidgets
coverage[toml]==7.4.1
# via pytest-cov
-cryptography==41.0.7
+cryptography==42.0.4
# via
# azure-identity
# azure-storage-blob
@@ -659,7 +659,7 @@ pymysql==1.1.0
# via feast (setup.py)
pyodbc==5.1.0
# via feast (setup.py)
-pyopenssl==23.3.0
+pyopenssl==24.0.0
# via snowflake-connector-python
pyparsing==3.1.1
# via
@@ -805,7 +805,7 @@ sniffio==1.3.0
# httpx
snowballstemmer==2.2.0
# via sphinx
-snowflake-connector-python[pandas]==3.7.0
+snowflake-connector-python[pandas]==3.7.1
# via feast (setup.py)
sortedcontainers==2.4.0
# via snowflake-connector-python
diff --git a/sdk/python/requirements/py3.8-ci-requirements.txt b/sdk/python/requirements/py3.8-ci-requirements.txt
index afa43ec2a2b..bb177f2ec22 100644
--- a/sdk/python/requirements/py3.8-ci-requirements.txt
+++ b/sdk/python/requirements/py3.8-ci-requirements.txt
@@ -131,7 +131,7 @@ comm==0.2.1
# ipywidgets
coverage[toml]==7.4.1
# via pytest-cov
-cryptography==41.0.7
+cryptography==42.0.4
# via
# azure-identity
# azure-storage-blob
@@ -680,7 +680,7 @@ pymysql==1.1.0
# via feast (setup.py)
pyodbc==5.1.0
# via feast (setup.py)
-pyopenssl==23.3.0
+pyopenssl==24.0.0
# via snowflake-connector-python
pyparsing==3.1.1
# via
@@ -829,7 +829,7 @@ sniffio==1.3.0
# httpx
snowballstemmer==2.2.0
# via sphinx
-snowflake-connector-python[pandas]==3.7.0
+snowflake-connector-python[pandas]==3.7.1
# via feast (setup.py)
sortedcontainers==2.4.0
# via snowflake-connector-python
diff --git a/sdk/python/requirements/py3.9-ci-requirements.txt b/sdk/python/requirements/py3.9-ci-requirements.txt
index 6c26f889e27..a4104065bab 100644
--- a/sdk/python/requirements/py3.9-ci-requirements.txt
+++ b/sdk/python/requirements/py3.9-ci-requirements.txt
@@ -124,7 +124,7 @@ comm==0.2.1
# ipywidgets
coverage[toml]==7.4.1
# via pytest-cov
-cryptography==41.0.7
+cryptography==42.0.4
# via
# azure-identity
# azure-storage-blob
@@ -666,7 +666,7 @@ pymysql==1.1.0
# via feast (setup.py)
pyodbc==5.1.0
# via feast (setup.py)
-pyopenssl==23.3.0
+pyopenssl==24.0.0
# via snowflake-connector-python
pyparsing==3.1.1
# via
@@ -814,7 +814,7 @@ sniffio==1.3.0
# httpx
snowballstemmer==2.2.0
# via sphinx
-snowflake-connector-python[pandas]==3.7.0
+snowflake-connector-python[pandas]==3.7.1
# via feast (setup.py)
sortedcontainers==2.4.0
# via snowflake-connector-python
diff --git a/setup.py b/setup.py
index c14d64557a2..4a19b49f168 100644
--- a/setup.py
+++ b/setup.py
@@ -148,7 +148,7 @@
[
"build",
"virtualenv==20.23.0",
- "cryptography>=35.0,<42",
+ "cryptography>=35.0,<43",
"flake8>=6.0.0,<6.1.0",
"black>=22.6.0,<23",
"isort>=5,<6",
|
pytorch__vision-7613 | make_grid doesn't use kwargs
### 🐛 Describe the bug
In the `make_grid` function from `torchvision.utils`, `kwargs` is not used:
https://github.com/pytorch/vision/blob/300a90926e88f13abbaf3d8155cdba36aab86ab4/torchvision/utils.py#LL24C1-L33C19
Is this a bug? It's very easy to mistype some argument and not even notice because no exception is raised.
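A minimal sketch of the silent failure (assuming torchvision with the current `make_grid` signature; `value_ranges` is a deliberate typo of `value_range`):

```python
import torch
from torchvision.utils import make_grid

images = torch.rand(4, 3, 32, 32)
# The mistyped keyword is swallowed by **kwargs and silently ignored,
# so no TypeError is raised and the intended value_range never takes effect.
grid = make_grid(images, normalize=True, value_ranges=(0, 1))
print(grid.shape)
```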
### Versions
PyTorch version: 2.0.0+cpu
Is debug build: False
CUDA used to build PyTorch: None
ROCM used to build PyTorch: N/A
OS: Manjaro Linux (x86_64)
GCC version: (GCC) 12.2.1 20230201
Clang version: 15.0.7
CMake version: Could not collect
Libc version: glibc-2.37
Python version: 3.9.16 (main, Mar 8 2023, 14:00:05) [GCC 11.2.0] (64-bit runtime)
Python platform: Linux-6.1.29-1-MANJARO-x86_64-with-glibc2.37
Is CUDA available: False
CUDA runtime version: No CUDA
CUDA_MODULE_LOADING set to: N/A
GPU models and configuration: No CUDA
Nvidia driver version: No CUDA
cuDNN version: No CUDA
HIP runtime version: N/A
MIOpen runtime version: N/A
Is XNNPACK available: True
CPU:
Architecture: x86_64
CPU op-mode(s): 32-bit, 64-bit
Address sizes: 39 bits physical, 48 bits virtual
Byte Order: Little Endian
CPU(s): 4
On-line CPU(s) list: 0-3
Vendor ID: GenuineIntel
Model name: Intel(R) Core(TM) i7-7500U CPU @ 2.70GHz
CPU family: 6
Model: 142
Thread(s) per core: 2
Core(s) per socket: 2
Socket(s): 1
Stepping: 9
CPU(s) scaling MHz: 54%
CPU max MHz: 3500,0000
CPU min MHz: 400,0000
BogoMIPS: 5802,42
Flags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d arch_capabilities
Virtualization: VT-x
L1d cache: 64 KiB (2 instances)
L1i cache: 64 KiB (2 instances)
L2 cache: 512 KiB (2 instances)
L3 cache: 4 MiB (1 instance)
NUMA node(s): 1
NUMA node0 CPU(s): 0-3
Vulnerability Itlb multihit: KVM: Mitigation: VMX disabled
Vulnerability L1tf: Mitigation; PTE Inversion; VMX conditional cache flushes, SMT vulnerable
Vulnerability Mds: Mitigation; Clear CPU buffers; SMT vulnerable
Vulnerability Meltdown: Mitigation; PTI
Vulnerability Mmio stale data: Mitigation; Clear CPU buffers; SMT vulnerable
Vulnerability Retbleed: Mitigation; IBRS
Vulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl
Vulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization
Vulnerability Spectre v2: Mitigation; IBRS, IBPB conditional, STIBP conditional, RSB filling, PBRSB-eIBRS Not affected
Vulnerability Srbds: Mitigation; Microcode
Vulnerability Tsx async abort: Not affected
Versions of relevant libraries:
[pip3] efficientnet-pytorch==0.7.1
[pip3] mypy-extensions==1.0.0
[pip3] numpy==1.24.1
[pip3] segmentation-models-pytorch==0.3.2
[pip3] torch==2.0.0+cpu
[pip3] torchaudio==2.0.1+cpu
[pip3] torchvision==0.15.1+cpu
[conda] efficientnet-pytorch 0.7.1 pypi_0 pypi
[conda] numpy 1.24.1 pypi_0 pypi
[conda] segmentation-models-pytorch 0.3.2 pypi_0 pypi
[conda] torch 2.0.0+cpu pypi_0 pypi
[conda] torchaudio 2.0.1+cpu pypi_0 pypi
[conda] torchvision 0.15.1+cpu pypi_0 pypi
| [
{
"content": "import collections\nimport math\nimport pathlib\nimport warnings\nfrom itertools import repeat\nfrom types import FunctionType\nfrom typing import Any, BinaryIO, List, Optional, Tuple, Union\n\nimport numpy as np\nimport torch\nfrom PIL import Image, ImageColor, ImageDraw, ImageFont\n\n__all__ = [\n \"make_grid\",\n \"save_image\",\n \"draw_bounding_boxes\",\n \"draw_segmentation_masks\",\n \"draw_keypoints\",\n \"flow_to_image\",\n]\n\n\[email protected]_grad()\ndef make_grid(\n tensor: Union[torch.Tensor, List[torch.Tensor]],\n nrow: int = 8,\n padding: int = 2,\n normalize: bool = False,\n value_range: Optional[Tuple[int, int]] = None,\n scale_each: bool = False,\n pad_value: float = 0.0,\n **kwargs,\n) -> torch.Tensor:\n \"\"\"\n Make a grid of images.\n\n Args:\n tensor (Tensor or list): 4D mini-batch Tensor of shape (B x C x H x W)\n or a list of images all of the same size.\n nrow (int, optional): Number of images displayed in each row of the grid.\n The final grid size is ``(B / nrow, nrow)``. Default: ``8``.\n padding (int, optional): amount of padding. Default: ``2``.\n normalize (bool, optional): If True, shift the image to the range (0, 1),\n by the min and max values specified by ``value_range``. Default: ``False``.\n value_range (tuple, optional): tuple (min, max) where min and max are numbers,\n then these numbers are used to normalize the image. By default, min and max\n are computed from the tensor.\n scale_each (bool, optional): If ``True``, scale each image in the batch of\n images separately rather than the (min, max) over all images. Default: ``False``.\n pad_value (float, optional): Value for the padded pixels. Default: ``0``.\n\n Returns:\n grid (Tensor): the tensor containing grid of images.\n \"\"\"\n if not torch.jit.is_scripting() and not torch.jit.is_tracing():\n _log_api_usage_once(make_grid)\n if not torch.is_tensor(tensor):\n if isinstance(tensor, list):\n for t in tensor:\n if not torch.is_tensor(t):\n raise TypeError(f\"tensor or list of tensors expected, got a list containing {type(t)}\")\n else:\n raise TypeError(f\"tensor or list of tensors expected, got {type(tensor)}\")\n\n # if list of tensors, convert to a 4D mini-batch Tensor\n if isinstance(tensor, list):\n tensor = torch.stack(tensor, dim=0)\n\n if tensor.dim() == 2: # single image H x W\n tensor = tensor.unsqueeze(0)\n if tensor.dim() == 3: # single image\n if tensor.size(0) == 1: # if single-channel, convert to 3-channel\n tensor = torch.cat((tensor, tensor, tensor), 0)\n tensor = tensor.unsqueeze(0)\n\n if tensor.dim() == 4 and tensor.size(1) == 1: # single-channel images\n tensor = torch.cat((tensor, tensor, tensor), 1)\n\n if normalize is True:\n tensor = tensor.clone() # avoid modifying tensor in-place\n if value_range is not None and not isinstance(value_range, tuple):\n raise TypeError(\"value_range has to be a tuple (min, max) if specified. 
min and max are numbers\")\n\n def norm_ip(img, low, high):\n img.clamp_(min=low, max=high)\n img.sub_(low).div_(max(high - low, 1e-5))\n\n def norm_range(t, value_range):\n if value_range is not None:\n norm_ip(t, value_range[0], value_range[1])\n else:\n norm_ip(t, float(t.min()), float(t.max()))\n\n if scale_each is True:\n for t in tensor: # loop over mini-batch dimension\n norm_range(t, value_range)\n else:\n norm_range(tensor, value_range)\n\n if not isinstance(tensor, torch.Tensor):\n raise TypeError(\"tensor should be of type torch.Tensor\")\n if tensor.size(0) == 1:\n return tensor.squeeze(0)\n\n # make the mini-batch of images into a grid\n nmaps = tensor.size(0)\n xmaps = min(nrow, nmaps)\n ymaps = int(math.ceil(float(nmaps) / xmaps))\n height, width = int(tensor.size(2) + padding), int(tensor.size(3) + padding)\n num_channels = tensor.size(1)\n grid = tensor.new_full((num_channels, height * ymaps + padding, width * xmaps + padding), pad_value)\n k = 0\n for y in range(ymaps):\n for x in range(xmaps):\n if k >= nmaps:\n break\n # Tensor.copy_() is a valid method but seems to be missing from the stubs\n # https://pytorch.org/docs/stable/tensors.html#torch.Tensor.copy_\n grid.narrow(1, y * height + padding, height - padding).narrow( # type: ignore[attr-defined]\n 2, x * width + padding, width - padding\n ).copy_(tensor[k])\n k = k + 1\n return grid\n\n\[email protected]_grad()\ndef save_image(\n tensor: Union[torch.Tensor, List[torch.Tensor]],\n fp: Union[str, pathlib.Path, BinaryIO],\n format: Optional[str] = None,\n **kwargs,\n) -> None:\n \"\"\"\n Save a given Tensor into an image file.\n\n Args:\n tensor (Tensor or list): Image to be saved. If given a mini-batch tensor,\n saves the tensor as a grid of images by calling ``make_grid``.\n fp (string or file object): A filename or a file object\n format(Optional): If omitted, the format to use is determined from the filename extension.\n If a file object was used instead of a filename, this parameter should always be used.\n **kwargs: Other arguments are documented in ``make_grid``.\n \"\"\"\n\n if not torch.jit.is_scripting() and not torch.jit.is_tracing():\n _log_api_usage_once(save_image)\n grid = make_grid(tensor, **kwargs)\n # Add 0.5 after unnormalizing to [0, 255] to round to the nearest integer\n ndarr = grid.mul(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to(\"cpu\", torch.uint8).numpy()\n im = Image.fromarray(ndarr)\n im.save(fp, format=format)\n\n\[email protected]_grad()\ndef draw_bounding_boxes(\n image: torch.Tensor,\n boxes: torch.Tensor,\n labels: Optional[List[str]] = None,\n colors: Optional[Union[List[Union[str, Tuple[int, int, int]]], str, Tuple[int, int, int]]] = None,\n fill: Optional[bool] = False,\n width: int = 1,\n font: Optional[str] = None,\n font_size: Optional[int] = None,\n) -> torch.Tensor:\n\n \"\"\"\n Draws bounding boxes on given image.\n The values of the input image should be uint8 between 0 and 255.\n If fill is True, Resulting Tensor should be saved as PNG image.\n\n Args:\n image (Tensor): Tensor of shape (C x H x W) and dtype uint8.\n boxes (Tensor): Tensor of size (N, 4) containing bounding boxes in (xmin, ymin, xmax, ymax) format. Note that\n the boxes are absolute coordinates with respect to the image. In other words: `0 <= xmin < xmax < W` and\n `0 <= ymin < ymax < H`.\n labels (List[str]): List containing the labels of bounding boxes.\n colors (color or list of colors, optional): List containing the colors\n of the boxes or single color for all boxes. 
The color can be represented as\n PIL strings e.g. \"red\" or \"#FF00FF\", or as RGB tuples e.g. ``(240, 10, 157)``.\n By default, random colors are generated for boxes.\n fill (bool): If `True` fills the bounding box with specified color.\n width (int): Width of bounding box.\n font (str): A filename containing a TrueType font. If the file is not found in this filename, the loader may\n also search in other directories, such as the `fonts/` directory on Windows or `/Library/Fonts/`,\n `/System/Library/Fonts/` and `~/Library/Fonts/` on macOS.\n font_size (int): The requested font size in points.\n\n Returns:\n img (Tensor[C, H, W]): Image Tensor of dtype uint8 with bounding boxes plotted.\n \"\"\"\n\n if not torch.jit.is_scripting() and not torch.jit.is_tracing():\n _log_api_usage_once(draw_bounding_boxes)\n if not isinstance(image, torch.Tensor):\n raise TypeError(f\"Tensor expected, got {type(image)}\")\n elif image.dtype != torch.uint8:\n raise ValueError(f\"Tensor uint8 expected, got {image.dtype}\")\n elif image.dim() != 3:\n raise ValueError(\"Pass individual images, not batches\")\n elif image.size(0) not in {1, 3}:\n raise ValueError(\"Only grayscale and RGB images are supported\")\n elif (boxes[:, 0] > boxes[:, 2]).any() or (boxes[:, 1] > boxes[:, 3]).any():\n raise ValueError(\n \"Boxes need to be in (xmin, ymin, xmax, ymax) format. Use torchvision.ops.box_convert to convert them\"\n )\n\n num_boxes = boxes.shape[0]\n\n if num_boxes == 0:\n warnings.warn(\"boxes doesn't contain any box. No box was drawn\")\n return image\n\n if labels is None:\n labels: Union[List[str], List[None]] = [None] * num_boxes # type: ignore[no-redef]\n elif len(labels) != num_boxes:\n raise ValueError(\n f\"Number of boxes ({num_boxes}) and labels ({len(labels)}) mismatch. 
Please specify labels for each box.\"\n )\n\n colors = _parse_colors(colors, num_objects=num_boxes)\n\n if font is None:\n if font_size is not None:\n warnings.warn(\"Argument 'font_size' will be ignored since 'font' is not set.\")\n txt_font = ImageFont.load_default()\n else:\n txt_font = ImageFont.truetype(font=font, size=font_size or 10)\n\n # Handle Grayscale images\n if image.size(0) == 1:\n image = torch.tile(image, (3, 1, 1))\n\n ndarr = image.permute(1, 2, 0).cpu().numpy()\n img_to_draw = Image.fromarray(ndarr)\n img_boxes = boxes.to(torch.int64).tolist()\n\n if fill:\n draw = ImageDraw.Draw(img_to_draw, \"RGBA\")\n else:\n draw = ImageDraw.Draw(img_to_draw)\n\n for bbox, color, label in zip(img_boxes, colors, labels): # type: ignore[arg-type]\n if fill:\n fill_color = color + (100,)\n draw.rectangle(bbox, width=width, outline=color, fill=fill_color)\n else:\n draw.rectangle(bbox, width=width, outline=color)\n\n if label is not None:\n margin = width + 1\n draw.text((bbox[0] + margin, bbox[1] + margin), label, fill=color, font=txt_font)\n\n return torch.from_numpy(np.array(img_to_draw)).permute(2, 0, 1).to(dtype=torch.uint8)\n\n\[email protected]_grad()\ndef draw_segmentation_masks(\n image: torch.Tensor,\n masks: torch.Tensor,\n alpha: float = 0.8,\n colors: Optional[Union[List[Union[str, Tuple[int, int, int]]], str, Tuple[int, int, int]]] = None,\n) -> torch.Tensor:\n\n \"\"\"\n Draws segmentation masks on given RGB image.\n The values of the input image should be uint8 between 0 and 255.\n\n Args:\n image (Tensor): Tensor of shape (3, H, W) and dtype uint8.\n masks (Tensor): Tensor of shape (num_masks, H, W) or (H, W) and dtype bool.\n alpha (float): Float number between 0 and 1 denoting the transparency of the masks.\n 0 means full transparency, 1 means no transparency.\n colors (color or list of colors, optional): List containing the colors\n of the masks or single color for all masks. The color can be represented as\n PIL strings e.g. \"red\" or \"#FF00FF\", or as RGB tuples e.g. ``(240, 10, 157)``.\n By default, random colors are generated for each mask.\n\n Returns:\n img (Tensor[C, H, W]): Image Tensor, with segmentation masks drawn on top.\n \"\"\"\n\n if not torch.jit.is_scripting() and not torch.jit.is_tracing():\n _log_api_usage_once(draw_segmentation_masks)\n if not isinstance(image, torch.Tensor):\n raise TypeError(f\"The image must be a tensor, got {type(image)}\")\n elif image.dtype != torch.uint8:\n raise ValueError(f\"The image dtype must be uint8, got {image.dtype}\")\n elif image.dim() != 3:\n raise ValueError(\"Pass individual images, not batches\")\n elif image.size()[0] != 3:\n raise ValueError(\"Pass an RGB image. Other Image formats are not supported\")\n if masks.ndim == 2:\n masks = masks[None, :, :]\n if masks.ndim != 3:\n raise ValueError(\"masks must be of shape (H, W) or (batch_size, H, W)\")\n if masks.dtype != torch.bool:\n raise ValueError(f\"The masks must be of dtype bool. Got {masks.dtype}\")\n if masks.shape[-2:] != image.shape[-2:]:\n raise ValueError(\"The image and the masks must have the same height and width\")\n\n num_masks = masks.size()[0]\n\n if num_masks == 0:\n warnings.warn(\"masks doesn't contain any mask. 
No mask was drawn\")\n return image\n\n out_dtype = torch.uint8\n colors = [torch.tensor(color, dtype=out_dtype) for color in _parse_colors(colors, num_objects=num_masks)]\n\n img_to_draw = image.detach().clone()\n # TODO: There might be a way to vectorize this\n for mask, color in zip(masks, colors):\n img_to_draw[:, mask] = color[:, None]\n\n out = image * (1 - alpha) + img_to_draw * alpha\n return out.to(out_dtype)\n\n\[email protected]_grad()\ndef draw_keypoints(\n image: torch.Tensor,\n keypoints: torch.Tensor,\n connectivity: Optional[List[Tuple[int, int]]] = None,\n colors: Optional[Union[str, Tuple[int, int, int]]] = None,\n radius: int = 2,\n width: int = 3,\n) -> torch.Tensor:\n\n \"\"\"\n Draws Keypoints on given RGB image.\n The values of the input image should be uint8 between 0 and 255.\n\n Args:\n image (Tensor): Tensor of shape (3, H, W) and dtype uint8.\n keypoints (Tensor): Tensor of shape (num_instances, K, 2) the K keypoints location for each of the N instances,\n in the format [x, y].\n connectivity (List[Tuple[int, int]]]): A List of tuple where,\n each tuple contains pair of keypoints to be connected.\n colors (str, Tuple): The color can be represented as\n PIL strings e.g. \"red\" or \"#FF00FF\", or as RGB tuples e.g. ``(240, 10, 157)``.\n radius (int): Integer denoting radius of keypoint.\n width (int): Integer denoting width of line connecting keypoints.\n\n Returns:\n img (Tensor[C, H, W]): Image Tensor of dtype uint8 with keypoints drawn.\n \"\"\"\n\n if not torch.jit.is_scripting() and not torch.jit.is_tracing():\n _log_api_usage_once(draw_keypoints)\n if not isinstance(image, torch.Tensor):\n raise TypeError(f\"The image must be a tensor, got {type(image)}\")\n elif image.dtype != torch.uint8:\n raise ValueError(f\"The image dtype must be uint8, got {image.dtype}\")\n elif image.dim() != 3:\n raise ValueError(\"Pass individual images, not batches\")\n elif image.size()[0] != 3:\n raise ValueError(\"Pass an RGB image. Other Image formats are not supported\")\n\n if keypoints.ndim != 3:\n raise ValueError(\"keypoints must be of shape (num_instances, K, 2)\")\n\n ndarr = image.permute(1, 2, 0).cpu().numpy()\n img_to_draw = Image.fromarray(ndarr)\n draw = ImageDraw.Draw(img_to_draw)\n img_kpts = keypoints.to(torch.int64).tolist()\n\n for kpt_id, kpt_inst in enumerate(img_kpts):\n for inst_id, kpt in enumerate(kpt_inst):\n x1 = kpt[0] - radius\n x2 = kpt[0] + radius\n y1 = kpt[1] - radius\n y2 = kpt[1] + radius\n draw.ellipse([x1, y1, x2, y2], fill=colors, outline=None, width=0)\n\n if connectivity:\n for connection in connectivity:\n start_pt_x = kpt_inst[connection[0]][0]\n start_pt_y = kpt_inst[connection[0]][1]\n\n end_pt_x = kpt_inst[connection[1]][0]\n end_pt_y = kpt_inst[connection[1]][1]\n\n draw.line(\n ((start_pt_x, start_pt_y), (end_pt_x, end_pt_y)),\n width=width,\n )\n\n return torch.from_numpy(np.array(img_to_draw)).permute(2, 0, 1).to(dtype=torch.uint8)\n\n\n# Flow visualization code adapted from https://github.com/tomrunia/OpticalFlow_Visualization\[email protected]_grad()\ndef flow_to_image(flow: torch.Tensor) -> torch.Tensor:\n\n \"\"\"\n Converts a flow to an RGB image.\n\n Args:\n flow (Tensor): Flow of shape (N, 2, H, W) or (2, H, W) and dtype torch.float.\n\n Returns:\n img (Tensor): Image Tensor of dtype uint8 where each color corresponds\n to a given flow direction. 
Shape is (N, 3, H, W) or (3, H, W) depending on the input.\n \"\"\"\n\n if flow.dtype != torch.float:\n raise ValueError(f\"Flow should be of dtype torch.float, got {flow.dtype}.\")\n\n orig_shape = flow.shape\n if flow.ndim == 3:\n flow = flow[None] # Add batch dim\n\n if flow.ndim != 4 or flow.shape[1] != 2:\n raise ValueError(f\"Input flow should have shape (2, H, W) or (N, 2, H, W), got {orig_shape}.\")\n\n max_norm = torch.sum(flow**2, dim=1).sqrt().max()\n epsilon = torch.finfo((flow).dtype).eps\n normalized_flow = flow / (max_norm + epsilon)\n img = _normalized_flow_to_image(normalized_flow)\n\n if len(orig_shape) == 3:\n img = img[0] # Remove batch dim\n return img\n\n\[email protected]_grad()\ndef _normalized_flow_to_image(normalized_flow: torch.Tensor) -> torch.Tensor:\n\n \"\"\"\n Converts a batch of normalized flow to an RGB image.\n\n Args:\n normalized_flow (torch.Tensor): Normalized flow tensor of shape (N, 2, H, W)\n Returns:\n img (Tensor(N, 3, H, W)): Flow visualization image of dtype uint8.\n \"\"\"\n\n N, _, H, W = normalized_flow.shape\n device = normalized_flow.device\n flow_image = torch.zeros((N, 3, H, W), dtype=torch.uint8, device=device)\n colorwheel = _make_colorwheel().to(device) # shape [55x3]\n num_cols = colorwheel.shape[0]\n norm = torch.sum(normalized_flow**2, dim=1).sqrt()\n a = torch.atan2(-normalized_flow[:, 1, :, :], -normalized_flow[:, 0, :, :]) / torch.pi\n fk = (a + 1) / 2 * (num_cols - 1)\n k0 = torch.floor(fk).to(torch.long)\n k1 = k0 + 1\n k1[k1 == num_cols] = 0\n f = fk - k0\n\n for c in range(colorwheel.shape[1]):\n tmp = colorwheel[:, c]\n col0 = tmp[k0] / 255.0\n col1 = tmp[k1] / 255.0\n col = (1 - f) * col0 + f * col1\n col = 1 - norm * (1 - col)\n flow_image[:, c, :, :] = torch.floor(255 * col)\n return flow_image\n\n\ndef _make_colorwheel() -> torch.Tensor:\n \"\"\"\n Generates a color wheel for optical flow visualization as presented in:\n Baker et al. 
\"A Database and Evaluation Methodology for Optical Flow\" (ICCV, 2007)\n URL: http://vision.middlebury.edu/flow/flowEval-iccv07.pdf.\n\n Returns:\n colorwheel (Tensor[55, 3]): Colorwheel Tensor.\n \"\"\"\n\n RY = 15\n YG = 6\n GC = 4\n CB = 11\n BM = 13\n MR = 6\n\n ncols = RY + YG + GC + CB + BM + MR\n colorwheel = torch.zeros((ncols, 3))\n col = 0\n\n # RY\n colorwheel[0:RY, 0] = 255\n colorwheel[0:RY, 1] = torch.floor(255 * torch.arange(0, RY) / RY)\n col = col + RY\n # YG\n colorwheel[col : col + YG, 0] = 255 - torch.floor(255 * torch.arange(0, YG) / YG)\n colorwheel[col : col + YG, 1] = 255\n col = col + YG\n # GC\n colorwheel[col : col + GC, 1] = 255\n colorwheel[col : col + GC, 2] = torch.floor(255 * torch.arange(0, GC) / GC)\n col = col + GC\n # CB\n colorwheel[col : col + CB, 1] = 255 - torch.floor(255 * torch.arange(CB) / CB)\n colorwheel[col : col + CB, 2] = 255\n col = col + CB\n # BM\n colorwheel[col : col + BM, 2] = 255\n colorwheel[col : col + BM, 0] = torch.floor(255 * torch.arange(0, BM) / BM)\n col = col + BM\n # MR\n colorwheel[col : col + MR, 2] = 255 - torch.floor(255 * torch.arange(MR) / MR)\n colorwheel[col : col + MR, 0] = 255\n return colorwheel\n\n\ndef _generate_color_palette(num_objects: int):\n palette = torch.tensor([2**25 - 1, 2**15 - 1, 2**21 - 1])\n return [tuple((i * palette) % 255) for i in range(num_objects)]\n\n\ndef _parse_colors(\n colors: Union[None, str, Tuple[int, int, int], List[Union[str, Tuple[int, int, int]]]],\n *,\n num_objects: int,\n) -> List[Tuple[int, int, int]]:\n \"\"\"\n Parses a specification of colors for a set of objects.\n\n Args:\n colors: A specification of colors for the objects. This can be one of the following:\n - None: to generate a color palette automatically.\n - A list of colors: where each color is either a string (specifying a named color) or an RGB tuple.\n - A string or an RGB tuple: to use the same color for all objects.\n\n If `colors` is a tuple, it should be a 3-tuple specifying the RGB values of the color.\n If `colors` is a list, it should have at least as many elements as the number of objects to color.\n\n num_objects (int): The number of objects to color.\n\n Returns:\n A list of 3-tuples, specifying the RGB values of the colors.\n\n Raises:\n ValueError: If the number of colors in the list is less than the number of objects to color.\n If `colors` is not a list, tuple, string or None.\n \"\"\"\n if colors is None:\n colors = _generate_color_palette(num_objects)\n elif isinstance(colors, list):\n if len(colors) < num_objects:\n raise ValueError(\n f\"Number of colors must be equal or larger than the number of objects, but got {len(colors)} < {num_objects}.\"\n )\n elif not isinstance(colors, (tuple, str)):\n raise ValueError(\"`colors` must be a tuple or a string, or a list thereof, but got {colors}.\")\n elif isinstance(colors, tuple) and len(colors) != 3:\n raise ValueError(\"If passed as tuple, colors should be an RGB triplet, but got {colors}.\")\n else: # colors specifies a single color for all objects\n colors = [colors] * num_objects\n\n return [ImageColor.getrgb(color) if isinstance(color, str) else color for color in colors]\n\n\ndef _log_api_usage_once(obj: Any) -> None:\n\n \"\"\"\n Logs API usage(module and name) within an organization.\n In a large ecosystem, it's often useful to track the PyTorch and\n TorchVision APIs usage. This API provides the similar functionality to the\n logging module in the Python stdlib. 
It can be used for debugging purpose\n to log which methods are used and by default it is inactive, unless the user\n manually subscribes a logger via the `SetAPIUsageLogger method <https://github.com/pytorch/pytorch/blob/eb3b9fe719b21fae13c7a7cf3253f970290a573e/c10/util/Logging.cpp#L114>`_.\n Please note it is triggered only once for the same API call within a process.\n It does not collect any data from open-source users since it is no-op by default.\n For more information, please refer to\n * PyTorch note: https://pytorch.org/docs/stable/notes/large_scale_deployments.html#api-usage-logging;\n * Logging policy: https://github.com/pytorch/vision/issues/5052;\n\n Args:\n obj (class instance or method): an object to extract info from.\n \"\"\"\n module = obj.__module__\n if not module.startswith(\"torchvision\"):\n module = f\"torchvision.internal.{module}\"\n name = obj.__class__.__name__\n if isinstance(obj, FunctionType):\n name = obj.__name__\n torch._C._log_api_usage_once(f\"{module}.{name}\")\n\n\ndef _make_ntuple(x: Any, n: int) -> Tuple[Any, ...]:\n \"\"\"\n Make n-tuple from input x. If x is an iterable, then we just convert it to tuple.\n Otherwise, we will make a tuple of length n, all with value of x.\n reference: https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/utils.py#L8\n\n Args:\n x (Any): input value\n n (int): length of the resulting tuple\n \"\"\"\n if isinstance(x, collections.abc.Iterable):\n return tuple(x)\n return tuple(repeat(x, n))\n",
"path": "torchvision/utils.py"
}
] | [
{
"content": "import collections\nimport math\nimport pathlib\nimport warnings\nfrom itertools import repeat\nfrom types import FunctionType\nfrom typing import Any, BinaryIO, List, Optional, Tuple, Union\n\nimport numpy as np\nimport torch\nfrom PIL import Image, ImageColor, ImageDraw, ImageFont\n\n__all__ = [\n \"make_grid\",\n \"save_image\",\n \"draw_bounding_boxes\",\n \"draw_segmentation_masks\",\n \"draw_keypoints\",\n \"flow_to_image\",\n]\n\n\[email protected]_grad()\ndef make_grid(\n tensor: Union[torch.Tensor, List[torch.Tensor]],\n nrow: int = 8,\n padding: int = 2,\n normalize: bool = False,\n value_range: Optional[Tuple[int, int]] = None,\n scale_each: bool = False,\n pad_value: float = 0.0,\n) -> torch.Tensor:\n \"\"\"\n Make a grid of images.\n\n Args:\n tensor (Tensor or list): 4D mini-batch Tensor of shape (B x C x H x W)\n or a list of images all of the same size.\n nrow (int, optional): Number of images displayed in each row of the grid.\n The final grid size is ``(B / nrow, nrow)``. Default: ``8``.\n padding (int, optional): amount of padding. Default: ``2``.\n normalize (bool, optional): If True, shift the image to the range (0, 1),\n by the min and max values specified by ``value_range``. Default: ``False``.\n value_range (tuple, optional): tuple (min, max) where min and max are numbers,\n then these numbers are used to normalize the image. By default, min and max\n are computed from the tensor.\n scale_each (bool, optional): If ``True``, scale each image in the batch of\n images separately rather than the (min, max) over all images. Default: ``False``.\n pad_value (float, optional): Value for the padded pixels. Default: ``0``.\n\n Returns:\n grid (Tensor): the tensor containing grid of images.\n \"\"\"\n if not torch.jit.is_scripting() and not torch.jit.is_tracing():\n _log_api_usage_once(make_grid)\n if not torch.is_tensor(tensor):\n if isinstance(tensor, list):\n for t in tensor:\n if not torch.is_tensor(t):\n raise TypeError(f\"tensor or list of tensors expected, got a list containing {type(t)}\")\n else:\n raise TypeError(f\"tensor or list of tensors expected, got {type(tensor)}\")\n\n # if list of tensors, convert to a 4D mini-batch Tensor\n if isinstance(tensor, list):\n tensor = torch.stack(tensor, dim=0)\n\n if tensor.dim() == 2: # single image H x W\n tensor = tensor.unsqueeze(0)\n if tensor.dim() == 3: # single image\n if tensor.size(0) == 1: # if single-channel, convert to 3-channel\n tensor = torch.cat((tensor, tensor, tensor), 0)\n tensor = tensor.unsqueeze(0)\n\n if tensor.dim() == 4 and tensor.size(1) == 1: # single-channel images\n tensor = torch.cat((tensor, tensor, tensor), 1)\n\n if normalize is True:\n tensor = tensor.clone() # avoid modifying tensor in-place\n if value_range is not None and not isinstance(value_range, tuple):\n raise TypeError(\"value_range has to be a tuple (min, max) if specified. 
min and max are numbers\")\n\n def norm_ip(img, low, high):\n img.clamp_(min=low, max=high)\n img.sub_(low).div_(max(high - low, 1e-5))\n\n def norm_range(t, value_range):\n if value_range is not None:\n norm_ip(t, value_range[0], value_range[1])\n else:\n norm_ip(t, float(t.min()), float(t.max()))\n\n if scale_each is True:\n for t in tensor: # loop over mini-batch dimension\n norm_range(t, value_range)\n else:\n norm_range(tensor, value_range)\n\n if not isinstance(tensor, torch.Tensor):\n raise TypeError(\"tensor should be of type torch.Tensor\")\n if tensor.size(0) == 1:\n return tensor.squeeze(0)\n\n # make the mini-batch of images into a grid\n nmaps = tensor.size(0)\n xmaps = min(nrow, nmaps)\n ymaps = int(math.ceil(float(nmaps) / xmaps))\n height, width = int(tensor.size(2) + padding), int(tensor.size(3) + padding)\n num_channels = tensor.size(1)\n grid = tensor.new_full((num_channels, height * ymaps + padding, width * xmaps + padding), pad_value)\n k = 0\n for y in range(ymaps):\n for x in range(xmaps):\n if k >= nmaps:\n break\n # Tensor.copy_() is a valid method but seems to be missing from the stubs\n # https://pytorch.org/docs/stable/tensors.html#torch.Tensor.copy_\n grid.narrow(1, y * height + padding, height - padding).narrow( # type: ignore[attr-defined]\n 2, x * width + padding, width - padding\n ).copy_(tensor[k])\n k = k + 1\n return grid\n\n\[email protected]_grad()\ndef save_image(\n tensor: Union[torch.Tensor, List[torch.Tensor]],\n fp: Union[str, pathlib.Path, BinaryIO],\n format: Optional[str] = None,\n **kwargs,\n) -> None:\n \"\"\"\n Save a given Tensor into an image file.\n\n Args:\n tensor (Tensor or list): Image to be saved. If given a mini-batch tensor,\n saves the tensor as a grid of images by calling ``make_grid``.\n fp (string or file object): A filename or a file object\n format(Optional): If omitted, the format to use is determined from the filename extension.\n If a file object was used instead of a filename, this parameter should always be used.\n **kwargs: Other arguments are documented in ``make_grid``.\n \"\"\"\n\n if not torch.jit.is_scripting() and not torch.jit.is_tracing():\n _log_api_usage_once(save_image)\n grid = make_grid(tensor, **kwargs)\n # Add 0.5 after unnormalizing to [0, 255] to round to the nearest integer\n ndarr = grid.mul(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to(\"cpu\", torch.uint8).numpy()\n im = Image.fromarray(ndarr)\n im.save(fp, format=format)\n\n\[email protected]_grad()\ndef draw_bounding_boxes(\n image: torch.Tensor,\n boxes: torch.Tensor,\n labels: Optional[List[str]] = None,\n colors: Optional[Union[List[Union[str, Tuple[int, int, int]]], str, Tuple[int, int, int]]] = None,\n fill: Optional[bool] = False,\n width: int = 1,\n font: Optional[str] = None,\n font_size: Optional[int] = None,\n) -> torch.Tensor:\n\n \"\"\"\n Draws bounding boxes on given image.\n The values of the input image should be uint8 between 0 and 255.\n If fill is True, Resulting Tensor should be saved as PNG image.\n\n Args:\n image (Tensor): Tensor of shape (C x H x W) and dtype uint8.\n boxes (Tensor): Tensor of size (N, 4) containing bounding boxes in (xmin, ymin, xmax, ymax) format. Note that\n the boxes are absolute coordinates with respect to the image. In other words: `0 <= xmin < xmax < W` and\n `0 <= ymin < ymax < H`.\n labels (List[str]): List containing the labels of bounding boxes.\n colors (color or list of colors, optional): List containing the colors\n of the boxes or single color for all boxes. 
The color can be represented as\n PIL strings e.g. \"red\" or \"#FF00FF\", or as RGB tuples e.g. ``(240, 10, 157)``.\n By default, random colors are generated for boxes.\n fill (bool): If `True` fills the bounding box with specified color.\n width (int): Width of bounding box.\n font (str): A filename containing a TrueType font. If the file is not found in this filename, the loader may\n also search in other directories, such as the `fonts/` directory on Windows or `/Library/Fonts/`,\n `/System/Library/Fonts/` and `~/Library/Fonts/` on macOS.\n font_size (int): The requested font size in points.\n\n Returns:\n img (Tensor[C, H, W]): Image Tensor of dtype uint8 with bounding boxes plotted.\n \"\"\"\n\n if not torch.jit.is_scripting() and not torch.jit.is_tracing():\n _log_api_usage_once(draw_bounding_boxes)\n if not isinstance(image, torch.Tensor):\n raise TypeError(f\"Tensor expected, got {type(image)}\")\n elif image.dtype != torch.uint8:\n raise ValueError(f\"Tensor uint8 expected, got {image.dtype}\")\n elif image.dim() != 3:\n raise ValueError(\"Pass individual images, not batches\")\n elif image.size(0) not in {1, 3}:\n raise ValueError(\"Only grayscale and RGB images are supported\")\n elif (boxes[:, 0] > boxes[:, 2]).any() or (boxes[:, 1] > boxes[:, 3]).any():\n raise ValueError(\n \"Boxes need to be in (xmin, ymin, xmax, ymax) format. Use torchvision.ops.box_convert to convert them\"\n )\n\n num_boxes = boxes.shape[0]\n\n if num_boxes == 0:\n warnings.warn(\"boxes doesn't contain any box. No box was drawn\")\n return image\n\n if labels is None:\n labels: Union[List[str], List[None]] = [None] * num_boxes # type: ignore[no-redef]\n elif len(labels) != num_boxes:\n raise ValueError(\n f\"Number of boxes ({num_boxes}) and labels ({len(labels)}) mismatch. 
Please specify labels for each box.\"\n )\n\n colors = _parse_colors(colors, num_objects=num_boxes)\n\n if font is None:\n if font_size is not None:\n warnings.warn(\"Argument 'font_size' will be ignored since 'font' is not set.\")\n txt_font = ImageFont.load_default()\n else:\n txt_font = ImageFont.truetype(font=font, size=font_size or 10)\n\n # Handle Grayscale images\n if image.size(0) == 1:\n image = torch.tile(image, (3, 1, 1))\n\n ndarr = image.permute(1, 2, 0).cpu().numpy()\n img_to_draw = Image.fromarray(ndarr)\n img_boxes = boxes.to(torch.int64).tolist()\n\n if fill:\n draw = ImageDraw.Draw(img_to_draw, \"RGBA\")\n else:\n draw = ImageDraw.Draw(img_to_draw)\n\n for bbox, color, label in zip(img_boxes, colors, labels): # type: ignore[arg-type]\n if fill:\n fill_color = color + (100,)\n draw.rectangle(bbox, width=width, outline=color, fill=fill_color)\n else:\n draw.rectangle(bbox, width=width, outline=color)\n\n if label is not None:\n margin = width + 1\n draw.text((bbox[0] + margin, bbox[1] + margin), label, fill=color, font=txt_font)\n\n return torch.from_numpy(np.array(img_to_draw)).permute(2, 0, 1).to(dtype=torch.uint8)\n\n\[email protected]_grad()\ndef draw_segmentation_masks(\n image: torch.Tensor,\n masks: torch.Tensor,\n alpha: float = 0.8,\n colors: Optional[Union[List[Union[str, Tuple[int, int, int]]], str, Tuple[int, int, int]]] = None,\n) -> torch.Tensor:\n\n \"\"\"\n Draws segmentation masks on given RGB image.\n The values of the input image should be uint8 between 0 and 255.\n\n Args:\n image (Tensor): Tensor of shape (3, H, W) and dtype uint8.\n masks (Tensor): Tensor of shape (num_masks, H, W) or (H, W) and dtype bool.\n alpha (float): Float number between 0 and 1 denoting the transparency of the masks.\n 0 means full transparency, 1 means no transparency.\n colors (color or list of colors, optional): List containing the colors\n of the masks or single color for all masks. The color can be represented as\n PIL strings e.g. \"red\" or \"#FF00FF\", or as RGB tuples e.g. ``(240, 10, 157)``.\n By default, random colors are generated for each mask.\n\n Returns:\n img (Tensor[C, H, W]): Image Tensor, with segmentation masks drawn on top.\n \"\"\"\n\n if not torch.jit.is_scripting() and not torch.jit.is_tracing():\n _log_api_usage_once(draw_segmentation_masks)\n if not isinstance(image, torch.Tensor):\n raise TypeError(f\"The image must be a tensor, got {type(image)}\")\n elif image.dtype != torch.uint8:\n raise ValueError(f\"The image dtype must be uint8, got {image.dtype}\")\n elif image.dim() != 3:\n raise ValueError(\"Pass individual images, not batches\")\n elif image.size()[0] != 3:\n raise ValueError(\"Pass an RGB image. Other Image formats are not supported\")\n if masks.ndim == 2:\n masks = masks[None, :, :]\n if masks.ndim != 3:\n raise ValueError(\"masks must be of shape (H, W) or (batch_size, H, W)\")\n if masks.dtype != torch.bool:\n raise ValueError(f\"The masks must be of dtype bool. Got {masks.dtype}\")\n if masks.shape[-2:] != image.shape[-2:]:\n raise ValueError(\"The image and the masks must have the same height and width\")\n\n num_masks = masks.size()[0]\n\n if num_masks == 0:\n warnings.warn(\"masks doesn't contain any mask. 
No mask was drawn\")\n return image\n\n out_dtype = torch.uint8\n colors = [torch.tensor(color, dtype=out_dtype) for color in _parse_colors(colors, num_objects=num_masks)]\n\n img_to_draw = image.detach().clone()\n # TODO: There might be a way to vectorize this\n for mask, color in zip(masks, colors):\n img_to_draw[:, mask] = color[:, None]\n\n out = image * (1 - alpha) + img_to_draw * alpha\n return out.to(out_dtype)\n\n\[email protected]_grad()\ndef draw_keypoints(\n image: torch.Tensor,\n keypoints: torch.Tensor,\n connectivity: Optional[List[Tuple[int, int]]] = None,\n colors: Optional[Union[str, Tuple[int, int, int]]] = None,\n radius: int = 2,\n width: int = 3,\n) -> torch.Tensor:\n\n \"\"\"\n Draws Keypoints on given RGB image.\n The values of the input image should be uint8 between 0 and 255.\n\n Args:\n image (Tensor): Tensor of shape (3, H, W) and dtype uint8.\n keypoints (Tensor): Tensor of shape (num_instances, K, 2) the K keypoints location for each of the N instances,\n in the format [x, y].\n connectivity (List[Tuple[int, int]]]): A List of tuple where,\n each tuple contains pair of keypoints to be connected.\n colors (str, Tuple): The color can be represented as\n PIL strings e.g. \"red\" or \"#FF00FF\", or as RGB tuples e.g. ``(240, 10, 157)``.\n radius (int): Integer denoting radius of keypoint.\n width (int): Integer denoting width of line connecting keypoints.\n\n Returns:\n img (Tensor[C, H, W]): Image Tensor of dtype uint8 with keypoints drawn.\n \"\"\"\n\n if not torch.jit.is_scripting() and not torch.jit.is_tracing():\n _log_api_usage_once(draw_keypoints)\n if not isinstance(image, torch.Tensor):\n raise TypeError(f\"The image must be a tensor, got {type(image)}\")\n elif image.dtype != torch.uint8:\n raise ValueError(f\"The image dtype must be uint8, got {image.dtype}\")\n elif image.dim() != 3:\n raise ValueError(\"Pass individual images, not batches\")\n elif image.size()[0] != 3:\n raise ValueError(\"Pass an RGB image. Other Image formats are not supported\")\n\n if keypoints.ndim != 3:\n raise ValueError(\"keypoints must be of shape (num_instances, K, 2)\")\n\n ndarr = image.permute(1, 2, 0).cpu().numpy()\n img_to_draw = Image.fromarray(ndarr)\n draw = ImageDraw.Draw(img_to_draw)\n img_kpts = keypoints.to(torch.int64).tolist()\n\n for kpt_id, kpt_inst in enumerate(img_kpts):\n for inst_id, kpt in enumerate(kpt_inst):\n x1 = kpt[0] - radius\n x2 = kpt[0] + radius\n y1 = kpt[1] - radius\n y2 = kpt[1] + radius\n draw.ellipse([x1, y1, x2, y2], fill=colors, outline=None, width=0)\n\n if connectivity:\n for connection in connectivity:\n start_pt_x = kpt_inst[connection[0]][0]\n start_pt_y = kpt_inst[connection[0]][1]\n\n end_pt_x = kpt_inst[connection[1]][0]\n end_pt_y = kpt_inst[connection[1]][1]\n\n draw.line(\n ((start_pt_x, start_pt_y), (end_pt_x, end_pt_y)),\n width=width,\n )\n\n return torch.from_numpy(np.array(img_to_draw)).permute(2, 0, 1).to(dtype=torch.uint8)\n\n\n# Flow visualization code adapted from https://github.com/tomrunia/OpticalFlow_Visualization\[email protected]_grad()\ndef flow_to_image(flow: torch.Tensor) -> torch.Tensor:\n\n \"\"\"\n Converts a flow to an RGB image.\n\n Args:\n flow (Tensor): Flow of shape (N, 2, H, W) or (2, H, W) and dtype torch.float.\n\n Returns:\n img (Tensor): Image Tensor of dtype uint8 where each color corresponds\n to a given flow direction. 
Shape is (N, 3, H, W) or (3, H, W) depending on the input.\n \"\"\"\n\n if flow.dtype != torch.float:\n raise ValueError(f\"Flow should be of dtype torch.float, got {flow.dtype}.\")\n\n orig_shape = flow.shape\n if flow.ndim == 3:\n flow = flow[None] # Add batch dim\n\n if flow.ndim != 4 or flow.shape[1] != 2:\n raise ValueError(f\"Input flow should have shape (2, H, W) or (N, 2, H, W), got {orig_shape}.\")\n\n max_norm = torch.sum(flow**2, dim=1).sqrt().max()\n epsilon = torch.finfo((flow).dtype).eps\n normalized_flow = flow / (max_norm + epsilon)\n img = _normalized_flow_to_image(normalized_flow)\n\n if len(orig_shape) == 3:\n img = img[0] # Remove batch dim\n return img\n\n\[email protected]_grad()\ndef _normalized_flow_to_image(normalized_flow: torch.Tensor) -> torch.Tensor:\n\n \"\"\"\n Converts a batch of normalized flow to an RGB image.\n\n Args:\n normalized_flow (torch.Tensor): Normalized flow tensor of shape (N, 2, H, W)\n Returns:\n img (Tensor(N, 3, H, W)): Flow visualization image of dtype uint8.\n \"\"\"\n\n N, _, H, W = normalized_flow.shape\n device = normalized_flow.device\n flow_image = torch.zeros((N, 3, H, W), dtype=torch.uint8, device=device)\n colorwheel = _make_colorwheel().to(device) # shape [55x3]\n num_cols = colorwheel.shape[0]\n norm = torch.sum(normalized_flow**2, dim=1).sqrt()\n a = torch.atan2(-normalized_flow[:, 1, :, :], -normalized_flow[:, 0, :, :]) / torch.pi\n fk = (a + 1) / 2 * (num_cols - 1)\n k0 = torch.floor(fk).to(torch.long)\n k1 = k0 + 1\n k1[k1 == num_cols] = 0\n f = fk - k0\n\n for c in range(colorwheel.shape[1]):\n tmp = colorwheel[:, c]\n col0 = tmp[k0] / 255.0\n col1 = tmp[k1] / 255.0\n col = (1 - f) * col0 + f * col1\n col = 1 - norm * (1 - col)\n flow_image[:, c, :, :] = torch.floor(255 * col)\n return flow_image\n\n\ndef _make_colorwheel() -> torch.Tensor:\n \"\"\"\n Generates a color wheel for optical flow visualization as presented in:\n Baker et al. 
\"A Database and Evaluation Methodology for Optical Flow\" (ICCV, 2007)\n URL: http://vision.middlebury.edu/flow/flowEval-iccv07.pdf.\n\n Returns:\n colorwheel (Tensor[55, 3]): Colorwheel Tensor.\n \"\"\"\n\n RY = 15\n YG = 6\n GC = 4\n CB = 11\n BM = 13\n MR = 6\n\n ncols = RY + YG + GC + CB + BM + MR\n colorwheel = torch.zeros((ncols, 3))\n col = 0\n\n # RY\n colorwheel[0:RY, 0] = 255\n colorwheel[0:RY, 1] = torch.floor(255 * torch.arange(0, RY) / RY)\n col = col + RY\n # YG\n colorwheel[col : col + YG, 0] = 255 - torch.floor(255 * torch.arange(0, YG) / YG)\n colorwheel[col : col + YG, 1] = 255\n col = col + YG\n # GC\n colorwheel[col : col + GC, 1] = 255\n colorwheel[col : col + GC, 2] = torch.floor(255 * torch.arange(0, GC) / GC)\n col = col + GC\n # CB\n colorwheel[col : col + CB, 1] = 255 - torch.floor(255 * torch.arange(CB) / CB)\n colorwheel[col : col + CB, 2] = 255\n col = col + CB\n # BM\n colorwheel[col : col + BM, 2] = 255\n colorwheel[col : col + BM, 0] = torch.floor(255 * torch.arange(0, BM) / BM)\n col = col + BM\n # MR\n colorwheel[col : col + MR, 2] = 255 - torch.floor(255 * torch.arange(MR) / MR)\n colorwheel[col : col + MR, 0] = 255\n return colorwheel\n\n\ndef _generate_color_palette(num_objects: int):\n palette = torch.tensor([2**25 - 1, 2**15 - 1, 2**21 - 1])\n return [tuple((i * palette) % 255) for i in range(num_objects)]\n\n\ndef _parse_colors(\n colors: Union[None, str, Tuple[int, int, int], List[Union[str, Tuple[int, int, int]]]],\n *,\n num_objects: int,\n) -> List[Tuple[int, int, int]]:\n \"\"\"\n Parses a specification of colors for a set of objects.\n\n Args:\n colors: A specification of colors for the objects. This can be one of the following:\n - None: to generate a color palette automatically.\n - A list of colors: where each color is either a string (specifying a named color) or an RGB tuple.\n - A string or an RGB tuple: to use the same color for all objects.\n\n If `colors` is a tuple, it should be a 3-tuple specifying the RGB values of the color.\n If `colors` is a list, it should have at least as many elements as the number of objects to color.\n\n num_objects (int): The number of objects to color.\n\n Returns:\n A list of 3-tuples, specifying the RGB values of the colors.\n\n Raises:\n ValueError: If the number of colors in the list is less than the number of objects to color.\n If `colors` is not a list, tuple, string or None.\n \"\"\"\n if colors is None:\n colors = _generate_color_palette(num_objects)\n elif isinstance(colors, list):\n if len(colors) < num_objects:\n raise ValueError(\n f\"Number of colors must be equal or larger than the number of objects, but got {len(colors)} < {num_objects}.\"\n )\n elif not isinstance(colors, (tuple, str)):\n raise ValueError(\"`colors` must be a tuple or a string, or a list thereof, but got {colors}.\")\n elif isinstance(colors, tuple) and len(colors) != 3:\n raise ValueError(\"If passed as tuple, colors should be an RGB triplet, but got {colors}.\")\n else: # colors specifies a single color for all objects\n colors = [colors] * num_objects\n\n return [ImageColor.getrgb(color) if isinstance(color, str) else color for color in colors]\n\n\ndef _log_api_usage_once(obj: Any) -> None:\n\n \"\"\"\n Logs API usage(module and name) within an organization.\n In a large ecosystem, it's often useful to track the PyTorch and\n TorchVision APIs usage. This API provides the similar functionality to the\n logging module in the Python stdlib. 
It can be used for debugging purpose\n to log which methods are used and by default it is inactive, unless the user\n manually subscribes a logger via the `SetAPIUsageLogger method <https://github.com/pytorch/pytorch/blob/eb3b9fe719b21fae13c7a7cf3253f970290a573e/c10/util/Logging.cpp#L114>`_.\n Please note it is triggered only once for the same API call within a process.\n It does not collect any data from open-source users since it is no-op by default.\n For more information, please refer to\n * PyTorch note: https://pytorch.org/docs/stable/notes/large_scale_deployments.html#api-usage-logging;\n * Logging policy: https://github.com/pytorch/vision/issues/5052;\n\n Args:\n obj (class instance or method): an object to extract info from.\n \"\"\"\n module = obj.__module__\n if not module.startswith(\"torchvision\"):\n module = f\"torchvision.internal.{module}\"\n name = obj.__class__.__name__\n if isinstance(obj, FunctionType):\n name = obj.__name__\n torch._C._log_api_usage_once(f\"{module}.{name}\")\n\n\ndef _make_ntuple(x: Any, n: int) -> Tuple[Any, ...]:\n \"\"\"\n Make n-tuple from input x. If x is an iterable, then we just convert it to tuple.\n Otherwise, we will make a tuple of length n, all with value of x.\n reference: https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/utils.py#L8\n\n Args:\n x (Any): input value\n n (int): length of the resulting tuple\n \"\"\"\n if isinstance(x, collections.abc.Iterable):\n return tuple(x)\n return tuple(repeat(x, n))\n",
"path": "torchvision/utils.py"
}
] | diff --git a/torchvision/utils.py b/torchvision/utils.py
index bc9d88b2849..1418656a7f2 100644
--- a/torchvision/utils.py
+++ b/torchvision/utils.py
@@ -29,7 +29,6 @@ def make_grid(
value_range: Optional[Tuple[int, int]] = None,
scale_each: bool = False,
pad_value: float = 0.0,
- **kwargs,
) -> torch.Tensor:
"""
Make a grid of images.
|
localstack__localstack-1695 | Downloading files from localstack S3 generates "Requested Range Not Satisfiable"
Seems related to #1185, but I am copying a tar file (~414MB) up to my localstack S3 instance and trying to download it. Early in the download (it looks like roughly 32MB in), the following error is generated:
```
+ aws --endpoint-url http://localhost:4572 s3 cp s3://test-bucket/test.tar /tmp/test.copy.tar
download failed: s3://test-bucket/test.tar to ./test.copy.tar An error occurred (416) when calling the GetObject operation: Requested Range Not Satisfiable
```
I am running the following to replicate this:
```
#!/bin/bash
set -x
set -e
# Create Bucket
aws --endpoint-url=http://localhost:4572 s3 mb s3://test-bucket
# Bucket ACL
aws --endpoint-url=http://localhost:4572 s3api put-bucket-acl --bucket test-bucket --acl public-read
# Copy to Bucket
aws --endpoint-url=http://localhost:4572 s3 cp /tmp/test.tar s3://test-bucket/test.tar
# ls bucket
aws --endpoint-url=http://localhost:4572 s3 ls s3://test-bucket/
# Download
aws --endpoint-url http://localhost:4572 s3 cp s3://test-bucket/test.tar /tmp/test.copy.tar
```
Perhaps I am doing something wrong. The command I'm using to copy from S3 works on a real S3 instance, but not with localstack's.
As far as I can tell I'm using the latest localstack image, installed with `pip3 install localstack[all]`.
```
localstack/localstack latest 06af7745282d 18 hours ago 829MB
```
I also don't think I am running out of memory, disk space, etc.
And the localstack instance generates the following error messages:
```
2019-10-06T12:42:12:WARNING:localstack.services.generic_proxy: Connection prematurely closed by client (broken pipe).
----------------------------------------
Exception happened during processing of request from ('172.17.0.1', 48000)
Traceback (most recent call last):
File "/opt/code/localstack/localstack/services/generic_proxy.py", line 313, in forward
self.wfile.write(to_bytes(response.content))
File "/usr/lib/python3.6/socketserver.py", line 803, in write
self._sock.sendall(b)
BrokenPipeError: [Errno 32] Broken pipe
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/usr/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/code/localstack/localstack/services/generic_proxy.py", line 103, in __init__
BaseHTTPRequestHandler.__init__(self, request, client_address, server)
File "/usr/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/usr/lib/python3.6/http/server.py", line 418, in handle
self.handle_one_request()
File "/usr/lib/python3.6/http/server.py", line 406, in handle_one_request
method()
File "/opt/code/localstack/localstack/services/generic_proxy.py", line 128, in do_GET
self.forward('GET')
File "/opt/code/localstack/localstack/services/generic_proxy.py", line 330, in forward
self.end_headers()
File "/usr/lib/python3.6/http/server.py", line 520, in end_headers
self.flush_headers()
File "/usr/lib/python3.6/http/server.py", line 524, in flush_headers
self.wfile.write(b"".join(self._headers_buffer))
File "/usr/lib/python3.6/socketserver.py", line 803, in write
self._sock.sendall(b)
BrokenPipeError: [Errno 32] Broken pipe
----------------------------------------
2019-10-06T12:42:12:WARNING:localstack.services.generic_proxy: Connection prematurely closed by client (broken pipe).
----------------------------------------
Exception happened during processing of request from ('172.17.0.1', 47988)
Traceback (most recent call last):
File "/opt/code/localstack/localstack/services/generic_proxy.py", line 313, in forward
self.wfile.write(to_bytes(response.content))
File "/usr/lib/python3.6/socketserver.py", line 803, in write
self._sock.sendall(b)
BrokenPipeError: [Errno 32] Broken pipe
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/usr/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/code/localstack/localstack/services/generic_proxy.py", line 103, in __init__
BaseHTTPRequestHandler.__init__(self, request, client_address, server)
File "/usr/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/usr/lib/python3.6/http/server.py", line 418, in handle
self.handle_one_request()
File "/usr/lib/python3.6/http/server.py", line 406, in handle_one_request
method()
File "/opt/code/localstack/localstack/services/generic_proxy.py", line 128, in do_GET
self.forward('GET')
File "/opt/code/localstack/localstack/services/generic_proxy.py", line 330, in forward
self.end_headers()
File "/usr/lib/python3.6/http/server.py", line 520, in end_headers
self.flush_headers()
File "/usr/lib/python3.6/http/server.py", line 524, in flush_headers
self.wfile.write(b"".join(self._headers_buffer))
File "/usr/lib/python3.6/socketserver.py", line 803, in write
self._sock.sendall(b)
BrokenPipeError: [Errno 32] Broken pipe
----------------------------------------
2019-10-06T12:42:12:WARNING:localstack.services.generic_proxy: Connection prematurely closed by client (broken pipe).
----------------------------------------
Exception happened during processing of request from ('172.17.0.1', 48006)
Traceback (most recent call last):
File "/opt/code/localstack/localstack/services/generic_proxy.py", line 313, in forward
self.wfile.write(to_bytes(response.content))
File "/usr/lib/python3.6/socketserver.py", line 803, in write
self._sock.sendall(b)
BrokenPipeError: [Errno 32] Broken pipe
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/usr/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/code/localstack/localstack/services/generic_proxy.py", line 103, in __init__
BaseHTTPRequestHandler.__init__(self, request, client_address, server)
File "/usr/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/usr/lib/python3.6/http/server.py", line 418, in handle
self.handle_one_request()
File "/usr/lib/python3.6/http/server.py", line 406, in handle_one_request
method()
File "/opt/code/localstack/localstack/services/generic_proxy.py", line 128, in do_GET
self.forward('GET')
File "/opt/code/localstack/localstack/services/generic_proxy.py", line 330, in forward
self.end_headers()
File "/usr/lib/python3.6/http/server.py", line 520, in end_headers
self.flush_headers()
File "/usr/lib/python3.6/http/server.py", line 524, in flush_headers
self.wfile.write(b"".join(self._headers_buffer))
File "/usr/lib/python3.6/socketserver.py", line 803, in write
self._sock.sendall(b)
BrokenPipeError: [Errno 32] Broken pipe
----------------------------------------
2019-10-06T12:42:12:WARNING:localstack.services.generic_proxy: Connection prematurely closed by client (broken pipe).
----------------------------------------
Exception happened during processing of request from ('172.17.0.1', 47960)
Traceback (most recent call last):
File "/opt/code/localstack/localstack/services/generic_proxy.py", line 313, in forward
self.wfile.write(to_bytes(response.content))
File "/usr/lib/python3.6/socketserver.py", line 803, in write
self._sock.sendall(b)
BrokenPipeError: [Errno 32] Broken pipe
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/usr/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/code/localstack/localstack/services/generic_proxy.py", line 103, in __init__
BaseHTTPRequestHandler.__init__(self, request, client_address, server)
File "/usr/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/usr/lib/python3.6/http/server.py", line 418, in handle
self.handle_one_request()
File "/usr/lib/python3.6/http/server.py", line 406, in handle_one_request
method()
File "/opt/code/localstack/localstack/services/generic_proxy.py", line 128, in do_GET
self.forward('GET')
File "/opt/code/localstack/localstack/services/generic_proxy.py", line 330, in forward
self.end_headers()
File "/usr/lib/python3.6/http/server.py", line 520, in end_headers
self.flush_headers()
File "/usr/lib/python3.6/http/server.py", line 524, in flush_headers
self.wfile.write(b"".join(self._headers_buffer))
File "/usr/lib/python3.6/socketserver.py", line 803, in write
self._sock.sendall(b)
BrokenPipeError: [Errno 32] Broken pipe
----------------------------------------
2019-10-06T12:42:12:WARNING:localstack.services.generic_proxy: Connection prematurely closed by client (broken pipe).
----------------------------------------
Exception happened during processing of request from ('172.17.0.1', 48012)
Traceback (most recent call last):
File "/opt/code/localstack/localstack/services/generic_proxy.py", line 313, in forward
self.wfile.write(to_bytes(response.content))
File "/usr/lib/python3.6/socketserver.py", line 803, in write
self._sock.sendall(b)
BrokenPipeError: [Errno 32] Broken pipe
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/usr/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/code/localstack/localstack/services/generic_proxy.py", line 103, in __init__
BaseHTTPRequestHandler.__init__(self, request, client_address, server)
File "/usr/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/usr/lib/python3.6/http/server.py", line 418, in handle
self.handle_one_request()
File "/usr/lib/python3.6/http/server.py", line 406, in handle_one_request
method()
File "/opt/code/localstack/localstack/services/generic_proxy.py", line 128, in do_GET
self.forward('GET')
File "/opt/code/localstack/localstack/services/generic_proxy.py", line 330, in forward
self.end_headers()
File "/usr/lib/python3.6/http/server.py", line 520, in end_headers
self.flush_headers()
File "/usr/lib/python3.6/http/server.py", line 524, in flush_headers
self.wfile.write(b"".join(self._headers_buffer))
File "/usr/lib/python3.6/socketserver.py", line 803, in write
self._sock.sendall(b)
BrokenPipeError: [Errno 32] Broken pipe
----------------------------------------
2019-10-06T12:42:12:WARNING:localstack.services.generic_proxy: Connection prematurely closed by client (broken pipe).
----------------------------------------
Exception happened during processing of request from ('172.17.0.1', 47958)
Traceback (most recent call last):
File "/opt/code/localstack/localstack/services/generic_proxy.py", line 313, in forward
self.wfile.write(to_bytes(response.content))
File "/usr/lib/python3.6/socketserver.py", line 803, in write
self._sock.sendall(b)
BrokenPipeError: [Errno 32] Broken pipe
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/usr/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/code/localstack/localstack/services/generic_proxy.py", line 103, in __init__
BaseHTTPRequestHandler.__init__(self, request, client_address, server)
File "/usr/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/usr/lib/python3.6/http/server.py", line 418, in handle
self.handle_one_request()
File "/usr/lib/python3.6/http/server.py", line 406, in handle_one_request
method()
File "/opt/code/localstack/localstack/services/generic_proxy.py", line 128, in do_GET
self.forward('GET')
File "/opt/code/localstack/localstack/services/generic_proxy.py", line 330, in forward
self.end_headers()
File "/usr/lib/python3.6/http/server.py", line 520, in end_headers
self.flush_headers()
File "/usr/lib/python3.6/http/server.py", line 524, in flush_headers
self.wfile.write(b"".join(self._headers_buffer))
File "/usr/lib/python3.6/socketserver.py", line 803, in write
self._sock.sendall(b)
BrokenPipeError: [Errno 32] Broken pipe
----------------------------------------
2019-10-06T12:42:12:WARNING:localstack.services.generic_proxy: Connection prematurely closed by client (broken pipe).
----------------------------------------
Exception happened during processing of request from ('172.17.0.1', 48018)
Traceback (most recent call last):
File "/opt/code/localstack/localstack/services/generic_proxy.py", line 313, in forward
self.wfile.write(to_bytes(response.content))
File "/usr/lib/python3.6/socketserver.py", line 803, in write
self._sock.sendall(b)
BrokenPipeError: [Errno 32] Broken pipe
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/usr/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/code/localstack/localstack/services/generic_proxy.py", line 103, in __init__
BaseHTTPRequestHandler.__init__(self, request, client_address, server)
File "/usr/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/usr/lib/python3.6/http/server.py", line 418, in handle
self.handle_one_request()
File "/usr/lib/python3.6/http/server.py", line 406, in handle_one_request
method()
File "/opt/code/localstack/localstack/services/generic_proxy.py", line 128, in do_GET
self.forward('GET')
File "/opt/code/localstack/localstack/services/generic_proxy.py", line 330, in forward
self.end_headers()
File "/usr/lib/python3.6/http/server.py", line 520, in end_headers
self.flush_headers()
File "/usr/lib/python3.6/http/server.py", line 524, in flush_headers
self.wfile.write(b"".join(self._headers_buffer))
File "/usr/lib/python3.6/socketserver.py", line 803, in write
self._sock.sendall(b)
BrokenPipeError: [Errno 32] Broken pipe
----------------------------------------
```
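For reference, a minimal boto3 sketch of the same round trip (not part of the original report; the endpoint, bucket, and key names are taken from the shell script above), with a checksum to confirm the object survives the round trip intact rather than being truncated:
```
import hashlib

import boto3

# point boto3 at the local S3 endpoint used in the report above
s3 = boto3.client("s3", endpoint_url="http://localhost:4572")
s3.create_bucket(Bucket="test-bucket")
s3.upload_file("/tmp/test.tar", "test-bucket", "test.tar")
s3.download_file("test-bucket", "test.tar", "/tmp/test.copy.tar")


def md5(path):
    # stream the file so large objects do not need to fit in memory
    h = hashlib.md5()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest()


assert md5("/tmp/test.tar") == md5("/tmp/test.copy.tar")
```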
| [
{
"content": "import sys\nimport logging\nimport traceback\nfrom moto.s3 import models as s3_models\nfrom moto.server import main as moto_main\nfrom localstack import config\nfrom localstack.constants import DEFAULT_PORT_S3_BACKEND\nfrom localstack.utils.aws import aws_stack\nfrom localstack.utils.common import wait_for_port_open\nfrom localstack.services.infra import (\n get_service_protocol, start_proxy_for_service, do_run)\nfrom localstack.utils.bootstrap import setup_logging\n\nLOGGER = logging.getLogger(__name__)\n\n# max file size for S3 objects (in MB)\nS3_MAX_FILE_SIZE_MB = 128\n\n\ndef check_s3(expect_shutdown=False, print_error=False):\n out = None\n try:\n # wait for port to be opened\n wait_for_port_open(DEFAULT_PORT_S3_BACKEND)\n # check S3\n out = aws_stack.connect_to_service(service_name='s3').list_buckets()\n except Exception as e:\n if print_error:\n LOGGER.error('S3 health check failed: %s %s' % (e, traceback.format_exc()))\n if expect_shutdown:\n assert out is None\n else:\n assert isinstance(out['Buckets'], list)\n\n\ndef start_s3(port=None, backend_port=None, asynchronous=None, update_listener=None):\n port = port or config.PORT_S3\n backend_port = DEFAULT_PORT_S3_BACKEND\n cmd = '%s \"%s\" s3 -p %s -H 0.0.0.0' % (sys.executable, __file__, backend_port)\n print('Starting mock S3 (%s port %s)...' % (get_service_protocol(), port))\n start_proxy_for_service('s3', port, backend_port, update_listener)\n env_vars = {'PYTHONPATH': ':'.join(sys.path)}\n return do_run(cmd, asynchronous, env_vars=env_vars)\n\n\ndef apply_patches():\n s3_models.DEFAULT_KEY_BUFFER_SIZE = S3_MAX_FILE_SIZE_MB * 1024 * 1024\n\n def init(self, name, value, storage='STANDARD', etag=None,\n is_versioned=False, version_id=0, max_buffer_size=None, *args, **kwargs):\n return original_init(self, name, value, storage=storage, etag=etag, is_versioned=is_versioned,\n version_id=version_id, max_buffer_size=s3_models.DEFAULT_KEY_BUFFER_SIZE, *args, **kwargs)\n\n original_init = s3_models.FakeKey.__init__\n s3_models.FakeKey.__init__ = init\n\n\ndef main():\n setup_logging()\n # patch moto implementation\n apply_patches()\n # start API\n sys.exit(moto_main())\n\n\nif __name__ == '__main__':\n main()\n",
"path": "localstack/services/s3/s3_starter.py"
}
] | [
{
"content": "import sys\nimport logging\nimport traceback\nfrom moto.s3 import models as s3_models\nfrom moto.server import main as moto_main\nfrom localstack import config\nfrom localstack.constants import DEFAULT_PORT_S3_BACKEND\nfrom localstack.utils.aws import aws_stack\nfrom localstack.utils.common import wait_for_port_open\nfrom localstack.services.infra import (\n get_service_protocol, start_proxy_for_service, do_run)\nfrom localstack.utils.bootstrap import setup_logging\n\nLOGGER = logging.getLogger(__name__)\n\n# max file size for S3 objects (in MB)\nS3_MAX_FILE_SIZE_MB = 2048\n\n\ndef check_s3(expect_shutdown=False, print_error=False):\n out = None\n try:\n # wait for port to be opened\n wait_for_port_open(DEFAULT_PORT_S3_BACKEND)\n # check S3\n out = aws_stack.connect_to_service(service_name='s3').list_buckets()\n except Exception as e:\n if print_error:\n LOGGER.error('S3 health check failed: %s %s' % (e, traceback.format_exc()))\n if expect_shutdown:\n assert out is None\n else:\n assert isinstance(out['Buckets'], list)\n\n\ndef start_s3(port=None, backend_port=None, asynchronous=None, update_listener=None):\n port = port or config.PORT_S3\n backend_port = DEFAULT_PORT_S3_BACKEND\n cmd = '%s \"%s\" s3 -p %s -H 0.0.0.0' % (sys.executable, __file__, backend_port)\n print('Starting mock S3 (%s port %s)...' % (get_service_protocol(), port))\n start_proxy_for_service('s3', port, backend_port, update_listener)\n env_vars = {'PYTHONPATH': ':'.join(sys.path)}\n return do_run(cmd, asynchronous, env_vars=env_vars)\n\n\ndef apply_patches():\n s3_models.DEFAULT_KEY_BUFFER_SIZE = S3_MAX_FILE_SIZE_MB * 1024 * 1024\n\n def init(self, name, value, storage='STANDARD', etag=None,\n is_versioned=False, version_id=0, max_buffer_size=None, *args, **kwargs):\n return original_init(self, name, value, storage=storage, etag=etag, is_versioned=is_versioned,\n version_id=version_id, max_buffer_size=s3_models.DEFAULT_KEY_BUFFER_SIZE, *args, **kwargs)\n\n original_init = s3_models.FakeKey.__init__\n s3_models.FakeKey.__init__ = init\n\n\ndef main():\n setup_logging()\n # patch moto implementation\n apply_patches()\n # start API\n sys.exit(moto_main())\n\n\nif __name__ == '__main__':\n main()\n",
"path": "localstack/services/s3/s3_starter.py"
}
] | diff --git a/localstack/services/s3/s3_starter.py b/localstack/services/s3/s3_starter.py
index 49c60ba000558..428e54db934f1 100644
--- a/localstack/services/s3/s3_starter.py
+++ b/localstack/services/s3/s3_starter.py
@@ -14,7 +14,7 @@
LOGGER = logging.getLogger(__name__)
# max file size for S3 objects (in MB)
-S3_MAX_FILE_SIZE_MB = 128
+S3_MAX_FILE_SIZE_MB = 2048
def check_s3(expect_shutdown=False, print_error=False):
|
scikit-hep__pyhf-126 | test_backend_consistency not resetting to default backend if test fails unexpectedly
# Description
A cascading failure is observed when test_backend_consistency fails: the backend is left set to tensorflow, which then causes all the other tests to fail erroneously.
<img width="1550" alt="screenshot 2018-04-15 20 45 50" src="https://user-images.githubusercontent.com/761483/38786764-92380ebc-40ef-11e8-921c-fc20a2d96578.png">
Easy to reproduce: run `pytest` and see `test_pdf.py` fail; then run `pytest tests/test_pdf.py` on its own and see that it's fine (as in the screenshot).
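One way to stop the cascade is to make the backend reset unconditional instead of relying on each test to clean up after itself. The sketch below is illustrative only: it assumes pyhf keeps (or adds) a module-level handle to the default NumPy backend, called `pyhf.default_backend` here, which is an assumed name rather than an existing attribute.

```python
# tests/conftest.py -- sketch; `pyhf.default_backend` is an assumed handle
import pytest
import pyhf


@pytest.fixture(autouse=True)
def reset_backend():
    # Run the test with whatever backend it chooses to set...
    yield
    # ...then always restore the default backend, even if the test failed,
    # so a failure in one module cannot poison the rest of the suite.
    pyhf.set_backend(pyhf.default_backend)
```

With an autouse fixture like this, the explicit `pyhf.set_backend(oldlib)` calls scattered through individual tests would no longer be needed.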
| [
{
"content": "import logging\nimport pyhf.optimize as optimize\nimport pyhf.tensor as tensor\n\n\nlog = logging.getLogger(__name__)\ntensorlib = tensor.numpy_backend()\noptimizer = optimize.scipy_optimizer()\n\ndef set_backend(backend):\n \"\"\"\n Set the backend and the associated optimizer\n\n Args:\n backend: One of the supported pyhf backends: NumPy,\n TensorFlow, PyTorch, and MXNet\n\n Returns:\n None\n\n Example:\n pyhf.set_backend(tensorflow_backend(session=tf.Session()))\n \"\"\"\n global tensorlib\n global optimizer\n\n tensorlib = backend\n if isinstance(tensorlib, tensor.tensorflow_backend):\n optimizer = optimize.tflow_optimizer(tensorlib)\n elif isinstance(tensorlib,tensor.pytorch_backend):\n optimizer = optimize.pytorch_optimizer(tensorlib=tensorlib)\n # TODO: Add support for mxnet_optimizer()\n # elif isinstance(tensorlib, mxnet_backend):\n # optimizer = mxnet_optimizer()\n else:\n optimizer = optimize.scipy_optimizer()\n\ndef _hfinterp_code0(at_minus_one, at_zero, at_plus_one, alphas):\n at_minus_one = tensorlib.astensor(at_minus_one)\n at_zero = tensorlib.astensor(at_zero)\n at_plus_one = tensorlib.astensor(at_plus_one)\n\n alphas = tensorlib.astensor(alphas)\n\n iplus_izero = at_plus_one - at_zero\n izero_iminus = at_zero - at_minus_one\n\n mask = tensorlib.outer(alphas < 0, tensorlib.ones(iplus_izero.shape))\n return tensorlib.where(mask, tensorlib.outer(alphas, izero_iminus), tensorlib.outer(alphas, iplus_izero))\n\ndef _hfinterp_code1(at_minus_one, at_zero, at_plus_one, alphas):\n at_minus_one = tensorlib.astensor(at_minus_one)\n at_zero = tensorlib.astensor(at_zero)\n at_plus_one = tensorlib.astensor(at_plus_one)\n alphas = tensorlib.astensor(alphas)\n\n base_positive = tensorlib.divide(at_plus_one, at_zero)\n base_negative = tensorlib.divide(at_minus_one, at_zero)\n expo_positive = tensorlib.outer(alphas, tensorlib.ones(base_positive.shape))\n mask = tensorlib.outer(alphas > 0, tensorlib.ones(base_positive.shape))\n bases = tensorlib.where(mask,base_positive,base_negative)\n exponents = tensorlib.where(mask, expo_positive,-expo_positive)\n return tensorlib.power(bases, exponents)\n\nclass normsys_constraint(object):\n\n def __init__(self):\n self.at_zero = 1\n self.at_minus_one = {}\n self.at_plus_one = {}\n self.auxdata = [0] # observed data is always at a = 1\n\n def add_sample(self, channel, sample, modifier_data):\n self.at_minus_one.setdefault(channel['name'], {})[sample['name']] = modifier_data['lo']\n self.at_plus_one.setdefault(channel['name'], {})[sample['name']] = modifier_data['hi']\n\n def alphas(self, pars):\n return pars # the nuisance parameters correspond directly to the alpha\n\n def expected_data(self, pars):\n return self.alphas(pars)\n\n def pdf(self, a, alpha):\n return tensorlib.normal(a, alpha, 1)\n\nclass histosys_constraint(object):\n\n def __init__(self):\n self.at_zero = {}\n self.at_minus_one = {}\n self.at_plus_one = {}\n self.auxdata = [0] # observed data is always at a = 1\n\n def add_sample(self, channel, sample, modifier_data):\n self.at_zero.setdefault(channel['name'], {})[sample['name']] = sample['data']\n self.at_minus_one.setdefault(channel['name'], {})[sample['name']] = modifier_data['lo_data']\n self.at_plus_one.setdefault(channel['name'], {})[sample['name']] = modifier_data['hi_data']\n\n def alphas(self, pars):\n return pars # the nuisance parameters correspond directly to the alpha\n\n def expected_data(self, pars):\n return self.alphas(pars)\n\n def pdf(self, a, alpha):\n return tensorlib.normal(a, alpha, [1])\n\n\nclass 
shapesys_constraint(object):\n\n def __init__(self, nom_data, modifier_data):\n self.auxdata = []\n self.bkg_over_db_squared = []\n for b, deltab in zip(nom_data, modifier_data):\n bkg_over_bsq = b * b / deltab / deltab # tau*b\n log.info('shapesys for b,delta b (%s, %s) -> tau*b = %s',\n b, deltab, bkg_over_bsq)\n self.bkg_over_db_squared.append(bkg_over_bsq)\n self.auxdata.append(bkg_over_bsq)\n\n def alphas(self, pars):\n return tensorlib.product(tensorlib.stack([pars, tensorlib.astensor(self.bkg_over_db_squared)]), axis=0)\n\n def pdf(self, a, alpha):\n return tensorlib.poisson(a, alpha)\n\n def expected_data(self, pars):\n return self.alphas(pars)\n\nclass modelconfig(object):\n @classmethod\n def from_spec(cls,spec,poiname = 'mu'):\n # hacky, need to keep track in which order we added the constraints\n # so that we can generate correctly-ordered data\n instance = cls()\n for channel in spec['channels']:\n for sample in channel['samples']:\n for modifier_def in sample['modifiers']:\n instance.add_modifier_from_def(channel, sample, modifier_def)\n instance.set_poi(poiname)\n return instance\n\n def __init__(self):\n self.poi_index = None\n self.par_map = {}\n self.par_order = []\n self.auxdata = []\n self.auxdata_order = []\n self.next_index = 0\n\n def suggested_init(self):\n init = []\n for name in self.par_order:\n init = init + self.par_map[name]['suggested_init']\n return init\n\n def suggested_bounds(self):\n bounds = []\n for name in self.par_order:\n bounds = bounds + self.par_map[name]['suggested_bounds']\n return bounds\n\n def par_slice(self, name):\n return self.par_map[name]['slice']\n\n def modifier(self, name):\n return self.par_map[name]['modifier']\n\n def set_poi(self,name):\n s = self.par_slice(name)\n assert s.stop-s.start == 1\n self.poi_index = s.start\n\n def add_modifier(self, name, npars, modifier, suggested_init, suggested_bounds):\n is_constraint = type(modifier) in [histosys_constraint, normsys_constraint, shapesys_constraint]\n if name in self.par_map:\n if type(modifier) == normsys_constraint:\n log.info('accepting existing normsys')\n return False\n if type(modifier) == histosys_constraint:\n log.info('accepting existing histosys')\n return False\n if type(modifier) == type(None):\n log.info('accepting existing unconstrained factor ')\n return False\n raise RuntimeError(\n 'shared systematic not implemented yet (processing {})'.format(name))\n log.info('adding modifier %s (%s new nuisance parameters)', name, npars)\n\n sl = slice(self.next_index, self.next_index + npars)\n self.next_index = self.next_index + npars\n self.par_order.append(name)\n self.par_map[name] = {\n 'slice': sl,\n 'modifier': modifier,\n 'suggested_init': suggested_init,\n 'suggested_bounds': suggested_bounds\n }\n if is_constraint:\n self.auxdata += self.modifier(name).auxdata\n self.auxdata_order.append(name)\n return True\n\n def add_modifier_from_def(self, channel, sample, modifier_def):\n if modifier_def['type'] == 'normfactor':\n modifier = None # no object for factors\n self.add_modifier(name=modifier_def['name'],\n modifier=modifier,\n npars=1,\n suggested_init=[1.0],\n suggested_bounds=[[0, 10]])\n if modifier_def['type'] == 'shapefactor':\n modifier = None # no object for factors\n self.add_modifier(name=modifier_def['name'],\n modifier=modifier,\n npars=len(sample['data']),\n suggested_init =[1.0] * len(sample['data']),\n suggested_bounds=[[0, 10]] * len(sample['data'])\n )\n if modifier_def['type'] == 'shapesys':\n # we reserve one parameter for each bin\n modifier = 
shapesys_constraint(sample['data'], modifier_def['data'])\n self.add_modifier(\n name=modifier_def['name'],\n npars=len(sample['data']),\n suggested_init=[1.0] * len(sample['data']),\n suggested_bounds=[[0, 10]] * len(sample['data']),\n modifier=modifier,\n )\n if modifier_def['type'] == 'normsys':\n modifier = normsys_constraint()\n self.add_modifier(name=modifier_def['name'],\n npars=1,\n modifier=modifier,\n suggested_init=[0.0],\n suggested_bounds=[[-5, 5]])\n self.modifier(modifier_def['name']).add_sample(channel, sample, modifier_def['data'])\n if modifier_def['type'] == 'histosys':\n modifier = histosys_constraint()\n self.add_modifier(\n modifier_def['name'],\n npars=1,\n modifier=modifier,\n suggested_init=[1.0],\n suggested_bounds=[[-5, 5]])\n self.modifier(modifier_def['name']).add_sample(channel, sample, modifier_def['data'])\n\nclass hfpdf(object):\n def __init__(self, spec, **config_kwargs):\n self.config = modelconfig.from_spec(spec,**config_kwargs)\n self.spec = spec\n\n def _multiplicative_factors(self, channel, sample, pars):\n multiplicative_types = ['shapesys', 'normfactor', 'shapefactor']\n modifiers = [m['name'] for m in sample['modifiers'] if m['type'] in multiplicative_types]\n return [pars[self.config.par_slice(m)] for m in modifiers]\n\n def _normsysfactor(self, channel, sample, pars):\n # normsysfactor(nom_sys_alphas) = 1 + sum(interp(1, anchors[i][0],\n # anchors[i][0], val=alpha) for i in range(nom_sys_alphas))\n modifiers = [m['name'] for m in sample['modifiers'] if m['type'] == 'normsys']\n factors = []\n for m in modifiers:\n modifier, modpars = self.config.modifier(m), pars[self.config.par_slice(m)]\n assert int(modpars.shape[0]) == 1\n mod_factor = _hfinterp_code1(modifier.at_minus_one[channel['name']][sample['name']],\n modifier.at_zero,\n modifier.at_plus_one[channel['name']][sample['name']],\n modpars)[0]\n factors.append(mod_factor)\n return tensorlib.product(factors)\n\n def _histosysdelta(self, channel, sample, pars):\n modifiers = [m['name'] for m in sample['modifiers']\n if m['type'] == 'histosys']\n stack = None\n for m in modifiers:\n modifier, modpars = self.config.modifier(m), pars[self.config.par_slice(m)]\n assert int(modpars.shape[0]) == 1\n\n # print 'MODPARS', type(modpars.data)\n\n mod_delta = _hfinterp_code0(modifier.at_minus_one[channel['name']][sample['name']],\n modifier.at_zero[channel['name']][sample['name']],\n modifier.at_plus_one[channel['name']][sample['name']],\n modpars)[0]\n stack = tensorlib.stack([mod_delta]) if stack is None else tensorlib.stack([stack,mod_delta])\n\n return tensorlib.sum(stack, axis=0) if stack is not None else None\n\n def expected_sample(self, channel, sample, pars):\n # for each sample the expected ocunts are\n # counts = (multiplicative factors) * (normsys multiplier) * (histsys delta + nominal hist)\n # = f1*f2*f3*f4* nomsysfactor(nom_sys_alphas) * hist(hist_addition(histosys_alphas) + nomdata)\n # nomsysfactor(nom_sys_alphas) = 1 + sum(interp(1, anchors[i][0], anchors[i][0], val=alpha) for i in range(nom_sys_alphas))\n # hist_addition(histosys_alphas) = sum(interp(nombin, anchors[i][0],\n # anchors[i][0], val=alpha) for i in range(histosys_alphas))\n nom = tensorlib.astensor(sample['data'])\n histosys_delta = self._histosysdelta(channel, sample, pars)\n\n interp_histo = tensorlib.sum(tensorlib.stack([nom, histosys_delta]), axis=0) if (histosys_delta is not None) else nom\n\n factors = []\n factors += self._multiplicative_factors(channel, sample, pars)\n factors += [self._normsysfactor(channel, 
sample, pars)]\n factors += [interp_histo]\n return tensorlib.product(tensorlib.stack(tensorlib.simple_broadcast(*factors)), axis=0)\n\n def expected_auxdata(self, pars):\n # probably more correctly this should be the expectation value of the constraint_pdf\n # or for the constraints we are using (single par constraings with mean == mode), we can\n # just return the alphas\n\n # order matters! because we generated auxdata in a certain order\n auxdata = None\n for modname in self.config.auxdata_order:\n thisaux = self.config.modifier(modname).expected_data(\n pars[self.config.par_slice(modname)])\n tocat = [thisaux] if auxdata is None else [auxdata, thisaux]\n auxdata = tensorlib.concatenate(tocat)\n return auxdata\n\n def expected_actualdata(self, pars):\n pars = tensorlib.astensor(pars)\n data = []\n for channel in self.spec['channels']:\n data.append(tensorlib.sum(tensorlib.stack([self.expected_sample(channel, sample, pars) for sample in channel['samples']]),axis=0))\n return tensorlib.concatenate(data)\n\n def expected_data(self, pars, include_auxdata=True):\n pars = tensorlib.astensor(pars)\n expected_actual = self.expected_actualdata(pars)\n\n if not include_auxdata:\n return expected_actual\n expected_constraints = self.expected_auxdata(pars)\n tocat = [expected_actual] if expected_constraints is None else [expected_actual,expected_constraints]\n return tensorlib.concatenate(tocat)\n\n def constraint_logpdf(self, auxdata, pars):\n # iterate over all constraints order doesn't matter....\n start_index = 0\n summands = None\n for cname in self.config.auxdata_order:\n modifier, modslice = self.config.modifier(cname), \\\n self.config.par_slice(cname)\n modalphas = modifier.alphas(pars[modslice])\n end_index = start_index + int(modalphas.shape[0])\n thisauxdata = auxdata[start_index:end_index]\n start_index = end_index\n constraint_term = tensorlib.log(modifier.pdf(thisauxdata, modalphas))\n summands = constraint_term if summands is None else tensorlib.concatenate([summands,constraint_term])\n return tensorlib.sum(summands) if summands is not None else 0\n\n def logpdf(self, pars, data):\n pars, data = tensorlib.astensor(pars), tensorlib.astensor(data)\n cut = int(data.shape[0]) - len(self.config.auxdata)\n actual_data, aux_data = data[:cut], data[cut:]\n lambdas_data = self.expected_actualdata(pars)\n summands = tensorlib.log(tensorlib.poisson(actual_data, lambdas_data))\n\n result = tensorlib.sum(summands) + self.constraint_logpdf(aux_data, pars)\n return tensorlib.astensor(result) * tensorlib.ones((1)) #ensure (1,) array shape also for numpy\n\n def pdf(self, pars, data):\n return tensorlib.exp(self.logpdf(pars, data))\n\n\ndef generate_asimov_data(asimov_mu, data, pdf, init_pars, par_bounds):\n bestfit_nuisance_asimov = optimizer.constrained_bestfit(\n loglambdav, asimov_mu, data, pdf, init_pars, par_bounds)\n return pdf.expected_data(bestfit_nuisance_asimov)\n\n##########################\n\n\ndef loglambdav(pars, data, pdf):\n return -2 * pdf.logpdf(pars, data)\n\ndef qmu(mu, data, pdf, init_pars, par_bounds):\n # The Test Statistic\n mubhathat = tensorlib.tolist(optimizer.constrained_bestfit(loglambdav, mu, data, pdf, init_pars, par_bounds))\n muhatbhat = tensorlib.tolist(optimizer.unconstrained_bestfit(loglambdav, data, pdf, init_pars, par_bounds))\n qmu = tensorlib.tolist(loglambdav(mubhathat, data, pdf) - loglambdav(muhatbhat, data, pdf))[0]\n if muhatbhat[pdf.config.poi_index] > mu:\n return 0.0\n if -1e-6 < qmu < 0:\n log.warning('WARNING: qmu negative: %s', qmu)\n return 0.0\n 
return qmu\n\nfrom scipy.stats import norm\ndef pvals_from_teststat(sqrtqmu_v, sqrtqmuA_v):\n CLsb = 1 - norm.cdf(sqrtqmu_v)\n CLb = norm.cdf(sqrtqmuA_v - sqrtqmu_v)\n CLs = CLb / CLsb\n return CLsb, CLb, CLs\n\nimport math\ndef runOnePoint(muTest, data, pdf, init_pars, par_bounds):\n asimov_mu = 0.0\n asimov_data = tensorlib.tolist(generate_asimov_data(asimov_mu, data,\n pdf, init_pars, par_bounds))\n\n qmu_v = qmu(muTest, data, pdf, init_pars, par_bounds)\n qmuA_v = qmu(muTest, asimov_data, pdf, init_pars, par_bounds)\n\n sqrtqmu_v = math.sqrt(qmu_v)\n sqrtqmuA_v = math.sqrt(qmuA_v)\n\n sigma = muTest / sqrtqmuA_v if sqrtqmuA_v > 0 else None\n\n CLsb, CLb, CLs = pvals_from_teststat(sqrtqmu_v, sqrtqmuA_v)\n\n CLs_exp = []\n for nsigma in [-2, -1, 0, 1, 2]:\n sqrtqmu_v_sigma = sqrtqmuA_v - nsigma\n CLs_exp.append(pvals_from_teststat(sqrtqmu_v_sigma, sqrtqmuA_v)[-1])\n return qmu_v, qmuA_v, sigma, CLsb, CLb, CLs, CLs_exp\n",
"path": "pyhf/__init__.py"
}
] | [
{
"content": "import logging\nimport pyhf.optimize as optimize\nimport pyhf.tensor as tensor\n\n\nlog = logging.getLogger(__name__)\ntensorlib = tensor.numpy_backend()\ndefault_backend = tensorlib\noptimizer = optimize.scipy_optimizer()\ndefault_optimizer = optimizer\n\ndef set_backend(backend):\n \"\"\"\n Set the backend and the associated optimizer\n\n Args:\n backend: One of the supported pyhf backends: NumPy,\n TensorFlow, PyTorch, and MXNet\n\n Returns:\n None\n\n Example:\n pyhf.set_backend(tensorflow_backend(session=tf.Session()))\n \"\"\"\n global tensorlib\n global optimizer\n\n tensorlib = backend\n if isinstance(tensorlib, tensor.tensorflow_backend):\n optimizer = optimize.tflow_optimizer(tensorlib)\n elif isinstance(tensorlib,tensor.pytorch_backend):\n optimizer = optimize.pytorch_optimizer(tensorlib=tensorlib)\n # TODO: Add support for mxnet_optimizer()\n # elif isinstance(tensorlib, mxnet_backend):\n # optimizer = mxnet_optimizer()\n else:\n optimizer = optimize.scipy_optimizer()\n\ndef _hfinterp_code0(at_minus_one, at_zero, at_plus_one, alphas):\n at_minus_one = tensorlib.astensor(at_minus_one)\n at_zero = tensorlib.astensor(at_zero)\n at_plus_one = tensorlib.astensor(at_plus_one)\n\n alphas = tensorlib.astensor(alphas)\n\n iplus_izero = at_plus_one - at_zero\n izero_iminus = at_zero - at_minus_one\n\n mask = tensorlib.outer(alphas < 0, tensorlib.ones(iplus_izero.shape))\n return tensorlib.where(mask, tensorlib.outer(alphas, izero_iminus), tensorlib.outer(alphas, iplus_izero))\n\ndef _hfinterp_code1(at_minus_one, at_zero, at_plus_one, alphas):\n at_minus_one = tensorlib.astensor(at_minus_one)\n at_zero = tensorlib.astensor(at_zero)\n at_plus_one = tensorlib.astensor(at_plus_one)\n alphas = tensorlib.astensor(alphas)\n\n base_positive = tensorlib.divide(at_plus_one, at_zero)\n base_negative = tensorlib.divide(at_minus_one, at_zero)\n expo_positive = tensorlib.outer(alphas, tensorlib.ones(base_positive.shape))\n mask = tensorlib.outer(alphas > 0, tensorlib.ones(base_positive.shape))\n bases = tensorlib.where(mask,base_positive,base_negative)\n exponents = tensorlib.where(mask, expo_positive,-expo_positive)\n return tensorlib.power(bases, exponents)\n\nclass normsys_constraint(object):\n\n def __init__(self):\n self.at_zero = 1\n self.at_minus_one = {}\n self.at_plus_one = {}\n self.auxdata = [0] # observed data is always at a = 1\n\n def add_sample(self, channel, sample, modifier_data):\n self.at_minus_one.setdefault(channel['name'], {})[sample['name']] = modifier_data['lo']\n self.at_plus_one.setdefault(channel['name'], {})[sample['name']] = modifier_data['hi']\n\n def alphas(self, pars):\n return pars # the nuisance parameters correspond directly to the alpha\n\n def expected_data(self, pars):\n return self.alphas(pars)\n\n def pdf(self, a, alpha):\n return tensorlib.normal(a, alpha, 1)\n\nclass histosys_constraint(object):\n\n def __init__(self):\n self.at_zero = {}\n self.at_minus_one = {}\n self.at_plus_one = {}\n self.auxdata = [0] # observed data is always at a = 1\n\n def add_sample(self, channel, sample, modifier_data):\n self.at_zero.setdefault(channel['name'], {})[sample['name']] = sample['data']\n self.at_minus_one.setdefault(channel['name'], {})[sample['name']] = modifier_data['lo_data']\n self.at_plus_one.setdefault(channel['name'], {})[sample['name']] = modifier_data['hi_data']\n\n def alphas(self, pars):\n return pars # the nuisance parameters correspond directly to the alpha\n\n def expected_data(self, pars):\n return self.alphas(pars)\n\n def pdf(self, a, 
alpha):\n return tensorlib.normal(a, alpha, [1])\n\n\nclass shapesys_constraint(object):\n\n def __init__(self, nom_data, modifier_data):\n self.auxdata = []\n self.bkg_over_db_squared = []\n for b, deltab in zip(nom_data, modifier_data):\n bkg_over_bsq = b * b / deltab / deltab # tau*b\n log.info('shapesys for b,delta b (%s, %s) -> tau*b = %s',\n b, deltab, bkg_over_bsq)\n self.bkg_over_db_squared.append(bkg_over_bsq)\n self.auxdata.append(bkg_over_bsq)\n\n def alphas(self, pars):\n return tensorlib.product(tensorlib.stack([pars, tensorlib.astensor(self.bkg_over_db_squared)]), axis=0)\n\n def pdf(self, a, alpha):\n return tensorlib.poisson(a, alpha)\n\n def expected_data(self, pars):\n return self.alphas(pars)\n\nclass modelconfig(object):\n @classmethod\n def from_spec(cls,spec,poiname = 'mu'):\n # hacky, need to keep track in which order we added the constraints\n # so that we can generate correctly-ordered data\n instance = cls()\n for channel in spec['channels']:\n for sample in channel['samples']:\n for modifier_def in sample['modifiers']:\n instance.add_modifier_from_def(channel, sample, modifier_def)\n instance.set_poi(poiname)\n return instance\n\n def __init__(self):\n self.poi_index = None\n self.par_map = {}\n self.par_order = []\n self.auxdata = []\n self.auxdata_order = []\n self.next_index = 0\n\n def suggested_init(self):\n init = []\n for name in self.par_order:\n init = init + self.par_map[name]['suggested_init']\n return init\n\n def suggested_bounds(self):\n bounds = []\n for name in self.par_order:\n bounds = bounds + self.par_map[name]['suggested_bounds']\n return bounds\n\n def par_slice(self, name):\n return self.par_map[name]['slice']\n\n def modifier(self, name):\n return self.par_map[name]['modifier']\n\n def set_poi(self,name):\n s = self.par_slice(name)\n assert s.stop-s.start == 1\n self.poi_index = s.start\n\n def add_modifier(self, name, npars, modifier, suggested_init, suggested_bounds):\n is_constraint = type(modifier) in [histosys_constraint, normsys_constraint, shapesys_constraint]\n if name in self.par_map:\n if type(modifier) == normsys_constraint:\n log.info('accepting existing normsys')\n return False\n if type(modifier) == histosys_constraint:\n log.info('accepting existing histosys')\n return False\n if type(modifier) == type(None):\n log.info('accepting existing unconstrained factor ')\n return False\n raise RuntimeError(\n 'shared systematic not implemented yet (processing {})'.format(name))\n log.info('adding modifier %s (%s new nuisance parameters)', name, npars)\n\n sl = slice(self.next_index, self.next_index + npars)\n self.next_index = self.next_index + npars\n self.par_order.append(name)\n self.par_map[name] = {\n 'slice': sl,\n 'modifier': modifier,\n 'suggested_init': suggested_init,\n 'suggested_bounds': suggested_bounds\n }\n if is_constraint:\n self.auxdata += self.modifier(name).auxdata\n self.auxdata_order.append(name)\n return True\n\n def add_modifier_from_def(self, channel, sample, modifier_def):\n if modifier_def['type'] == 'normfactor':\n modifier = None # no object for factors\n self.add_modifier(name=modifier_def['name'],\n modifier=modifier,\n npars=1,\n suggested_init=[1.0],\n suggested_bounds=[[0, 10]])\n if modifier_def['type'] == 'shapefactor':\n modifier = None # no object for factors\n self.add_modifier(name=modifier_def['name'],\n modifier=modifier,\n npars=len(sample['data']),\n suggested_init =[1.0] * len(sample['data']),\n suggested_bounds=[[0, 10]] * len(sample['data'])\n )\n if modifier_def['type'] == 
'shapesys':\n # we reserve one parameter for each bin\n modifier = shapesys_constraint(sample['data'], modifier_def['data'])\n self.add_modifier(\n name=modifier_def['name'],\n npars=len(sample['data']),\n suggested_init=[1.0] * len(sample['data']),\n suggested_bounds=[[0, 10]] * len(sample['data']),\n modifier=modifier,\n )\n if modifier_def['type'] == 'normsys':\n modifier = normsys_constraint()\n self.add_modifier(name=modifier_def['name'],\n npars=1,\n modifier=modifier,\n suggested_init=[0.0],\n suggested_bounds=[[-5, 5]])\n self.modifier(modifier_def['name']).add_sample(channel, sample, modifier_def['data'])\n if modifier_def['type'] == 'histosys':\n modifier = histosys_constraint()\n self.add_modifier(\n modifier_def['name'],\n npars=1,\n modifier=modifier,\n suggested_init=[1.0],\n suggested_bounds=[[-5, 5]])\n self.modifier(modifier_def['name']).add_sample(channel, sample, modifier_def['data'])\n\nclass hfpdf(object):\n def __init__(self, spec, **config_kwargs):\n self.config = modelconfig.from_spec(spec,**config_kwargs)\n self.spec = spec\n\n def _multiplicative_factors(self, channel, sample, pars):\n multiplicative_types = ['shapesys', 'normfactor', 'shapefactor']\n modifiers = [m['name'] for m in sample['modifiers'] if m['type'] in multiplicative_types]\n return [pars[self.config.par_slice(m)] for m in modifiers]\n\n def _normsysfactor(self, channel, sample, pars):\n # normsysfactor(nom_sys_alphas) = 1 + sum(interp(1, anchors[i][0],\n # anchors[i][0], val=alpha) for i in range(nom_sys_alphas))\n modifiers = [m['name'] for m in sample['modifiers'] if m['type'] == 'normsys']\n factors = []\n for m in modifiers:\n modifier, modpars = self.config.modifier(m), pars[self.config.par_slice(m)]\n assert int(modpars.shape[0]) == 1\n mod_factor = _hfinterp_code1(modifier.at_minus_one[channel['name']][sample['name']],\n modifier.at_zero,\n modifier.at_plus_one[channel['name']][sample['name']],\n modpars)[0]\n factors.append(mod_factor)\n return tensorlib.product(factors)\n\n def _histosysdelta(self, channel, sample, pars):\n modifiers = [m['name'] for m in sample['modifiers']\n if m['type'] == 'histosys']\n stack = None\n for m in modifiers:\n modifier, modpars = self.config.modifier(m), pars[self.config.par_slice(m)]\n assert int(modpars.shape[0]) == 1\n\n # print 'MODPARS', type(modpars.data)\n\n mod_delta = _hfinterp_code0(modifier.at_minus_one[channel['name']][sample['name']],\n modifier.at_zero[channel['name']][sample['name']],\n modifier.at_plus_one[channel['name']][sample['name']],\n modpars)[0]\n stack = tensorlib.stack([mod_delta]) if stack is None else tensorlib.stack([stack,mod_delta])\n\n return tensorlib.sum(stack, axis=0) if stack is not None else None\n\n def expected_sample(self, channel, sample, pars):\n # for each sample the expected ocunts are\n # counts = (multiplicative factors) * (normsys multiplier) * (histsys delta + nominal hist)\n # = f1*f2*f3*f4* nomsysfactor(nom_sys_alphas) * hist(hist_addition(histosys_alphas) + nomdata)\n # nomsysfactor(nom_sys_alphas) = 1 + sum(interp(1, anchors[i][0], anchors[i][0], val=alpha) for i in range(nom_sys_alphas))\n # hist_addition(histosys_alphas) = sum(interp(nombin, anchors[i][0],\n # anchors[i][0], val=alpha) for i in range(histosys_alphas))\n nom = tensorlib.astensor(sample['data'])\n histosys_delta = self._histosysdelta(channel, sample, pars)\n\n interp_histo = tensorlib.sum(tensorlib.stack([nom, histosys_delta]), axis=0) if (histosys_delta is not None) else nom\n\n factors = []\n factors += 
self._multiplicative_factors(channel, sample, pars)\n factors += [self._normsysfactor(channel, sample, pars)]\n factors += [interp_histo]\n return tensorlib.product(tensorlib.stack(tensorlib.simple_broadcast(*factors)), axis=0)\n\n def expected_auxdata(self, pars):\n # probably more correctly this should be the expectation value of the constraint_pdf\n # or for the constraints we are using (single par constraings with mean == mode), we can\n # just return the alphas\n\n # order matters! because we generated auxdata in a certain order\n auxdata = None\n for modname in self.config.auxdata_order:\n thisaux = self.config.modifier(modname).expected_data(\n pars[self.config.par_slice(modname)])\n tocat = [thisaux] if auxdata is None else [auxdata, thisaux]\n auxdata = tensorlib.concatenate(tocat)\n return auxdata\n\n def expected_actualdata(self, pars):\n pars = tensorlib.astensor(pars)\n data = []\n for channel in self.spec['channels']:\n data.append(tensorlib.sum(tensorlib.stack([self.expected_sample(channel, sample, pars) for sample in channel['samples']]),axis=0))\n return tensorlib.concatenate(data)\n\n def expected_data(self, pars, include_auxdata=True):\n pars = tensorlib.astensor(pars)\n expected_actual = self.expected_actualdata(pars)\n\n if not include_auxdata:\n return expected_actual\n expected_constraints = self.expected_auxdata(pars)\n tocat = [expected_actual] if expected_constraints is None else [expected_actual,expected_constraints]\n return tensorlib.concatenate(tocat)\n\n def constraint_logpdf(self, auxdata, pars):\n # iterate over all constraints order doesn't matter....\n start_index = 0\n summands = None\n for cname in self.config.auxdata_order:\n modifier, modslice = self.config.modifier(cname), \\\n self.config.par_slice(cname)\n modalphas = modifier.alphas(pars[modslice])\n end_index = start_index + int(modalphas.shape[0])\n thisauxdata = auxdata[start_index:end_index]\n start_index = end_index\n constraint_term = tensorlib.log(modifier.pdf(thisauxdata, modalphas))\n summands = constraint_term if summands is None else tensorlib.concatenate([summands,constraint_term])\n return tensorlib.sum(summands) if summands is not None else 0\n\n def logpdf(self, pars, data):\n pars, data = tensorlib.astensor(pars), tensorlib.astensor(data)\n cut = int(data.shape[0]) - len(self.config.auxdata)\n actual_data, aux_data = data[:cut], data[cut:]\n lambdas_data = self.expected_actualdata(pars)\n summands = tensorlib.log(tensorlib.poisson(actual_data, lambdas_data))\n\n result = tensorlib.sum(summands) + self.constraint_logpdf(aux_data, pars)\n return tensorlib.astensor(result) * tensorlib.ones((1)) #ensure (1,) array shape also for numpy\n\n def pdf(self, pars, data):\n return tensorlib.exp(self.logpdf(pars, data))\n\n\ndef generate_asimov_data(asimov_mu, data, pdf, init_pars, par_bounds):\n bestfit_nuisance_asimov = optimizer.constrained_bestfit(\n loglambdav, asimov_mu, data, pdf, init_pars, par_bounds)\n return pdf.expected_data(bestfit_nuisance_asimov)\n\n##########################\n\n\ndef loglambdav(pars, data, pdf):\n return -2 * pdf.logpdf(pars, data)\n\ndef qmu(mu, data, pdf, init_pars, par_bounds):\n # The Test Statistic\n mubhathat = tensorlib.tolist(optimizer.constrained_bestfit(loglambdav, mu, data, pdf, init_pars, par_bounds))\n muhatbhat = tensorlib.tolist(optimizer.unconstrained_bestfit(loglambdav, data, pdf, init_pars, par_bounds))\n qmu = tensorlib.tolist(loglambdav(mubhathat, data, pdf) - loglambdav(muhatbhat, data, pdf))[0]\n if muhatbhat[pdf.config.poi_index] > mu:\n 
return 0.0\n if -1e-6 < qmu < 0:\n log.warning('WARNING: qmu negative: %s', qmu)\n return 0.0\n return qmu\n\nfrom scipy.stats import norm\ndef pvals_from_teststat(sqrtqmu_v, sqrtqmuA_v):\n CLsb = 1 - norm.cdf(sqrtqmu_v)\n CLb = norm.cdf(sqrtqmuA_v - sqrtqmu_v)\n CLs = CLb / CLsb\n return CLsb, CLb, CLs\n\nimport math\ndef runOnePoint(muTest, data, pdf, init_pars, par_bounds):\n asimov_mu = 0.0\n asimov_data = tensorlib.tolist(generate_asimov_data(asimov_mu, data,\n pdf, init_pars, par_bounds))\n\n qmu_v = qmu(muTest, data, pdf, init_pars, par_bounds)\n qmuA_v = qmu(muTest, asimov_data, pdf, init_pars, par_bounds)\n\n sqrtqmu_v = math.sqrt(qmu_v)\n sqrtqmuA_v = math.sqrt(qmuA_v)\n\n sigma = muTest / sqrtqmuA_v if sqrtqmuA_v > 0 else None\n\n CLsb, CLb, CLs = pvals_from_teststat(sqrtqmu_v, sqrtqmuA_v)\n\n CLs_exp = []\n for nsigma in [-2, -1, 0, 1, 2]:\n sqrtqmu_v_sigma = sqrtqmuA_v - nsigma\n CLs_exp.append(pvals_from_teststat(sqrtqmu_v_sigma, sqrtqmuA_v)[-1])\n return qmu_v, qmuA_v, sigma, CLsb, CLb, CLs, CLs_exp\n",
"path": "pyhf/__init__.py"
}
] | diff --git a/pyhf/__init__.py b/pyhf/__init__.py
index cb29a5e3e4..2c1c1598a3 100644
--- a/pyhf/__init__.py
+++ b/pyhf/__init__.py
@@ -5,7 +5,9 @@
log = logging.getLogger(__name__)
tensorlib = tensor.numpy_backend()
+default_backend = tensorlib
optimizer = optimize.scipy_optimizer()
+default_optimizer = optimizer
def set_backend(backend):
"""
diff --git a/tests/benchmarks/test_benchmark.py b/tests/benchmarks/test_benchmark.py
index 8dabac86cf..9ddbec1033 100644
--- a/tests/benchmarks/test_benchmark.py
+++ b/tests/benchmarks/test_benchmark.py
@@ -106,7 +106,6 @@ def test_runOnePoint(benchmark, backend, n_bins):
Returns:
None
"""
- default_backend = pyhf.tensorlib
pyhf.set_backend(backend)
source = generate_source_static(n_bins)
@@ -118,8 +117,6 @@ def test_runOnePoint(benchmark, backend, n_bins):
assert benchmark(runOnePoint, pdf, data) is not None
except AssertionError:
print('benchmarking has failed for n_bins = {}'.formant(n_bins))
- pyhf.set_backend(default_backend)
assert False
# Reset backend
- pyhf.set_backend(default_backend)
diff --git a/tests/conftest.py b/tests/conftest.py
new file mode 100644
index 0000000000..d69e6c447d
--- /dev/null
+++ b/tests/conftest.py
@@ -0,0 +1,7 @@
+import pytest
+import pyhf
+
+@pytest.fixture(scope='function', autouse=True)
+def reset_backend():
+ yield reset_backend
+ pyhf.set_backend(pyhf.default_backend)
diff --git a/tests/test_backend_consistency.py b/tests/test_backend_consistency.py
index e365097f99..ddf81b870c 100644
--- a/tests/test_backend_consistency.py
+++ b/tests/test_backend_consistency.py
@@ -4,7 +4,6 @@
import numpy as np
import pytest
-
def generate_source_static(n_bins):
"""
Create the source structure for the given number of bins.
@@ -86,7 +85,6 @@ def test_runOnePoint_q_mu(n_bins,
Returns:
None
"""
- default_backend = pyhf.tensorlib
source = generate_source_static(n_bins)
pdf = hepdata_like(source['bindata']['sig'],
@@ -128,15 +126,10 @@ def test_runOnePoint_q_mu(n_bins,
except AssertionError:
print('Ratio to NumPy+SciPy exceeded tolerance of {}: {}'.format(
tolerance['numpy'], numpy_ratio_delta_unity.tolist()))
- pyhf.set_backend(default_backend)
assert False
try:
assert (tensors_ratio_delta_unity < tolerance['tensors']).all()
except AssertionError:
print('Ratio between tensor backends exceeded tolerance of {}: {}'.format(
tolerance['tensors'], tensors_ratio_delta_unity.tolist()))
- pyhf.set_backend(default_backend)
assert False
-
- # Reset backend
- pyhf.set_backend(default_backend)
diff --git a/tests/test_optim.py b/tests/test_optim.py
index 1d278e329f..a9a27fde46 100644
--- a/tests/test_optim.py
+++ b/tests/test_optim.py
@@ -42,7 +42,6 @@ def test_optim_numpy():
init_pars = pdf.config.suggested_init()
par_bounds = pdf.config.suggested_bounds()
- oldlib = pyhf.tensorlib
pyhf.set_backend(pyhf.tensor.numpy_backend(poisson_from_normal=True))
optim = pyhf.optimizer
@@ -53,8 +52,6 @@ def test_optim_numpy():
result = optim.constrained_bestfit(pyhf.loglambdav, 1.0, data, pdf, init_pars, par_bounds)
assert pyhf.tensorlib.tolist(result)
- pyhf.set_backend(oldlib)
-
def test_optim_pytorch():
source = {
@@ -96,8 +93,6 @@ def test_optim_pytorch():
init_pars = pdf.config.suggested_init()
par_bounds = pdf.config.suggested_bounds()
- oldlib = pyhf.tensorlib
-
pyhf.set_backend(pyhf.tensor.pytorch_backend(poisson_from_normal=True))
optim = pyhf.optimizer
@@ -107,8 +102,6 @@ def test_optim_pytorch():
result = optim.constrained_bestfit(pyhf.loglambdav, 1.0, data, pdf, init_pars, par_bounds)
assert pyhf.tensorlib.tolist(result)
- pyhf.set_backend(oldlib)
-
def test_optim_tflow():
source = {
@@ -150,8 +143,6 @@ def test_optim_tflow():
init_pars = pdf.config.suggested_init()
par_bounds = pdf.config.suggested_bounds()
- oldlib = pyhf.tensorlib
-
pyhf.set_backend(pyhf.tensor.tensorflow_backend())
pyhf.tensorlib.session = tf.Session()
optim = pyhf.optimizer
@@ -161,5 +152,3 @@ def test_optim_tflow():
result = optim.constrained_bestfit(pyhf.loglambdav, 1.0, data, pdf, init_pars, par_bounds)
assert pyhf.tensorlib.tolist(result)
-
- pyhf.set_backend(oldlib)
diff --git a/tests/test_tensor.py b/tests/test_tensor.py
index 0bac85d27a..1318ec7fbf 100644
--- a/tests/test_tensor.py
+++ b/tests/test_tensor.py
@@ -41,8 +41,6 @@ def test_common_tensor_backends():
def test_pdf_eval():
- oldlib = pyhf.tensorlib
-
tf_sess = tf.Session()
backends = [numpy_backend(poisson_from_normal=True),
pytorch_backend(),
@@ -92,12 +90,8 @@ def test_pdf_eval():
assert np.std(values) < 1e-6
- pyhf.set_backend(oldlib)
-
def test_pdf_eval_2():
- oldlib = pyhf.tensorlib
-
tf_sess = tf.Session()
backends = [numpy_backend(poisson_from_normal=True),
pytorch_backend(),
@@ -126,5 +120,3 @@ def test_pdf_eval_2():
values.append(pyhf.tensorlib.tolist(v1)[0])
assert np.std(values) < 1e-6
-
- pyhf.set_backend(oldlib)
|
locustio__locust-1395 | Update flask version
Our minimum required flask version is too old (we've seen at least one person hit an issue because of it: https://stackoverflow.com/questions/61969924/typeerror-when-i-run-a-locustfile-py).
Version 0.12.5 (https://flask.palletsprojects.com/en/1.1.x/changelog/#version-0-12-5) would be the bare minimum, but we should probably go straight to 1.x.
I can do the PR
| [
{
"content": "# -*- coding: utf-8 -*-\nimport ast\nimport os\nimport re\nimport sys\n\nfrom setuptools import find_packages, setup\n\nROOT_PATH = os.path.abspath(os.path.dirname(__file__))\n\n# parse version from locust/__init__.py\n_version_re = re.compile(r'__version__\\s+=\\s+(.*)')\n_init_file = os.path.join(ROOT_PATH, \"locust\", \"__init__.py\")\nwith open(_init_file, 'rb') as f:\n version = str(ast.literal_eval(_version_re.search(\n f.read().decode('utf-8')).group(1)))\n\nsetup(\n name='locust',\n version=version,\n install_requires=[\n \"gevent>=1.5.0\",\n \"flask>=0.10.1\", \n \"requests>=2.9.1\", \n \"msgpack>=0.6.2\", \n \"pyzmq>=16.0.2\", \n \"geventhttpclient>=1.4.2\",\n \"ConfigArgParse>=1.0\",\n \"psutil>=5.6.7\",\n \"Flask-BasicAuth>=0.2.0\"\n ],\n test_suite=\"locust.test\",\n tests_require=[\n 'cryptography',\n 'mock',\n 'pyquery',\n ], \n)\n",
"path": "setup.py"
}
] | [
{
"content": "# -*- coding: utf-8 -*-\nimport ast\nimport os\nimport re\nimport sys\n\nfrom setuptools import find_packages, setup\n\nROOT_PATH = os.path.abspath(os.path.dirname(__file__))\n\n# parse version from locust/__init__.py\n_version_re = re.compile(r'__version__\\s+=\\s+(.*)')\n_init_file = os.path.join(ROOT_PATH, \"locust\", \"__init__.py\")\nwith open(_init_file, 'rb') as f:\n version = str(ast.literal_eval(_version_re.search(\n f.read().decode('utf-8')).group(1)))\n\nsetup(\n name='locust',\n version=version,\n install_requires=[\n \"gevent>=1.5.0\",\n \"flask>=1.1.2\", \n \"requests>=2.9.1\", \n \"msgpack>=0.6.2\", \n \"pyzmq>=16.0.2\", \n \"geventhttpclient>=1.4.2\",\n \"ConfigArgParse>=1.0\",\n \"psutil>=5.6.7\",\n \"Flask-BasicAuth>=0.2.0\"\n ],\n test_suite=\"locust.test\",\n tests_require=[\n 'cryptography',\n 'mock',\n 'pyquery',\n ], \n)\n",
"path": "setup.py"
}
] | diff --git a/setup.py b/setup.py
index 80716c5a28..26f4eec5e8 100644
--- a/setup.py
+++ b/setup.py
@@ -20,7 +20,7 @@
version=version,
install_requires=[
"gevent>=1.5.0",
- "flask>=0.10.1",
+ "flask>=1.1.2",
"requests>=2.9.1",
"msgpack>=0.6.2",
"pyzmq>=16.0.2",
|
encode__httpx-1054 | Type-checking our tests
I know this is not a standard thing to do across Encode projects, but I've been wondering if it would be worth starting to type-hint our tests.
I've seen at least two instances of this recently:
- In HTTPX: https://github.com/encode/httpx/pull/648#discussion_r359862603
- In Starlette: https://github.com/encode/starlette/issues/722
My rationale is based on two aspects:
- It improves our upfront knowledge about how users will actually use HTTPX — currently their usage of type hints in the wild is not reflected anywhere.
- It helps us catch type hint inconsistencies we wouldn't see in the core package.
The main counter-argument, I suppose, is that type-hinting tests is tedious. I think that's fair, but I believe the two pros above make it compelling.
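To make it concrete, here is a rough sketch (not a proposed convention; the helper name is made up for illustration) of what a type-hinted test could look like, with mypy then pointed at the tests directory as well as the package, e.g. `mypy httpx tests`:

```python
# sketch only: a test with explicit annotations
from typing import Dict

import httpx


def make_headers(token: str) -> Dict[str, str]:
    # Hypothetical helper, annotated like any other code under type checking.
    return {"Authorization": f"Bearer {token}"}


def test_request_headers() -> None:
    request = httpx.Request("GET", "https://example.org", headers=make_headers("abc"))
    assert request.headers["Authorization"] == "Bearer abc"
```

The annotations cost a few extra characters per test, but they exercise the public API exactly the way a type-checking user would.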
Thoughts?
| [
{
"content": "\"\"\"\nType definitions for type checking purposes.\n\"\"\"\n\nimport ssl\nfrom http.cookiejar import CookieJar\nfrom typing import (\n IO,\n TYPE_CHECKING,\n AsyncIterator,\n Callable,\n Dict,\n Iterator,\n List,\n Mapping,\n Optional,\n Sequence,\n Tuple,\n Union,\n)\n\nif TYPE_CHECKING: # pragma: no cover\n from ._auth import Auth # noqa: F401\n from ._config import Proxy, Timeout # noqa: F401\n from ._models import URL, Cookies, Headers, QueryParams, Request # noqa: F401\n\n\nPrimitiveData = Optional[Union[str, int, float, bool]]\n\nURLTypes = Union[\"URL\", str]\n\nQueryParamTypes = Union[\n \"QueryParams\",\n Mapping[str, Union[PrimitiveData, Sequence[PrimitiveData]]],\n List[Tuple[str, PrimitiveData]],\n str,\n]\n\nHeaderTypes = Union[\n \"Headers\",\n Dict[str, str],\n Dict[bytes, bytes],\n Sequence[Tuple[str, str]],\n Sequence[Tuple[bytes, bytes]],\n]\n\nCookieTypes = Union[\"Cookies\", CookieJar, Dict[str, str]]\n\nCertTypes = Union[str, Tuple[str, str], Tuple[str, str, str]]\nVerifyTypes = Union[str, bool, ssl.SSLContext]\nTimeoutTypes = Union[\n Optional[float],\n Tuple[Optional[float], Optional[float], Optional[float], Optional[float]],\n \"Timeout\",\n]\nProxiesTypes = Union[URLTypes, \"Proxy\", Dict[URLTypes, Union[URLTypes, \"Proxy\"]]]\n\nAuthTypes = Union[\n Tuple[Union[str, bytes], Union[str, bytes]],\n Callable[[\"Request\"], \"Request\"],\n \"Auth\",\n]\n\nRequestData = Union[dict, str, bytes, Iterator[bytes], AsyncIterator[bytes]]\n\nFileContent = Union[IO[str], IO[bytes], str, bytes]\nFileTypes = Union[\n # file (or text)\n FileContent,\n # (filename, file (or text))\n Tuple[Optional[str], FileContent],\n # (filename, file (or text), content_type)\n Tuple[Optional[str], FileContent, Optional[str]],\n]\nRequestFiles = Union[Mapping[str, FileTypes], List[Tuple[str, FileTypes]]]\n",
"path": "httpx/_types.py"
}
] | [
{
"content": "\"\"\"\nType definitions for type checking purposes.\n\"\"\"\n\nimport ssl\nfrom http.cookiejar import CookieJar\nfrom typing import (\n IO,\n TYPE_CHECKING,\n AsyncIterator,\n Callable,\n Dict,\n Iterator,\n List,\n Mapping,\n Optional,\n Sequence,\n Tuple,\n Union,\n)\n\nif TYPE_CHECKING: # pragma: no cover\n from ._auth import Auth # noqa: F401\n from ._config import Proxy, Timeout # noqa: F401\n from ._models import URL, Cookies, Headers, QueryParams, Request # noqa: F401\n\n\nPrimitiveData = Optional[Union[str, int, float, bool]]\n\nURLTypes = Union[\"URL\", str]\n\nQueryParamTypes = Union[\n \"QueryParams\",\n Mapping[str, Union[PrimitiveData, Sequence[PrimitiveData]]],\n List[Tuple[str, PrimitiveData]],\n str,\n]\n\nHeaderTypes = Union[\n \"Headers\",\n Dict[str, str],\n Dict[bytes, bytes],\n Sequence[Tuple[str, str]],\n Sequence[Tuple[bytes, bytes]],\n]\n\nCookieTypes = Union[\"Cookies\", CookieJar, Dict[str, str]]\n\nCertTypes = Union[str, Tuple[str, str], Tuple[str, str, str]]\nVerifyTypes = Union[str, bool, ssl.SSLContext]\nTimeoutTypes = Union[\n Optional[float],\n Tuple[Optional[float], Optional[float], Optional[float], Optional[float]],\n \"Timeout\",\n]\nProxiesTypes = Union[URLTypes, \"Proxy\", Dict[URLTypes, Union[URLTypes, \"Proxy\"]]]\n\nAuthTypes = Union[\n Tuple[Union[str, bytes], Union[str, bytes]],\n Callable[[\"Request\"], \"Request\"],\n \"Auth\",\n]\n\nRequestData = Union[dict, str, bytes, Iterator[bytes], AsyncIterator[bytes]]\n\nFileContent = Union[IO[str], IO[bytes], str, bytes]\nFileTypes = Union[\n # file (or text)\n FileContent,\n # (filename, file (or text))\n Tuple[Optional[str], FileContent],\n # (filename, file (or text), content_type)\n Tuple[Optional[str], FileContent, Optional[str]],\n]\nRequestFiles = Union[Mapping[str, FileTypes], Sequence[Tuple[str, FileTypes]]]\n",
"path": "httpx/_types.py"
}
] | diff --git a/httpx/_types.py b/httpx/_types.py
index a74020a4ae..d2fc098e24 100644
--- a/httpx/_types.py
+++ b/httpx/_types.py
@@ -72,4 +72,4 @@
# (filename, file (or text), content_type)
Tuple[Optional[str], FileContent, Optional[str]],
]
-RequestFiles = Union[Mapping[str, FileTypes], List[Tuple[str, FileTypes]]]
+RequestFiles = Union[Mapping[str, FileTypes], Sequence[Tuple[str, FileTypes]]]
diff --git a/scripts/check b/scripts/check
index 2b42506f6f..f9fc19343b 100755
--- a/scripts/check
+++ b/scripts/check
@@ -10,5 +10,5 @@ set -x
${PREFIX}black --check --diff --target-version=py36 $SOURCE_FILES
${PREFIX}flake8 $SOURCE_FILES
-${PREFIX}mypy httpx
+${PREFIX}mypy $SOURCE_FILES
${PREFIX}isort --check --diff --project=httpx $SOURCE_FILES
diff --git a/tests/client/test_async_client.py b/tests/client/test_async_client.py
index 649e428f5e..6818b4a444 100644
--- a/tests/client/test_async_client.py
+++ b/tests/client/test_async_client.py
@@ -174,8 +174,11 @@ def test_dispatch_deprecated():
def test_asgi_dispatch_deprecated():
+ async def app(scope, receive, send):
+ pass
+
with pytest.warns(DeprecationWarning) as record:
- ASGIDispatch(None)
+ ASGIDispatch(app)
assert len(record) == 1
assert (
diff --git a/tests/client/test_auth.py b/tests/client/test_auth.py
index 818e65904a..13184a06ba 100644
--- a/tests/client/test_auth.py
+++ b/tests/client/test_auth.py
@@ -11,7 +11,6 @@
Auth,
Client,
DigestAuth,
- Headers,
ProtocolError,
Request,
RequestBodyUnavailable,
@@ -86,23 +85,26 @@ def __init__(
async def request(
self,
method: bytes,
- url: typing.Tuple[bytes, bytes, int, bytes],
- headers: typing.List[typing.Tuple[bytes, bytes]],
- stream: ContentStream,
+ url: typing.Tuple[bytes, bytes, typing.Optional[int], bytes],
+ headers: typing.List[typing.Tuple[bytes, bytes]] = None,
+ stream: httpcore.AsyncByteStream = None,
timeout: typing.Dict[str, typing.Optional[float]] = None,
) -> typing.Tuple[
bytes, int, bytes, typing.List[typing.Tuple[bytes, bytes]], ContentStream
]:
if self._response_count < self.send_response_after_attempt:
- return self.challenge_send(method, url, headers, stream)
+ assert headers is not None
+ return self.challenge_send(method, headers)
authorization = get_header_value(headers, "Authorization")
body = JSONStream({"auth": authorization})
return b"HTTP/1.1", 200, b"", [], body
def challenge_send(
- self, method: bytes, url: URL, headers: Headers, stream: ContentStream,
- ) -> typing.Tuple[int, bytes, Headers, ContentStream]:
+ self, method: bytes, headers: typing.List[typing.Tuple[bytes, bytes]],
+ ) -> typing.Tuple[
+ bytes, int, bytes, typing.List[typing.Tuple[bytes, bytes]], ContentStream
+ ]:
self._response_count += 1
nonce = (
hashlib.sha256(os.urandom(8)).hexdigest()
@@ -297,7 +299,8 @@ async def test_auth_hidden_header() -> None:
async def test_auth_invalid_type() -> None:
url = "https://example.org/"
client = AsyncClient(
- transport=AsyncMockTransport(), auth="not a tuple, not a callable",
+ transport=AsyncMockTransport(),
+ auth="not a tuple, not a callable", # type: ignore
)
with pytest.raises(TypeError):
await client.get(url)
diff --git a/tests/client/test_client.py b/tests/client/test_client.py
index 02f78f6999..1426fc216c 100644
--- a/tests/client/test_client.py
+++ b/tests/client/test_client.py
@@ -182,8 +182,11 @@ def test_dispatch_deprecated():
def test_wsgi_dispatch_deprecated():
+ def app(start_response, environ):
+ pass
+
with pytest.warns(DeprecationWarning) as record:
- WSGIDispatch(None)
+ WSGIDispatch(app)
assert len(record) == 1
assert (
diff --git a/tests/client/test_cookies.py b/tests/client/test_cookies.py
index a109ccc6ee..68b6c64cf5 100644
--- a/tests/client/test_cookies.py
+++ b/tests/client/test_cookies.py
@@ -20,14 +20,15 @@ class MockTransport(httpcore.AsyncHTTPTransport):
async def request(
self,
method: bytes,
- url: typing.Tuple[bytes, bytes, int, bytes],
- headers: typing.List[typing.Tuple[bytes, bytes]],
- stream: ContentStream,
+ url: typing.Tuple[bytes, bytes, typing.Optional[int], bytes],
+ headers: typing.List[typing.Tuple[bytes, bytes]] = None,
+ stream: httpcore.AsyncByteStream = None,
timeout: typing.Dict[str, typing.Optional[float]] = None,
) -> typing.Tuple[
bytes, int, bytes, typing.List[typing.Tuple[bytes, bytes]], ContentStream
]:
host, scheme, port, path = url
+ body: ContentStream
if path.startswith(b"/echo_cookies"):
cookie = get_header_value(headers, "cookie")
body = JSONStream({"cookies": cookie})
diff --git a/tests/client/test_headers.py b/tests/client/test_headers.py
index 6f26b1c7b3..2f87c38a1f 100755
--- a/tests/client/test_headers.py
+++ b/tests/client/test_headers.py
@@ -13,13 +13,14 @@ class MockTransport(httpcore.AsyncHTTPTransport):
async def request(
self,
method: bytes,
- url: typing.Tuple[bytes, bytes, int, bytes],
- headers: typing.List[typing.Tuple[bytes, bytes]],
- stream: ContentStream,
+ url: typing.Tuple[bytes, bytes, typing.Optional[int], bytes],
+ headers: typing.List[typing.Tuple[bytes, bytes]] = None,
+ stream: httpcore.AsyncByteStream = None,
timeout: typing.Dict[str, typing.Optional[float]] = None,
) -> typing.Tuple[
bytes, int, bytes, typing.List[typing.Tuple[bytes, bytes]], ContentStream
]:
+ assert headers is not None
headers_dict = {
key.decode("ascii"): value.decode("ascii") for key, value in headers
}
diff --git a/tests/client/test_properties.py b/tests/client/test_properties.py
index 5dbbb4690a..011c593cd3 100644
--- a/tests/client/test_properties.py
+++ b/tests/client/test_properties.py
@@ -3,14 +3,14 @@
def test_client_headers():
client = AsyncClient()
- client.headers = {"a": "b"}
+ client.headers = {"a": "b"} # type: ignore
assert isinstance(client.headers, Headers)
assert client.headers["A"] == "b"
def test_client_cookies():
client = AsyncClient()
- client.cookies = {"a": "b"}
+ client.cookies = {"a": "b"} # type: ignore
assert isinstance(client.cookies, Cookies)
mycookies = list(client.cookies.jar)
assert len(mycookies) == 1
diff --git a/tests/client/test_proxies.py b/tests/client/test_proxies.py
index 5222b08e34..fb21760bf7 100644
--- a/tests/client/test_proxies.py
+++ b/tests/client/test_proxies.py
@@ -1,3 +1,4 @@
+import httpcore
import pytest
import httpx
@@ -24,7 +25,9 @@ def test_proxies_parameter(proxies, expected_proxies):
for proxy_key, url in expected_proxies:
assert proxy_key in client.proxies
- assert client.proxies[proxy_key].proxy_origin == httpx.URL(url).raw[:3]
+ proxy = client.proxies[proxy_key]
+ assert isinstance(proxy, httpcore.AsyncHTTPProxy)
+ assert proxy.proxy_origin == httpx.URL(url).raw[:3]
assert len(expected_proxies) == len(client.proxies)
@@ -81,6 +84,7 @@ def test_transport_for_request(url, proxies, expected):
if expected is None:
assert transport is client.transport
else:
+ assert isinstance(transport, httpcore.AsyncHTTPProxy)
assert transport.proxy_origin == httpx.URL(expected).raw[:3]
diff --git a/tests/client/test_queryparams.py b/tests/client/test_queryparams.py
index 97e1199620..10a03539e2 100644
--- a/tests/client/test_queryparams.py
+++ b/tests/client/test_queryparams.py
@@ -3,7 +3,7 @@
import httpcore
import pytest
-from httpx import URL, AsyncClient, Headers, QueryParams
+from httpx import URL, AsyncClient, QueryParams
from httpx._content_streams import ContentStream, JSONStream
@@ -11,16 +11,15 @@ class MockTransport(httpcore.AsyncHTTPTransport):
async def request(
self,
method: bytes,
- url: typing.Tuple[bytes, bytes, int, bytes],
- headers: typing.List[typing.Tuple[bytes, bytes]],
- stream: ContentStream,
+ url: typing.Tuple[bytes, bytes, typing.Optional[int], bytes],
+ headers: typing.List[typing.Tuple[bytes, bytes]] = None,
+ stream: httpcore.AsyncByteStream = None,
timeout: typing.Dict[str, typing.Optional[float]] = None,
) -> typing.Tuple[
bytes, int, bytes, typing.List[typing.Tuple[bytes, bytes]], ContentStream
]:
- headers = Headers()
body = JSONStream({"ok": "ok"})
- return b"HTTP/1.1", 200, b"OK", headers, body
+ return b"HTTP/1.1", 200, b"OK", [], body
def test_client_queryparams():
@@ -35,7 +34,7 @@ def test_client_queryparams_string():
assert client.params["a"] == "b"
client = AsyncClient()
- client.params = "a=b"
+ client.params = "a=b" # type: ignore
assert isinstance(client.params, QueryParams)
assert client.params["a"] == "b"
diff --git a/tests/client/test_redirects.py b/tests/client/test_redirects.py
index ae800fa792..30b6f6a128 100644
--- a/tests/client/test_redirects.py
+++ b/tests/client/test_redirects.py
@@ -103,8 +103,8 @@ async def body():
headers_dict = {
key.decode("ascii"): value.decode("ascii") for key, value in headers
}
- content = ByteStream(json.dumps({"headers": headers_dict}).encode())
- return b"HTTP/1.1", 200, b"OK", [], content
+ stream = ByteStream(json.dumps({"headers": headers_dict}).encode())
+ return b"HTTP/1.1", 200, b"OK", [], stream
elif path == b"/redirect_body":
code = codes.PERMANENT_REDIRECT
@@ -121,10 +121,10 @@ async def body():
headers_dict = {
key.decode("ascii"): value.decode("ascii") for key, value in headers
}
- body = ByteStream(
+ stream = ByteStream(
json.dumps({"body": content.decode(), "headers": headers_dict}).encode()
)
- return b"HTTP/1.1", 200, b"OK", [], body
+ return b"HTTP/1.1", 200, b"OK", [], stream
elif path == b"/cross_subdomain":
host = get_header_value(headers, "host")
@@ -402,9 +402,9 @@ class MockCookieTransport(httpcore.AsyncHTTPTransport):
async def request(
self,
method: bytes,
- url: typing.Tuple[bytes, bytes, int, bytes],
- headers: typing.List[typing.Tuple[bytes, bytes]],
- stream: ContentStream,
+ url: typing.Tuple[bytes, bytes, typing.Optional[int], bytes],
+ headers: typing.List[typing.Tuple[bytes, bytes]] = None,
+ stream: httpcore.AsyncByteStream = None,
timeout: typing.Dict[str, typing.Optional[float]] = None,
) -> typing.Tuple[
bytes, int, bytes, typing.List[typing.Tuple[bytes, bytes]], ContentStream
@@ -432,7 +432,8 @@ async def request(
]
return b"HTTP/1.1", status_code, b"See Other", headers, ByteStream(b"")
- elif path == b"/logout":
+ else:
+ assert path == b"/logout"
status_code = codes.SEE_OTHER
headers = [
(b"location", b"/"),
diff --git a/tests/conftest.py b/tests/conftest.py
index a145ce0fa0..10576ebd8a 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -56,7 +56,7 @@ async def my_async_test():
@pytest.fixture(scope="function", autouse=True)
-def clean_environ() -> typing.Dict[str, typing.Any]:
+def clean_environ():
"""Keeps os.environ clean for every test without having to mock os.environ"""
original_environ = os.environ.copy()
os.environ.clear()
diff --git a/tests/test_api.py b/tests/test_api.py
index 4c1d611620..2d51d99e8a 100644
--- a/tests/test_api.py
+++ b/tests/test_api.py
@@ -68,7 +68,6 @@ def test_stream(server):
assert response.http_version == "HTTP/1.1"
-@pytest.mark.asyncio
-async def test_get_invalid_url(server):
+def test_get_invalid_url():
with pytest.raises(httpx.InvalidURL):
- await httpx.get("invalid://example.org")
+ httpx.get("invalid://example.org")
diff --git a/tests/test_config.py b/tests/test_config.py
index 41d81916ad..46d154cdb8 100644
--- a/tests/test_config.py
+++ b/tests/test_config.py
@@ -56,7 +56,7 @@ def test_load_ssl_config_verify_env_file(https_server, ca_cert_pem_file, config)
def test_load_ssl_config_verify_directory():
path = Path(certifi.where()).parent
- ssl_config = SSLConfig(verify=path)
+ ssl_config = SSLConfig(verify=str(path))
context = ssl_config.ssl_context
assert context.verify_mode == ssl.VerifyMode.CERT_REQUIRED
assert context.check_hostname is True
@@ -192,7 +192,7 @@ def test_ssl_config_support_for_keylog_file(tmpdir, monkeypatch): # pragma: noc
ssl_config = SSLConfig(trust_env=True)
- assert ssl_config.ssl_context.keylog_filename is None
+ assert ssl_config.ssl_context.keylog_filename is None # type: ignore
filename = str(tmpdir.join("test.log"))
@@ -201,11 +201,11 @@ def test_ssl_config_support_for_keylog_file(tmpdir, monkeypatch): # pragma: noc
ssl_config = SSLConfig(trust_env=True)
- assert ssl_config.ssl_context.keylog_filename == filename
+ assert ssl_config.ssl_context.keylog_filename == filename # type: ignore
ssl_config = SSLConfig(trust_env=False)
- assert ssl_config.ssl_context.keylog_filename is None
+ assert ssl_config.ssl_context.keylog_filename is None # type: ignore
@pytest.mark.parametrize(
diff --git a/tests/test_content_streams.py b/tests/test_content_streams.py
index 2b2adc92ae..140aa8d2af 100644
--- a/tests/test_content_streams.py
+++ b/tests/test_content_streams.py
@@ -203,7 +203,7 @@ async def test_empty_request():
def test_invalid_argument():
with pytest.raises(TypeError):
- encode(123)
+ encode(123) # type: ignore
@pytest.mark.asyncio
diff --git a/tests/test_multipart.py b/tests/test_multipart.py
index fbabc7c483..7d6f8e025d 100644
--- a/tests/test_multipart.py
+++ b/tests/test_multipart.py
@@ -8,7 +8,7 @@
import pytest
import httpx
-from httpx._content_streams import AsyncIteratorStream, encode
+from httpx._content_streams import AsyncIteratorStream, MultipartStream, encode
from httpx._utils import format_form_param
@@ -16,7 +16,7 @@ class MockTransport(httpcore.AsyncHTTPTransport):
async def request(
self,
method: bytes,
- url: typing.Tuple[bytes, bytes, int, bytes],
+ url: typing.Tuple[bytes, bytes, typing.Optional[int], bytes],
headers: typing.List[typing.Tuple[bytes, bytes]] = None,
stream: httpcore.AsyncByteStream = None,
timeout: typing.Dict[str, typing.Optional[float]] = None,
@@ -27,6 +27,7 @@ async def request(
typing.List[typing.Tuple[bytes, bytes]],
httpcore.AsyncByteStream,
]:
+ assert stream is not None
content = AsyncIteratorStream(aiterator=(part async for part in stream))
return b"HTTP/1.1", 200, b"OK", [], content
@@ -46,7 +47,10 @@ async def test_multipart(value, output):
# bit grungy, but sufficient just for our testing purposes.
boundary = response.request.headers["Content-Type"].split("boundary=")[-1]
content_length = response.request.headers["Content-Length"]
- pdict = {"boundary": boundary.encode("ascii"), "CONTENT-LENGTH": content_length}
+ pdict: dict = {
+ "boundary": boundary.encode("ascii"),
+ "CONTENT-LENGTH": content_length,
+ }
multipart = cgi.parse_multipart(io.BytesIO(response.content), pdict)
# Note that the expected return type for text fields
@@ -91,7 +95,10 @@ async def test_multipart_file_tuple():
# bit grungy, but sufficient just for our testing purposes.
boundary = response.request.headers["Content-Type"].split("boundary=")[-1]
content_length = response.request.headers["Content-Length"]
- pdict = {"boundary": boundary.encode("ascii"), "CONTENT-LENGTH": content_length}
+ pdict: dict = {
+ "boundary": boundary.encode("ascii"),
+ "CONTENT-LENGTH": content_length,
+ }
multipart = cgi.parse_multipart(io.BytesIO(response.content), pdict)
# Note that the expected return type for text fields
@@ -117,6 +124,7 @@ def test_multipart_encode(tmp_path: typing.Any) -> None:
boundary = os.urandom(16).hex()
stream = encode(data=data, files=files)
+ assert isinstance(stream, MultipartStream)
assert stream.can_replay()
assert stream.content_type == f"multipart/form-data; boundary={boundary}"
@@ -143,6 +151,7 @@ def test_multipart_encode_files_allows_filenames_as_none() -> None:
boundary = os.urandom(16).hex()
stream = encode(data={}, files=files)
+ assert isinstance(stream, MultipartStream)
assert stream.can_replay()
assert stream.content_type == f"multipart/form-data; boundary={boundary}"
@@ -169,6 +178,7 @@ def test_multipart_encode_files_guesses_correct_content_type(
boundary = os.urandom(16).hex()
stream = encode(data={}, files=files)
+ assert isinstance(stream, MultipartStream)
assert stream.can_replay()
assert stream.content_type == f"multipart/form-data; boundary={boundary}"
@@ -192,6 +202,7 @@ def test_multipart_encode_files_allows_bytes_or_str_content(
boundary = os.urandom(16).hex()
stream = encode(data={}, files=files)
+ assert isinstance(stream, MultipartStream)
assert stream.can_replay()
assert stream.content_type == f"multipart/form-data; boundary={boundary}"
@@ -226,7 +237,7 @@ def data() -> typing.Iterator[bytes]:
yield b"Hello"
yield b"World"
- fileobj = IteratorIO(data())
+ fileobj: typing.Any = IteratorIO(data())
files = {"file": fileobj}
stream = encode(files=files, boundary=b"+++")
assert not stream.can_replay()
diff --git a/tests/test_status_codes.py b/tests/test_status_codes.py
index e62b3e067b..c53e95965d 100644
--- a/tests/test_status_codes.py
+++ b/tests/test_status_codes.py
@@ -7,7 +7,7 @@ def test_status_code_as_int():
def test_lowercase_status_code():
- assert httpx.codes.not_found == 404
+ assert httpx.codes.not_found == 404 # type: ignore
def test_reason_phrase_for_status_code():
|
pyca__cryptography-4307 | incorrect key_size of sect571r1
Hello!
https://github.com/pyca/cryptography/blob/17c8f126c7c7d5ce886112a6e924277a7b203f25/src/cryptography/hazmat/primitives/asymmetric/ec.py#L138
The value there should be 570. From [the standard](http://www.secg.org/sec2-v2.pdf) the order of the published generator is
```py
>>> order = 0x03FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFE661CE18FF55987308059B186823851EC7DD9CA1161DE93D5174D66E8382E9BB2FE84E47
>>> print(len(bin(order))-2)
570
```
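The same check can be written with Python's built-in `int.bit_length()`, which avoids the `bin()` string round-trip; a minimal sketch, continuing with the `order` constant defined in the snippet above:
```py
>>> order.bit_length()  # same order value as above; equals len(bin(order)) - 2
570
```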
| [
{
"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport abc\n\nimport six\n\nfrom cryptography import utils\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass EllipticCurve(object):\n @abc.abstractproperty\n def name(self):\n \"\"\"\n The name of the curve. e.g. secp256r1.\n \"\"\"\n\n @abc.abstractproperty\n def key_size(self):\n \"\"\"\n Bit size of a secret scalar for the curve.\n \"\"\"\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass EllipticCurveSignatureAlgorithm(object):\n @abc.abstractproperty\n def algorithm(self):\n \"\"\"\n The digest algorithm used with this signature.\n \"\"\"\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass EllipticCurvePrivateKey(object):\n @abc.abstractmethod\n def signer(self, signature_algorithm):\n \"\"\"\n Returns an AsymmetricSignatureContext used for signing data.\n \"\"\"\n\n @abc.abstractmethod\n def exchange(self, algorithm, peer_public_key):\n \"\"\"\n Performs a key exchange operation using the provided algorithm with the\n provided peer's public key.\n \"\"\"\n\n @abc.abstractmethod\n def public_key(self):\n \"\"\"\n The EllipticCurvePublicKey for this private key.\n \"\"\"\n\n @abc.abstractproperty\n def curve(self):\n \"\"\"\n The EllipticCurve that this key is on.\n \"\"\"\n\n @abc.abstractproperty\n def key_size(self):\n \"\"\"\n Bit size of a secret scalar for the curve.\n \"\"\"\n\n @abc.abstractproperty\n def sign(self, data, signature_algorithm):\n \"\"\"\n Signs the data\n \"\"\"\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass EllipticCurvePrivateKeyWithSerialization(EllipticCurvePrivateKey):\n @abc.abstractmethod\n def private_numbers(self):\n \"\"\"\n Returns an EllipticCurvePrivateNumbers.\n \"\"\"\n\n @abc.abstractmethod\n def private_bytes(self, encoding, format, encryption_algorithm):\n \"\"\"\n Returns the key serialized as bytes.\n \"\"\"\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass EllipticCurvePublicKey(object):\n @abc.abstractmethod\n def verifier(self, signature, signature_algorithm):\n \"\"\"\n Returns an AsymmetricVerificationContext used for signing data.\n \"\"\"\n\n @abc.abstractproperty\n def curve(self):\n \"\"\"\n The EllipticCurve that this key is on.\n \"\"\"\n\n @abc.abstractproperty\n def key_size(self):\n \"\"\"\n Bit size of a secret scalar for the curve.\n \"\"\"\n\n @abc.abstractmethod\n def public_numbers(self):\n \"\"\"\n Returns an EllipticCurvePublicNumbers.\n \"\"\"\n\n @abc.abstractmethod\n def public_bytes(self, encoding, format):\n \"\"\"\n Returns the key serialized as bytes.\n \"\"\"\n\n @abc.abstractmethod\n def verify(self, signature, data, signature_algorithm):\n \"\"\"\n Verifies the signature of the data.\n \"\"\"\n\n\nEllipticCurvePublicKeyWithSerialization = EllipticCurvePublicKey\n\n\[email protected]_interface(EllipticCurve)\nclass SECT571R1(object):\n name = \"sect571r1\"\n key_size = 571\n\n\[email protected]_interface(EllipticCurve)\nclass SECT409R1(object):\n name = \"sect409r1\"\n key_size = 409\n\n\[email protected]_interface(EllipticCurve)\nclass SECT283R1(object):\n name = \"sect283r1\"\n key_size = 283\n\n\[email protected]_interface(EllipticCurve)\nclass SECT233R1(object):\n name = \"sect233r1\"\n key_size = 233\n\n\[email protected]_interface(EllipticCurve)\nclass SECT163R2(object):\n name = \"sect163r2\"\n key_size = 163\n\n\[email 
protected]_interface(EllipticCurve)\nclass SECT571K1(object):\n name = \"sect571k1\"\n key_size = 571\n\n\[email protected]_interface(EllipticCurve)\nclass SECT409K1(object):\n name = \"sect409k1\"\n key_size = 409\n\n\[email protected]_interface(EllipticCurve)\nclass SECT283K1(object):\n name = \"sect283k1\"\n key_size = 283\n\n\[email protected]_interface(EllipticCurve)\nclass SECT233K1(object):\n name = \"sect233k1\"\n key_size = 233\n\n\[email protected]_interface(EllipticCurve)\nclass SECT163K1(object):\n name = \"sect163k1\"\n key_size = 163\n\n\[email protected]_interface(EllipticCurve)\nclass SECP521R1(object):\n name = \"secp521r1\"\n key_size = 521\n\n\[email protected]_interface(EllipticCurve)\nclass SECP384R1(object):\n name = \"secp384r1\"\n key_size = 384\n\n\[email protected]_interface(EllipticCurve)\nclass SECP256R1(object):\n name = \"secp256r1\"\n key_size = 256\n\n\[email protected]_interface(EllipticCurve)\nclass SECP256K1(object):\n name = \"secp256k1\"\n key_size = 256\n\n\[email protected]_interface(EllipticCurve)\nclass SECP224R1(object):\n name = \"secp224r1\"\n key_size = 224\n\n\[email protected]_interface(EllipticCurve)\nclass SECP192R1(object):\n name = \"secp192r1\"\n key_size = 192\n\n\[email protected]_interface(EllipticCurve)\nclass BrainpoolP256R1(object):\n name = \"brainpoolP256r1\"\n key_size = 256\n\n\[email protected]_interface(EllipticCurve)\nclass BrainpoolP384R1(object):\n name = \"brainpoolP384r1\"\n key_size = 384\n\n\[email protected]_interface(EllipticCurve)\nclass BrainpoolP512R1(object):\n name = \"brainpoolP512r1\"\n key_size = 512\n\n\n_CURVE_TYPES = {\n \"prime192v1\": SECP192R1,\n \"prime256v1\": SECP256R1,\n\n \"secp192r1\": SECP192R1,\n \"secp224r1\": SECP224R1,\n \"secp256r1\": SECP256R1,\n \"secp384r1\": SECP384R1,\n \"secp521r1\": SECP521R1,\n \"secp256k1\": SECP256K1,\n\n \"sect163k1\": SECT163K1,\n \"sect233k1\": SECT233K1,\n \"sect283k1\": SECT283K1,\n \"sect409k1\": SECT409K1,\n \"sect571k1\": SECT571K1,\n\n \"sect163r2\": SECT163R2,\n \"sect233r1\": SECT233R1,\n \"sect283r1\": SECT283R1,\n \"sect409r1\": SECT409R1,\n \"sect571r1\": SECT571R1,\n\n \"brainpoolP256r1\": BrainpoolP256R1,\n \"brainpoolP384r1\": BrainpoolP384R1,\n \"brainpoolP512r1\": BrainpoolP512R1,\n}\n\n\[email protected]_interface(EllipticCurveSignatureAlgorithm)\nclass ECDSA(object):\n def __init__(self, algorithm):\n self._algorithm = algorithm\n\n algorithm = utils.read_only_property(\"_algorithm\")\n\n\ndef generate_private_key(curve, backend):\n return backend.generate_elliptic_curve_private_key(curve)\n\n\ndef derive_private_key(private_value, curve, backend):\n if not isinstance(private_value, six.integer_types):\n raise TypeError(\"private_value must be an integer type.\")\n\n if private_value <= 0:\n raise ValueError(\"private_value must be a positive integer.\")\n\n if not isinstance(curve, EllipticCurve):\n raise TypeError(\"curve must provide the EllipticCurve interface.\")\n\n return backend.derive_elliptic_curve_private_key(private_value, curve)\n\n\nclass EllipticCurvePublicNumbers(object):\n def __init__(self, x, y, curve):\n if (\n not isinstance(x, six.integer_types) or\n not isinstance(y, six.integer_types)\n ):\n raise TypeError(\"x and y must be integers.\")\n\n if not isinstance(curve, EllipticCurve):\n raise TypeError(\"curve must provide the EllipticCurve interface.\")\n\n self._y = y\n self._x = x\n self._curve = curve\n\n def public_key(self, backend):\n return backend.load_elliptic_curve_public_numbers(self)\n\n def 
encode_point(self):\n # key_size is in bits. Convert to bytes and round up\n byte_length = (self.curve.key_size + 7) // 8\n return (\n b'\\x04' + utils.int_to_bytes(self.x, byte_length) +\n utils.int_to_bytes(self.y, byte_length)\n )\n\n @classmethod\n def from_encoded_point(cls, curve, data):\n if not isinstance(curve, EllipticCurve):\n raise TypeError(\"curve must be an EllipticCurve instance\")\n\n if data.startswith(b'\\x04'):\n # key_size is in bits. Convert to bytes and round up\n byte_length = (curve.key_size + 7) // 8\n if len(data) == 2 * byte_length + 1:\n x = utils.int_from_bytes(data[1:byte_length + 1], 'big')\n y = utils.int_from_bytes(data[byte_length + 1:], 'big')\n return cls(x, y, curve)\n else:\n raise ValueError('Invalid elliptic curve point data length')\n else:\n raise ValueError('Unsupported elliptic curve point type')\n\n curve = utils.read_only_property(\"_curve\")\n x = utils.read_only_property(\"_x\")\n y = utils.read_only_property(\"_y\")\n\n def __eq__(self, other):\n if not isinstance(other, EllipticCurvePublicNumbers):\n return NotImplemented\n\n return (\n self.x == other.x and\n self.y == other.y and\n self.curve.name == other.curve.name and\n self.curve.key_size == other.curve.key_size\n )\n\n def __ne__(self, other):\n return not self == other\n\n def __hash__(self):\n return hash((self.x, self.y, self.curve.name, self.curve.key_size))\n\n def __repr__(self):\n return (\n \"<EllipticCurvePublicNumbers(curve={0.curve.name}, x={0.x}, \"\n \"y={0.y}>\".format(self)\n )\n\n\nclass EllipticCurvePrivateNumbers(object):\n def __init__(self, private_value, public_numbers):\n if not isinstance(private_value, six.integer_types):\n raise TypeError(\"private_value must be an integer.\")\n\n if not isinstance(public_numbers, EllipticCurvePublicNumbers):\n raise TypeError(\n \"public_numbers must be an EllipticCurvePublicNumbers \"\n \"instance.\"\n )\n\n self._private_value = private_value\n self._public_numbers = public_numbers\n\n def private_key(self, backend):\n return backend.load_elliptic_curve_private_numbers(self)\n\n private_value = utils.read_only_property(\"_private_value\")\n public_numbers = utils.read_only_property(\"_public_numbers\")\n\n def __eq__(self, other):\n if not isinstance(other, EllipticCurvePrivateNumbers):\n return NotImplemented\n\n return (\n self.private_value == other.private_value and\n self.public_numbers == other.public_numbers\n )\n\n def __ne__(self, other):\n return not self == other\n\n def __hash__(self):\n return hash((self.private_value, self.public_numbers))\n\n\nclass ECDH(object):\n pass\n",
"path": "src/cryptography/hazmat/primitives/asymmetric/ec.py"
}
] | [
{
"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport abc\n\nimport six\n\nfrom cryptography import utils\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass EllipticCurve(object):\n @abc.abstractproperty\n def name(self):\n \"\"\"\n The name of the curve. e.g. secp256r1.\n \"\"\"\n\n @abc.abstractproperty\n def key_size(self):\n \"\"\"\n Bit size of a secret scalar for the curve.\n \"\"\"\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass EllipticCurveSignatureAlgorithm(object):\n @abc.abstractproperty\n def algorithm(self):\n \"\"\"\n The digest algorithm used with this signature.\n \"\"\"\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass EllipticCurvePrivateKey(object):\n @abc.abstractmethod\n def signer(self, signature_algorithm):\n \"\"\"\n Returns an AsymmetricSignatureContext used for signing data.\n \"\"\"\n\n @abc.abstractmethod\n def exchange(self, algorithm, peer_public_key):\n \"\"\"\n Performs a key exchange operation using the provided algorithm with the\n provided peer's public key.\n \"\"\"\n\n @abc.abstractmethod\n def public_key(self):\n \"\"\"\n The EllipticCurvePublicKey for this private key.\n \"\"\"\n\n @abc.abstractproperty\n def curve(self):\n \"\"\"\n The EllipticCurve that this key is on.\n \"\"\"\n\n @abc.abstractproperty\n def key_size(self):\n \"\"\"\n Bit size of a secret scalar for the curve.\n \"\"\"\n\n @abc.abstractproperty\n def sign(self, data, signature_algorithm):\n \"\"\"\n Signs the data\n \"\"\"\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass EllipticCurvePrivateKeyWithSerialization(EllipticCurvePrivateKey):\n @abc.abstractmethod\n def private_numbers(self):\n \"\"\"\n Returns an EllipticCurvePrivateNumbers.\n \"\"\"\n\n @abc.abstractmethod\n def private_bytes(self, encoding, format, encryption_algorithm):\n \"\"\"\n Returns the key serialized as bytes.\n \"\"\"\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass EllipticCurvePublicKey(object):\n @abc.abstractmethod\n def verifier(self, signature, signature_algorithm):\n \"\"\"\n Returns an AsymmetricVerificationContext used for signing data.\n \"\"\"\n\n @abc.abstractproperty\n def curve(self):\n \"\"\"\n The EllipticCurve that this key is on.\n \"\"\"\n\n @abc.abstractproperty\n def key_size(self):\n \"\"\"\n Bit size of a secret scalar for the curve.\n \"\"\"\n\n @abc.abstractmethod\n def public_numbers(self):\n \"\"\"\n Returns an EllipticCurvePublicNumbers.\n \"\"\"\n\n @abc.abstractmethod\n def public_bytes(self, encoding, format):\n \"\"\"\n Returns the key serialized as bytes.\n \"\"\"\n\n @abc.abstractmethod\n def verify(self, signature, data, signature_algorithm):\n \"\"\"\n Verifies the signature of the data.\n \"\"\"\n\n\nEllipticCurvePublicKeyWithSerialization = EllipticCurvePublicKey\n\n\[email protected]_interface(EllipticCurve)\nclass SECT571R1(object):\n name = \"sect571r1\"\n key_size = 570\n\n\[email protected]_interface(EllipticCurve)\nclass SECT409R1(object):\n name = \"sect409r1\"\n key_size = 409\n\n\[email protected]_interface(EllipticCurve)\nclass SECT283R1(object):\n name = \"sect283r1\"\n key_size = 283\n\n\[email protected]_interface(EllipticCurve)\nclass SECT233R1(object):\n name = \"sect233r1\"\n key_size = 233\n\n\[email protected]_interface(EllipticCurve)\nclass SECT163R2(object):\n name = \"sect163r2\"\n key_size = 163\n\n\[email 
protected]_interface(EllipticCurve)\nclass SECT571K1(object):\n name = \"sect571k1\"\n key_size = 571\n\n\[email protected]_interface(EllipticCurve)\nclass SECT409K1(object):\n name = \"sect409k1\"\n key_size = 409\n\n\[email protected]_interface(EllipticCurve)\nclass SECT283K1(object):\n name = \"sect283k1\"\n key_size = 283\n\n\[email protected]_interface(EllipticCurve)\nclass SECT233K1(object):\n name = \"sect233k1\"\n key_size = 233\n\n\[email protected]_interface(EllipticCurve)\nclass SECT163K1(object):\n name = \"sect163k1\"\n key_size = 163\n\n\[email protected]_interface(EllipticCurve)\nclass SECP521R1(object):\n name = \"secp521r1\"\n key_size = 521\n\n\[email protected]_interface(EllipticCurve)\nclass SECP384R1(object):\n name = \"secp384r1\"\n key_size = 384\n\n\[email protected]_interface(EllipticCurve)\nclass SECP256R1(object):\n name = \"secp256r1\"\n key_size = 256\n\n\[email protected]_interface(EllipticCurve)\nclass SECP256K1(object):\n name = \"secp256k1\"\n key_size = 256\n\n\[email protected]_interface(EllipticCurve)\nclass SECP224R1(object):\n name = \"secp224r1\"\n key_size = 224\n\n\[email protected]_interface(EllipticCurve)\nclass SECP192R1(object):\n name = \"secp192r1\"\n key_size = 192\n\n\[email protected]_interface(EllipticCurve)\nclass BrainpoolP256R1(object):\n name = \"brainpoolP256r1\"\n key_size = 256\n\n\[email protected]_interface(EllipticCurve)\nclass BrainpoolP384R1(object):\n name = \"brainpoolP384r1\"\n key_size = 384\n\n\[email protected]_interface(EllipticCurve)\nclass BrainpoolP512R1(object):\n name = \"brainpoolP512r1\"\n key_size = 512\n\n\n_CURVE_TYPES = {\n \"prime192v1\": SECP192R1,\n \"prime256v1\": SECP256R1,\n\n \"secp192r1\": SECP192R1,\n \"secp224r1\": SECP224R1,\n \"secp256r1\": SECP256R1,\n \"secp384r1\": SECP384R1,\n \"secp521r1\": SECP521R1,\n \"secp256k1\": SECP256K1,\n\n \"sect163k1\": SECT163K1,\n \"sect233k1\": SECT233K1,\n \"sect283k1\": SECT283K1,\n \"sect409k1\": SECT409K1,\n \"sect571k1\": SECT571K1,\n\n \"sect163r2\": SECT163R2,\n \"sect233r1\": SECT233R1,\n \"sect283r1\": SECT283R1,\n \"sect409r1\": SECT409R1,\n \"sect571r1\": SECT571R1,\n\n \"brainpoolP256r1\": BrainpoolP256R1,\n \"brainpoolP384r1\": BrainpoolP384R1,\n \"brainpoolP512r1\": BrainpoolP512R1,\n}\n\n\[email protected]_interface(EllipticCurveSignatureAlgorithm)\nclass ECDSA(object):\n def __init__(self, algorithm):\n self._algorithm = algorithm\n\n algorithm = utils.read_only_property(\"_algorithm\")\n\n\ndef generate_private_key(curve, backend):\n return backend.generate_elliptic_curve_private_key(curve)\n\n\ndef derive_private_key(private_value, curve, backend):\n if not isinstance(private_value, six.integer_types):\n raise TypeError(\"private_value must be an integer type.\")\n\n if private_value <= 0:\n raise ValueError(\"private_value must be a positive integer.\")\n\n if not isinstance(curve, EllipticCurve):\n raise TypeError(\"curve must provide the EllipticCurve interface.\")\n\n return backend.derive_elliptic_curve_private_key(private_value, curve)\n\n\nclass EllipticCurvePublicNumbers(object):\n def __init__(self, x, y, curve):\n if (\n not isinstance(x, six.integer_types) or\n not isinstance(y, six.integer_types)\n ):\n raise TypeError(\"x and y must be integers.\")\n\n if not isinstance(curve, EllipticCurve):\n raise TypeError(\"curve must provide the EllipticCurve interface.\")\n\n self._y = y\n self._x = x\n self._curve = curve\n\n def public_key(self, backend):\n return backend.load_elliptic_curve_public_numbers(self)\n\n def 
encode_point(self):\n # key_size is in bits. Convert to bytes and round up\n byte_length = (self.curve.key_size + 7) // 8\n return (\n b'\\x04' + utils.int_to_bytes(self.x, byte_length) +\n utils.int_to_bytes(self.y, byte_length)\n )\n\n @classmethod\n def from_encoded_point(cls, curve, data):\n if not isinstance(curve, EllipticCurve):\n raise TypeError(\"curve must be an EllipticCurve instance\")\n\n if data.startswith(b'\\x04'):\n # key_size is in bits. Convert to bytes and round up\n byte_length = (curve.key_size + 7) // 8\n if len(data) == 2 * byte_length + 1:\n x = utils.int_from_bytes(data[1:byte_length + 1], 'big')\n y = utils.int_from_bytes(data[byte_length + 1:], 'big')\n return cls(x, y, curve)\n else:\n raise ValueError('Invalid elliptic curve point data length')\n else:\n raise ValueError('Unsupported elliptic curve point type')\n\n curve = utils.read_only_property(\"_curve\")\n x = utils.read_only_property(\"_x\")\n y = utils.read_only_property(\"_y\")\n\n def __eq__(self, other):\n if not isinstance(other, EllipticCurvePublicNumbers):\n return NotImplemented\n\n return (\n self.x == other.x and\n self.y == other.y and\n self.curve.name == other.curve.name and\n self.curve.key_size == other.curve.key_size\n )\n\n def __ne__(self, other):\n return not self == other\n\n def __hash__(self):\n return hash((self.x, self.y, self.curve.name, self.curve.key_size))\n\n def __repr__(self):\n return (\n \"<EllipticCurvePublicNumbers(curve={0.curve.name}, x={0.x}, \"\n \"y={0.y}>\".format(self)\n )\n\n\nclass EllipticCurvePrivateNumbers(object):\n def __init__(self, private_value, public_numbers):\n if not isinstance(private_value, six.integer_types):\n raise TypeError(\"private_value must be an integer.\")\n\n if not isinstance(public_numbers, EllipticCurvePublicNumbers):\n raise TypeError(\n \"public_numbers must be an EllipticCurvePublicNumbers \"\n \"instance.\"\n )\n\n self._private_value = private_value\n self._public_numbers = public_numbers\n\n def private_key(self, backend):\n return backend.load_elliptic_curve_private_numbers(self)\n\n private_value = utils.read_only_property(\"_private_value\")\n public_numbers = utils.read_only_property(\"_public_numbers\")\n\n def __eq__(self, other):\n if not isinstance(other, EllipticCurvePrivateNumbers):\n return NotImplemented\n\n return (\n self.private_value == other.private_value and\n self.public_numbers == other.public_numbers\n )\n\n def __ne__(self, other):\n return not self == other\n\n def __hash__(self):\n return hash((self.private_value, self.public_numbers))\n\n\nclass ECDH(object):\n pass\n",
"path": "src/cryptography/hazmat/primitives/asymmetric/ec.py"
}
] | diff --git a/src/cryptography/hazmat/primitives/asymmetric/ec.py b/src/cryptography/hazmat/primitives/asymmetric/ec.py
index 83266bb4681c..6cbfcab4c1bd 100644
--- a/src/cryptography/hazmat/primitives/asymmetric/ec.py
+++ b/src/cryptography/hazmat/primitives/asymmetric/ec.py
@@ -135,7 +135,7 @@ def verify(self, signature, data, signature_algorithm):
@utils.register_interface(EllipticCurve)
class SECT571R1(object):
name = "sect571r1"
- key_size = 571
+ key_size = 570
@utils.register_interface(EllipticCurve)
|
cloud-custodian__cloud-custodian-1049 | efs tag support
I am finding that searching for tags on EFS resources does not consistently report the correct results. It did find an EFS that was incorrectly tagged, but after the tagging was corrected it continued to report the same resource. I use the same filter for other resource types and do not see this behavior.
```
- name: efs-tag-compliance
resource: efs
description:
Notify if an EFS does not comply with tagging best practices.
mode:
type: periodic
schedule: "rate(24 hours)"
role: arn:aws:iam::MYACCOUNT:role/cloud-custodian
filters:
- or:
- "tag:CostCenter": absent
- "tag:POC": absent
- "tag:Service": absent
- "tag:Name": absent
...
```
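To sanity-check what the filters should be matching, the tags can be pulled straight from the EFS API; a minimal boto3 sketch (the filesystem ID below is a placeholder, not one from this report):
```python
import boto3

# Hypothetical check: list the tags EFS reports for one filesystem and compare
# them against the keys the policy's filters treat as required.
client = boto3.client("efs")
resp = client.describe_tags(FileSystemId="fs-12345678")  # placeholder ID
tags = {t["Key"]: t["Value"] for t in resp["Tags"]}
for required in ("CostCenter", "POC", "Service", "Name"):
    print(required, "present" if required in tags else "absent")
```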
| [
{
"content": "# Copyright 2016 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom c7n.actions import Action\nfrom c7n.manager import resources\nfrom c7n.query import QueryResourceManager\nfrom c7n.utils import local_session, type_schema, get_retry\n\n\[email protected]('efs')\nclass ElasticFileSystem(QueryResourceManager):\n\n class resource_type(object):\n service = 'efs'\n enum_spec = ('describe_file_systems', 'FileSystems', None)\n id = 'FileSystemId'\n name = 'Name'\n date = 'CreationTime'\n dimension = None\n\n\[email protected]_registry.register('delete')\nclass Delete(Action):\n\n schema = type_schema('delete')\n permissions = ('efs:DescribeMountTargets',\n 'efs:DeleteMountTargets',\n 'efs:DeleteFileSystem')\n\n def process(self, resources):\n client = local_session(self.manager.session_factory).client('efs')\n self.unmount_filesystems(resources)\n retry = get_retry(('FileSystemInUse',), 12)\n for r in resources:\n retry(client.delete_file_system, FileSystemId=r['FileSystemId'])\n\n def unmount_filesystems(self, resources):\n client = local_session(self.manager.session_factory).client('efs')\n for r in resources:\n if not r['NumberOfMountTargets']:\n continue\n for t in client.describe_mount_targets(\n FileSystemId=r['FileSystemId'])['MountTargets']:\n client.delete_mount_target(MountTargetId=t['MountTargetId'])\n",
"path": "c7n/resources/efs.py"
}
] | [
{
"content": "# Copyright 2016 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom c7n.actions import Action\nfrom c7n.manager import resources\nfrom c7n.query import QueryResourceManager\nfrom c7n.utils import local_session, type_schema, get_retry\n\n\[email protected]('efs')\nclass ElasticFileSystem(QueryResourceManager):\n\n class resource_type(object):\n service = 'efs'\n enum_spec = ('describe_file_systems', 'FileSystems', None)\n id = 'FileSystemId'\n name = 'Name'\n date = 'CreationTime'\n dimension = None\n detail_spec = ('describe_tags', 'FileSystemId', 'FileSystemId', None)\n\n\[email protected]_registry.register('delete')\nclass Delete(Action):\n\n schema = type_schema('delete')\n permissions = ('efs:DescribeMountTargets',\n 'efs:DeleteMountTargets',\n 'efs:DeleteFileSystem')\n\n def process(self, resources):\n client = local_session(self.manager.session_factory).client('efs')\n self.unmount_filesystems(resources)\n retry = get_retry(('FileSystemInUse',), 12)\n for r in resources:\n retry(client.delete_file_system, FileSystemId=r['FileSystemId'])\n\n def unmount_filesystems(self, resources):\n client = local_session(self.manager.session_factory).client('efs')\n for r in resources:\n if not r['NumberOfMountTargets']:\n continue\n for t in client.describe_mount_targets(\n FileSystemId=r['FileSystemId'])['MountTargets']:\n client.delete_mount_target(MountTargetId=t['MountTargetId'])\n",
"path": "c7n/resources/efs.py"
}
] | diff --git a/c7n/resources/efs.py b/c7n/resources/efs.py
index 2687e2113eb..3dfe9b58682 100644
--- a/c7n/resources/efs.py
+++ b/c7n/resources/efs.py
@@ -27,6 +27,7 @@ class resource_type(object):
name = 'Name'
date = 'CreationTime'
dimension = None
+ detail_spec = ('describe_tags', 'FileSystemId', 'FileSystemId', None)
@ElasticFileSystem.action_registry.register('delete')
diff --git a/tests/data/placebo/test_efs_delete/elasticfilesystem.DescribeTags_1.json b/tests/data/placebo/test_efs_delete/elasticfilesystem.DescribeTags_1.json
new file mode 100644
index 00000000000..b07ee79843d
--- /dev/null
+++ b/tests/data/placebo/test_efs_delete/elasticfilesystem.DescribeTags_1.json
@@ -0,0 +1,30 @@
+{
+ "status_code": 200,
+ "data": {
+ "ResponseMetadata": {
+ "RetryAttempts": 0,
+ "HTTPStatusCode": 200,
+ "RequestId": "d6e6f4a2-13e8-11e7-a10d-03dc08f89c32",
+ "HTTPHeaders": {
+ "x-amzn-requestid": "d6e6f4a2-13e8-11e7-a10d-03dc08f89c32",
+ "date": "Tue, 28 Mar 2017 19:00:41 GMT",
+ "content-length": "142",
+ "content-type": "application/json"
+ }
+ },
+ "Tags": [
+ {
+ "Value": "MyDocs",
+ "Key": "Name"
+ },
+ {
+ "Value": "Test User",
+ "Key": "POC"
+ },
+ {
+ "Value": "skunk",
+ "Key": "Service"
+ }
+ ]
+ }
+}
\ No newline at end of file
diff --git a/tests/data/placebo/test_efs_query/elasticfilesystem.DescribeTags_1.json b/tests/data/placebo/test_efs_query/elasticfilesystem.DescribeTags_1.json
new file mode 100644
index 00000000000..b07ee79843d
--- /dev/null
+++ b/tests/data/placebo/test_efs_query/elasticfilesystem.DescribeTags_1.json
@@ -0,0 +1,30 @@
+{
+ "status_code": 200,
+ "data": {
+ "ResponseMetadata": {
+ "RetryAttempts": 0,
+ "HTTPStatusCode": 200,
+ "RequestId": "d6e6f4a2-13e8-11e7-a10d-03dc08f89c32",
+ "HTTPHeaders": {
+ "x-amzn-requestid": "d6e6f4a2-13e8-11e7-a10d-03dc08f89c32",
+ "date": "Tue, 28 Mar 2017 19:00:41 GMT",
+ "content-length": "142",
+ "content-type": "application/json"
+ }
+ },
+ "Tags": [
+ {
+ "Value": "MyDocs",
+ "Key": "Name"
+ },
+ {
+ "Value": "Test User",
+ "Key": "POC"
+ },
+ {
+ "Value": "skunk",
+ "Key": "Service"
+ }
+ ]
+ }
+}
\ No newline at end of file
|
bokeh__bokeh-2235 | VBoxForm broken
Added a `float:left` to fix `sliders.py`, which broke the stock app example even worse.
| [
{
"content": "\nfrom bokeh.io import vform\nfrom bokeh.plotting import figure, hplot, output_file, show, vplot, ColumnDataSource\nfrom bokeh.models.actions import Callback\nfrom bokeh.models.widgets import Slider\n\nimport numpy as np\n\nx = np.linspace(0, 10, 500)\ny = np.sin(x)\n\nsource = ColumnDataSource(data=dict(x=x, y=y))\n\n\nplot = figure(y_range=(-10, 10), plot_width=400, plot_height=400)\nplot.line('x', 'y', source=source, line_width=3, line_alpha=0.6)\n\ncallback = Callback(args=dict(source=source), code=\"\"\"\n var data = source.get('data');\n var A = amp.get('value')\n var k = freq.get('value')\n var phi = phase.get('value')\n var B = offset.get('value')\n x = data['x']\n y = data['y']\n for (i = 0; i < x.length; i++) {\n y[i] = B + A*Math.sin(k*x[i]+phi);\n }\n source.trigger('change');\n\"\"\")\n\namp_slider = Slider(start=0.1, end=10, value=1, step=.1, title=\"Amplitude\", callback=callback)\ncallback.args[\"amp\"] = amp_slider\n\nfreq_slider = Slider(start=0.1, end=10, value=1, step=.1, title=\"Frequency\", callback=callback)\ncallback.args[\"freq\"] = freq_slider\n\nphase_slider = Slider(start=0, end=6.4, value=0, step=.1, title=\"Phase\", callback=callback)\ncallback.args[\"phase\"] = phase_slider\n\noffset_slider = Slider(start=-5, end=5, value=0, step=.1, title=\"Offset\", callback=callback)\ncallback.args[\"offset\"] = offset_slider\n\nlayout = hplot(\n vform(amp_slider, freq_slider, phase_slider, offset_slider),\n plot\n)\n\noutput_file(\"slider.html\")\n\nshow(layout)\n",
"path": "examples/plotting/file/slider.py"
}
] | [
{
"content": "\nfrom bokeh.io import vform\nfrom bokeh.plotting import figure, hplot, output_file, show, vplot, ColumnDataSource\nfrom bokeh.models.actions import Callback\nfrom bokeh.models.widgets import Slider\n\nimport numpy as np\n\nx = np.linspace(0, 10, 500)\ny = np.sin(x)\n\nsource = ColumnDataSource(data=dict(x=x, y=y))\n\n\nplot = figure(y_range=(-10, 10), plot_width=400, plot_height=400)\nplot.line('x', 'y', source=source, line_width=3, line_alpha=0.6)\n\ncallback = Callback(args=dict(source=source), code=\"\"\"\n var data = source.get('data');\n var A = amp.get('value')\n var k = freq.get('value')\n var phi = phase.get('value')\n var B = offset.get('value')\n x = data['x']\n y = data['y']\n for (i = 0; i < x.length; i++) {\n y[i] = B + A*Math.sin(k*x[i]+phi);\n }\n source.trigger('change');\n\"\"\")\n\namp_slider = Slider(start=0.1, end=10, value=1, step=.1, title=\"Amplitude\", callback=callback)\ncallback.args[\"amp\"] = amp_slider\n\nfreq_slider = Slider(start=0.1, end=10, value=1, step=.1, title=\"Frequency\", callback=callback)\ncallback.args[\"freq\"] = freq_slider\n\nphase_slider = Slider(start=0, end=6.4, value=0, step=.1, title=\"Phase\", callback=callback)\ncallback.args[\"phase\"] = phase_slider\n\noffset_slider = Slider(start=-5, end=5, value=0, step=.1, title=\"Offset\", callback=callback)\ncallback.args[\"offset\"] = offset_slider\n\nlayout = hplot(\n plot,\n vform(amp_slider, freq_slider, phase_slider, offset_slider),\n)\n\noutput_file(\"slider.html\")\n\nshow(layout)\n",
"path": "examples/plotting/file/slider.py"
}
] | diff --git a/bokehjs/src/less/widgets.less b/bokehjs/src/less/widgets.less
index 0069826f406..0d937da33d8 100644
--- a/bokehjs/src/less/widgets.less
+++ b/bokehjs/src/less/widgets.less
@@ -14,7 +14,6 @@
.bk-widget-form {
padding: 30px 30px 30px 30px;
overflow: hidden;
- float:left;
}
.bk-widget-form-group {
diff --git a/examples/plotting/file/slider.py b/examples/plotting/file/slider.py
index 71f3af31ad6..314dbe0f088 100644
--- a/examples/plotting/file/slider.py
+++ b/examples/plotting/file/slider.py
@@ -42,8 +42,8 @@
callback.args["offset"] = offset_slider
layout = hplot(
+ plot,
vform(amp_slider, freq_slider, phase_slider, offset_slider),
- plot
)
output_file("slider.html")
|
typeddjango__django-stubs-1371 | Next release planning (1.15.0)
I'll make a new release soonish, perhaps this weekend or next week, now that mypy 1.0 is being tested in CI and used for `django-stubs[compatible-mypy]`.
* #1360
I'd like to make a dual release together with djangorestframework-stubs, so the recommended mypy version stays in sync between both projects. But there's still some work to be done on that side: https://github.com/typeddjango/djangorestframework-stubs/issues/324#issuecomment-1421098490
Additionally, there are nice-to-have PRs waiting; community reviewers welcome:
* #1309
* #1308
| [
{
"content": "import os\nfrom typing import List\n\nfrom setuptools import find_packages, setup\n\n\ndef find_stub_files(name: str) -> List[str]:\n result = []\n for root, _dirs, files in os.walk(name):\n for file in files:\n if file.endswith(\".pyi\"):\n if os.path.sep in root:\n sub_root = root.split(os.path.sep, 1)[-1]\n file = os.path.join(sub_root, file)\n result.append(file)\n return result\n\n\nwith open(\"README.md\") as f:\n readme = f.read()\n\ndependencies = [\n \"mypy>=0.980\",\n \"django\",\n \"django-stubs-ext>=0.7.0\",\n \"tomli\",\n # Types:\n \"typing-extensions\",\n \"types-pytz\",\n \"types-PyYAML\",\n]\n\nextras_require = {\n \"compatible-mypy\": [\"mypy>=1.0,<1.1\"],\n}\n\nsetup(\n name=\"django-stubs\",\n version=\"1.14.0\",\n description=\"Mypy stubs for Django\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n license=\"MIT\",\n url=\"https://github.com/typeddjango/django-stubs\",\n author=\"Maksim Kurnikov\",\n author_email=\"[email protected]\",\n maintainer=\"Nikita Sobolev\",\n maintainer_email=\"[email protected]\",\n py_modules=[],\n python_requires=\">=3.7\",\n install_requires=dependencies,\n extras_require=extras_require,\n packages=[\"django-stubs\", *find_packages(exclude=[\"scripts\"])],\n package_data={\n \"django-stubs\": find_stub_files(\"django-stubs\"),\n \"mypy_django_plugin\": [\"py.typed\"],\n },\n classifiers=[\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Typing :: Typed\",\n \"Framework :: Django\",\n \"Framework :: Django :: 2.2\",\n \"Framework :: Django :: 3.0\",\n \"Framework :: Django :: 3.1\",\n \"Framework :: Django :: 3.2\",\n \"Framework :: Django :: 4.0\",\n \"Framework :: Django :: 4.1\",\n ],\n project_urls={\n \"Release notes\": \"https://github.com/typeddjango/django-stubs/releases\",\n },\n)\n",
"path": "setup.py"
}
] | [
{
"content": "import os\nfrom typing import List\n\nfrom setuptools import find_packages, setup\n\n\ndef find_stub_files(name: str) -> List[str]:\n result = []\n for root, _dirs, files in os.walk(name):\n for file in files:\n if file.endswith(\".pyi\"):\n if os.path.sep in root:\n sub_root = root.split(os.path.sep, 1)[-1]\n file = os.path.join(sub_root, file)\n result.append(file)\n return result\n\n\nwith open(\"README.md\") as f:\n readme = f.read()\n\ndependencies = [\n \"mypy>=0.980\",\n \"django\",\n \"django-stubs-ext>=0.7.0\",\n \"tomli\",\n # Types:\n \"typing-extensions\",\n \"types-pytz\",\n \"types-PyYAML\",\n]\n\nextras_require = {\n \"compatible-mypy\": [\"mypy>=1.0,<1.1\"],\n}\n\nsetup(\n name=\"django-stubs\",\n version=\"1.15.0\",\n description=\"Mypy stubs for Django\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n license=\"MIT\",\n url=\"https://github.com/typeddjango/django-stubs\",\n author=\"Maksim Kurnikov\",\n author_email=\"[email protected]\",\n maintainer=\"Nikita Sobolev\",\n maintainer_email=\"[email protected]\",\n py_modules=[],\n python_requires=\">=3.7\",\n install_requires=dependencies,\n extras_require=extras_require,\n packages=[\"django-stubs\", *find_packages(exclude=[\"scripts\"])],\n package_data={\n \"django-stubs\": find_stub_files(\"django-stubs\"),\n \"mypy_django_plugin\": [\"py.typed\"],\n },\n classifiers=[\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Typing :: Typed\",\n \"Framework :: Django\",\n \"Framework :: Django :: 2.2\",\n \"Framework :: Django :: 3.0\",\n \"Framework :: Django :: 3.1\",\n \"Framework :: Django :: 3.2\",\n \"Framework :: Django :: 4.0\",\n \"Framework :: Django :: 4.1\",\n ],\n project_urls={\n \"Release notes\": \"https://github.com/typeddjango/django-stubs/releases\",\n },\n)\n",
"path": "setup.py"
}
] | diff --git a/README.md b/README.md
index f75a1e57a..2fde6dddf 100644
--- a/README.md
+++ b/README.md
@@ -51,6 +51,7 @@ We rely on different `django` and `mypy` versions:
| django-stubs | mypy version | django version | python version
|--------------| ---- | ---- | ---- |
+| 1.15.0 | 1.0.x | 3.2.x or 4.0.x or 4.1.x | ^3.7
| 1.14.0 | 0.990+ | 3.2.x or 4.0.x or 4.1.x | ^3.7
| 1.13.0 | 0.980+ | 3.2.x or 4.0.x or 4.1.x | ^3.7
| 1.12.0 | 0.931+ | 3.2.x or 4.0.x | ^3.7
diff --git a/setup.py b/setup.py
index 11a5ae240..019e1ca1b 100644
--- a/setup.py
+++ b/setup.py
@@ -36,7 +36,7 @@ def find_stub_files(name: str) -> List[str]:
setup(
name="django-stubs",
- version="1.14.0",
+ version="1.15.0",
description="Mypy stubs for Django",
long_description=readme,
long_description_content_type="text/markdown",
|
voxel51__fiftyone-3297 | [BUG] fiftyone forces starlette=0.16.0 and it breaks integrations with applications that use FastAPI in newer versions.
### Instructions
Thank you for submitting an issue. Please refer to our
[issue policy](https://www.github.com/voxel51/fiftyone/blob/develop/ISSUE_POLICY.md)
for information on what types of issues we address.
**Please fill in this template to ensure a timely and thorough response.**
- Place an "x" between the brackets next to an option if it applies. Example:
- [x] Selected option
- Please delete this section (all content above this line) before submitting
the issue
### System information
- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: linux 18.04
- **FiftyOne installed from (pip or source)**:pip
- **FiftyOne version (run `fiftyone --version`)**:0.17.2
- **Python version**:3.8
### Commands to reproduce
As thoroughly as possible, please provide the Python and/or shell commands used
to encounter the issue. Application steps can be described in the next section.
requirements.txt:
fastapi==0.79.0
fiftyone==0.17.2
```
pip install -r requirements.txt
```
### Describe the problem
fiftyone cannot be used with newer versions of fastapi, because it pins starlette to exactly version 0.16.0.
Is it possible to relax the constraint to something like starlette>=0.16.0? That way it would not break apps that use fiftyone.
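As a rough illustration of why a range resolves the conflict (a sketch using the `packaging` library, not an official fix):
```python
from packaging.specifiers import SpecifierSet

# fastapi 0.79.0 requires starlette==0.19.1; check which fiftyone constraint admits it.
print("0.19.1" in SpecifierSet(">=0.16.0"))  # True  -> resolvable alongside fastapi
print("0.19.1" in SpecifierSet("==0.16.0"))  # False -> pip reports ResolutionImpossible
```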
### Code to reproduce issue
fastapi==0.79.0
fiftyone==0.17.2
pip install -r requirements.txt
### Other info / logs
es.
#0 388.1
#0 388.1 The conflict is caused by:
#0 388.1 bentoml 1.0.4 depends on starlette
#0 388.1 fastapi 0.79.0 depends on starlette==0.19.1
#0 388.1 fiftyone 0.17.2 depends on starlette==0.16.0
#0 388.1
#0 388.1 To fix this you could try to:
#0 388.1 1. loosen the range of package versions you've specified
#0 388.1 2. remove package versions to allow pip attempt to solve the dependency conflict
#0 388.1
#0 388.1 ERROR: ResolutionImpossible: for help visit https://pip.pypa.io/en/latest/topics/dependency-resolution/#dealing-with-dependency-conflicts
#0 388.1 WARNING: You are using pip version 22.0.4; however, version 22.3 is available.
#0 388.1 You should consider upgrading via the '/usr/bin/python -m pip install --upgrade pip' command.
------
### What areas of FiftyOne does this bug affect?
- [x] `App`: FiftyOne application issue
- [ ] `Core`: Core `fiftyone` Python library issue
- [ ] `Server`: Fiftyone server issue
### Willingness to contribute
The FiftyOne Community encourages bug fix contributions. Would you or another
member of your organization be willing to contribute a fix for this bug to the
FiftyOne codebase?
- [x] Yes. I can contribute a fix for this bug independently.
- [ ] Yes. I would be willing to contribute a fix for this bug with guidance
from the FiftyOne community.
- [ ] No. I cannot contribute a bug fix at this time.
| [
{
"content": "#!/usr/bin/env python\n\"\"\"\nInstalls FiftyOne.\n\n| Copyright 2017-2023, Voxel51, Inc.\n| `voxel51.com <https://voxel51.com/>`_\n|\n\"\"\"\ntry:\n from importlib import metadata\nexcept ImportError:\n import importlib_metadata as metadata\n\nimport os\nimport re\nfrom setuptools import setup, find_packages\n\n\nVERSION = \"0.21.3\"\n\n\ndef get_version():\n if \"RELEASE_VERSION\" in os.environ:\n version = os.environ[\"RELEASE_VERSION\"]\n if not version.startswith(VERSION):\n raise ValueError(\n \"Release version does not match version: %s and %s\"\n % (version, VERSION)\n )\n return version\n\n return VERSION\n\n\nINSTALL_REQUIRES = [\n # third-party packages\n \"aiofiles\",\n \"argcomplete\",\n \"boto3\",\n \"cachetools\",\n \"dacite>=1.6.0,<1.8.0\",\n \"Deprecated\",\n \"eventlet\",\n \"ftfy\",\n \"future\",\n \"hypercorn>=0.13.2\",\n \"importlib-metadata; python_version<'3.8'\",\n \"Jinja2>=3\",\n \"kaleido\",\n \"matplotlib\",\n \"mongoengine==0.24.2\",\n \"motor>=2.5\",\n \"numpy\",\n \"packaging\",\n \"pandas\",\n \"Pillow>=6.2\",\n \"plotly>=4.14\",\n \"pprintpp\",\n \"psutil\",\n \"pymongo>=3.12\",\n \"pytz\",\n \"PyYAML\",\n \"regex\",\n \"retrying\",\n \"scikit-learn\",\n \"scikit-image\",\n \"setuptools\",\n \"sseclient-py>=1.7.2,<2\",\n \"sse-starlette>=0.10.3,<1\",\n \"starlette>=0.24.0,<0.27\",\n \"strawberry-graphql==0.138.1\",\n \"tabulate\",\n \"xmltodict\",\n \"universal-analytics-python3>=1.0.1,<2\",\n # internal packages\n \"fiftyone-brain>=0.13,<0.14\",\n \"fiftyone-db>=0.4,<0.5\",\n \"voxel51-eta>=0.10,<0.11\",\n]\n\n\nCHOOSE_INSTALL_REQUIRES = [\n (\n (\n \"opencv-python\",\n \"opencv-contrib-python\",\n \"opencv-contrib-python-headless\",\n ),\n \"opencv-python-headless\",\n )\n]\n\n\ndef choose_requirement(mains, secondary):\n chosen = secondary\n for main in mains:\n try:\n name = re.split(r\"[!<>=]\", main)[0]\n metadata.version(name)\n chosen = main\n break\n except metadata.PackageNotFoundError:\n pass\n\n return str(chosen)\n\n\ndef get_install_requirements(install_requires, choose_install_requires):\n for mains, secondary in choose_install_requires:\n install_requires.append(choose_requirement(mains, secondary))\n\n return install_requires\n\n\nEXTRAS_REQUIREMENTS = {\"desktop\": [\"fiftyone-desktop>=0.28.2,<0.29\"]}\n\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\n\nsetup(\n name=\"fiftyone\",\n version=get_version(),\n description=(\n \"FiftyOne: the open-source tool for building high-quality datasets \"\n \"and computer vision models\"\n ),\n author=\"Voxel51, Inc.\",\n author_email=\"[email protected]\",\n url=\"https://github.com/voxel51/fiftyone\",\n extras_require=EXTRAS_REQUIREMENTS,\n license=\"Apache\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n packages=find_packages(\n exclude=[\"app\", \"eta\", \"package\", \"requirements\", \"tests\", \"tools\"]\n )\n + [\"fiftyone.recipes\", \"fiftyone.tutorials\"],\n package_dir={\n \"fiftyone.recipes\": \"docs/source/recipes\",\n \"fiftyone.tutorials\": \"docs/source/tutorials\",\n },\n install_requires=get_install_requirements(\n INSTALL_REQUIRES, CHOOSE_INSTALL_REQUIRES\n ),\n include_package_data=True,\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Topic :: Scientific/Engineering :: Image 
Processing\",\n \"Topic :: Scientific/Engineering :: Image Recognition\",\n \"Topic :: Scientific/Engineering :: Information Analysis\",\n \"Topic :: Scientific/Engineering :: Visualization\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n ],\n entry_points={\"console_scripts\": [\"fiftyone=fiftyone.core.cli:main\"]},\n python_requires=\">=3.7\",\n)\n",
"path": "setup.py"
}
] | [
{
"content": "#!/usr/bin/env python\n\"\"\"\nInstalls FiftyOne.\n\n| Copyright 2017-2023, Voxel51, Inc.\n| `voxel51.com <https://voxel51.com/>`_\n|\n\"\"\"\ntry:\n from importlib import metadata\nexcept ImportError:\n import importlib_metadata as metadata\n\nimport os\nimport re\nfrom setuptools import setup, find_packages\n\n\nVERSION = \"0.21.3\"\n\n\ndef get_version():\n if \"RELEASE_VERSION\" in os.environ:\n version = os.environ[\"RELEASE_VERSION\"]\n if not version.startswith(VERSION):\n raise ValueError(\n \"Release version does not match version: %s and %s\"\n % (version, VERSION)\n )\n return version\n\n return VERSION\n\n\nINSTALL_REQUIRES = [\n # third-party packages\n \"aiofiles\",\n \"argcomplete\",\n \"boto3\",\n \"cachetools\",\n \"dacite>=1.6.0,<1.8.0\",\n \"Deprecated\",\n \"eventlet\",\n \"ftfy\",\n \"future\",\n \"hypercorn>=0.13.2\",\n \"importlib-metadata; python_version<'3.8'\",\n \"Jinja2>=3\",\n \"kaleido\",\n \"matplotlib\",\n \"mongoengine==0.24.2\",\n \"motor>=2.5\",\n \"numpy\",\n \"packaging\",\n \"pandas\",\n \"Pillow>=6.2\",\n \"plotly>=4.14\",\n \"pprintpp\",\n \"psutil\",\n \"pymongo>=3.12\",\n \"pytz\",\n \"PyYAML\",\n \"regex\",\n \"retrying\",\n \"scikit-learn\",\n \"scikit-image\",\n \"setuptools\",\n \"sseclient-py>=1.7.2,<2\",\n \"sse-starlette>=0.10.3,<1\",\n \"starlette>=0.24.0\",\n \"strawberry-graphql==0.138.1\",\n \"tabulate\",\n \"xmltodict\",\n \"universal-analytics-python3>=1.0.1,<2\",\n # internal packages\n \"fiftyone-brain>=0.13,<0.14\",\n \"fiftyone-db>=0.4,<0.5\",\n \"voxel51-eta>=0.10,<0.11\",\n]\n\n\nCHOOSE_INSTALL_REQUIRES = [\n (\n (\n \"opencv-python\",\n \"opencv-contrib-python\",\n \"opencv-contrib-python-headless\",\n ),\n \"opencv-python-headless\",\n )\n]\n\n\ndef choose_requirement(mains, secondary):\n chosen = secondary\n for main in mains:\n try:\n name = re.split(r\"[!<>=]\", main)[0]\n metadata.version(name)\n chosen = main\n break\n except metadata.PackageNotFoundError:\n pass\n\n return str(chosen)\n\n\ndef get_install_requirements(install_requires, choose_install_requires):\n for mains, secondary in choose_install_requires:\n install_requires.append(choose_requirement(mains, secondary))\n\n return install_requires\n\n\nEXTRAS_REQUIREMENTS = {\"desktop\": [\"fiftyone-desktop>=0.28.2,<0.29\"]}\n\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\n\nsetup(\n name=\"fiftyone\",\n version=get_version(),\n description=(\n \"FiftyOne: the open-source tool for building high-quality datasets \"\n \"and computer vision models\"\n ),\n author=\"Voxel51, Inc.\",\n author_email=\"[email protected]\",\n url=\"https://github.com/voxel51/fiftyone\",\n extras_require=EXTRAS_REQUIREMENTS,\n license=\"Apache\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n packages=find_packages(\n exclude=[\"app\", \"eta\", \"package\", \"requirements\", \"tests\", \"tools\"]\n )\n + [\"fiftyone.recipes\", \"fiftyone.tutorials\"],\n package_dir={\n \"fiftyone.recipes\": \"docs/source/recipes\",\n \"fiftyone.tutorials\": \"docs/source/tutorials\",\n },\n install_requires=get_install_requirements(\n INSTALL_REQUIRES, CHOOSE_INSTALL_REQUIRES\n ),\n include_package_data=True,\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Topic :: Scientific/Engineering :: Image Processing\",\n 
\"Topic :: Scientific/Engineering :: Image Recognition\",\n \"Topic :: Scientific/Engineering :: Information Analysis\",\n \"Topic :: Scientific/Engineering :: Visualization\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n ],\n entry_points={\"console_scripts\": [\"fiftyone=fiftyone.core.cli:main\"]},\n python_requires=\">=3.7\",\n)\n",
"path": "setup.py"
}
] | diff --git a/setup.py b/setup.py
index 826a6ba39a5..002e6a8e3ed 100644
--- a/setup.py
+++ b/setup.py
@@ -67,7 +67,7 @@ def get_version():
"setuptools",
"sseclient-py>=1.7.2,<2",
"sse-starlette>=0.10.3,<1",
- "starlette>=0.24.0,<0.27",
+ "starlette>=0.24.0",
"strawberry-graphql==0.138.1",
"tabulate",
"xmltodict",
|
learningequality__kolibri-5872 | update perseus to use new build config scheme
### Observed behavior
Follow-up from #5864: perseus needs to be updated to use the new build config. It currently builds but does not run.
### Errors and logs
Currently getting:
```
ERROR Internal Server Error: /en/user/
Traceback (most recent call last):
File "/Users/d/Projects/le/kolibri/kolibri/core/webpack/hooks.py", line 111, in _stats_file_content
with io.open(self._stats_file, mode="r", encoding="utf-8") as f:
FileNotFoundError: [Errno 2] No such file or directory: '/Users/d/Projects/le/kolibri/.venv/lib/python3.7/site-packages/kolibri_exercise_perseus_plugin/build/_stats.json'
```
### Context
current 0.13.0 develop branch
| [
{
"content": "import argparse\nimport importlib\nimport json\nimport logging\nimport os\nimport sys\nimport tempfile\n\nfrom pkg_resources import DistributionNotFound\nfrom pkg_resources import get_distribution\nfrom pkg_resources import resource_exists\nfrom pkg_resources import resource_filename\nfrom pkg_resources import resource_isdir\nfrom pkg_resources import resource_listdir\n\nlogger = logging.getLogger(\"webpack_json\")\nlogger.setLevel(level=logging.INFO)\n\nBUILD_CONFIG = \"buildConfig.js\"\n\n\ndef load_plugins_from_file(file_path):\n try:\n import requests\n except ImportError:\n requests = None\n # We have been passed a URL, not a local file path\n if file_path.startswith(\"http\"):\n if requests is None:\n raise ImportError(\"Requests is required to import plugins from urls\")\n print(\n \"Downloading plugins manifest from {file_path}\".format(file_path=file_path)\n )\n _, path = tempfile.mkstemp(suffix=\".txt\", text=True)\n with open(path, \"w\") as f:\n r = requests.get(file_path)\n f.write(r.content)\n file_path = path\n with open(file_path, \"r\") as f:\n return [plugin.strip() for plugin in f.readlines() if plugin.strip()]\n\n\ndef expand_glob(build_item):\n plugins = []\n # Do a very simple check here, only deal with a single * at the end of something!\n if (\n len([item for item in build_item.split(\".\") if item == \"*\"]) > 1\n or build_item.endswith(\"**\")\n or build_item == \"*\"\n or not build_item.endswith(\"*\")\n ):\n logging.error(\"Too many * paths, only use one per module spec\")\n return plugins\n parent_module_path = \".\".join(\n [item for item in build_item.split(\".\") if item and item != \"*\"]\n )\n try:\n for file in resource_listdir(parent_module_path, \".\"):\n if resource_isdir(parent_module_path, file):\n try:\n child_module_path = parent_module_path + \".\" + file\n plugin = plugin_data(child_module_path)\n if plugin is not None:\n plugins.append(plugin)\n except ImportError:\n continue\n except OSError:\n pass\n return plugins\n\n\ndef plugin_data(module_path):\n try:\n if resource_exists(module_path, BUILD_CONFIG):\n plugin_path = os.path.dirname(resource_filename(module_path, BUILD_CONFIG))\n try:\n version = get_distribution(module_path).version\n except (DistributionNotFound, AttributeError):\n try:\n module = importlib.import_module(module_path)\n version = module.__version__\n except (ImportError, AttributeError):\n import kolibri\n\n version = kolibri.__version__\n if module_path.startswith(\"kolibri.\"):\n import kolibri\n\n locale_data_folder = os.path.join(\n os.path.dirname(kolibri.__file__), \"locale\", \"en\", \"LC_MESSAGES\"\n )\n # Is an external plugin, do otherwise!\n else:\n locale_data_folder = os.path.join(\n plugin_path, \"locale\", \"en\", \"LC_MESSAGES\"\n )\n return {\n \"locale_data_folder\": locale_data_folder,\n \"plugin_path\": plugin_path,\n \"version\": version,\n }\n # Python 3.{4,5,6} raises a NotImplementedError for an empty directory\n # Python 3.7 raises a TypeError for an empty directory\n except (NotImplementedError, TypeError):\n pass\n raise ImportError(\"No frontend build assets\")\n\n\ndef initialize_plugins(build_list):\n plugins = []\n for build_item in build_list:\n if \"*\" in build_item:\n plugins += expand_glob(build_item)\n elif build_item:\n # No '*' in the module path, so just add it naively\n plugin = plugin_data(build_item)\n if plugin is not None:\n plugins.append(plugin)\n return plugins\n\n\ndef main():\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\n \"--plugin_file\",\n 
help=\"the filepath to which you'd like to run plugins from\",\n type=str,\n default=None,\n )\n parser.add_argument(\n \"--plugins\",\n help=\"provide a space separated list of plugins you'd like to run\",\n type=str,\n nargs=\"*\",\n default=None,\n )\n parser.add_argument(\n \"--plugin_path\",\n help=\"provide a path to add to the Python path to enable import of the plugins\",\n type=str,\n default=os.getcwd(),\n )\n parser.add_argument(\n \"-o\", \"--output_file\", type=str, default=None, dest=\"output_file\"\n )\n parser.add_argument(\"-v\", \"--verbose\", default=False, action=\"store_true\")\n args = parser.parse_args()\n build_list = []\n\n if args.verbose:\n logger.setLevel(logging.DEBUG)\n\n plugin_path = os.path.realpath(args.plugin_path)\n\n # Add our plugin_path to the path\n sys.path.append(plugin_path)\n\n # Put environment variable setting first to allow customized builds within buildkite through env vars\n if \"BUILD_TIME_PLUGINS\" in os.environ and os.environ[\"BUILD_TIME_PLUGINS\"]:\n build_list = load_plugins_from_file(os.environ[\"BUILD_TIME_PLUGINS\"])\n elif args.plugin_file:\n build_list = load_plugins_from_file(args.plugin_file)\n elif args.plugins:\n build_list = args.plugins\n\n logger.info(\"Gathering relevant modules from {}\".format(build_list))\n\n result = initialize_plugins(build_list)\n\n if args.output_file:\n logger.info(\"Writing webpack_json output to {}\".format(args.output_file))\n with open(args.output_file, \"w\") as f:\n json.dump(result, f)\n else:\n logger.info(\"No output file argument; writing webpack_json output to stdout.\")\n logger.info(json.dumps(result))\n\n # Remove the plugin_path from the path to clean up\n sys.path.remove(plugin_path)\n\n\nif __name__ == \"__main__\":\n main()\n",
"path": "packages/kolibri-tools/lib/webpack_json.py"
}
] | [
{
"content": "import argparse\nimport importlib\nimport json\nimport logging\nimport os\nimport sys\nimport tempfile\n\nfrom pkg_resources import DistributionNotFound\nfrom pkg_resources import get_distribution\nfrom pkg_resources import resource_exists\nfrom pkg_resources import resource_filename\nfrom pkg_resources import resource_isdir\nfrom pkg_resources import resource_listdir\n\nlogger = logging.getLogger(\"webpack_json\")\nlogger.setLevel(level=logging.INFO)\nhandler = logging.StreamHandler()\nhandler.setLevel(logging.INFO)\nlogger.addHandler(handler)\n\nBUILD_CONFIG = \"buildConfig.js\"\n\n\ndef load_plugins_from_file(file_path):\n try:\n import requests\n except ImportError:\n requests = None\n # We have been passed a URL, not a local file path\n if file_path.startswith(\"http\"):\n if requests is None:\n raise ImportError(\"Requests is required to import plugins from urls\")\n print(\n \"Downloading plugins manifest from {file_path}\".format(file_path=file_path)\n )\n _, path = tempfile.mkstemp(suffix=\".txt\", text=True)\n with open(path, \"w\") as f:\n r = requests.get(file_path)\n f.write(r.content)\n file_path = path\n with open(file_path, \"r\") as f:\n return [plugin.strip() for plugin in f.readlines() if plugin.strip()]\n\n\ndef expand_glob(build_item):\n plugins = []\n # Do a very simple check here, only deal with a single * at the end of something!\n if (\n len([item for item in build_item.split(\".\") if item == \"*\"]) > 1\n or build_item.endswith(\"**\")\n or build_item == \"*\"\n or not build_item.endswith(\"*\")\n ):\n logging.error(\"Too many * paths, only use one per module spec\")\n return plugins\n parent_module_path = \".\".join(\n [item for item in build_item.split(\".\") if item and item != \"*\"]\n )\n try:\n for file in resource_listdir(parent_module_path, \".\"):\n if resource_isdir(parent_module_path, file):\n try:\n child_module_path = parent_module_path + \".\" + file\n plugin = plugin_data(child_module_path)\n if plugin is not None:\n plugins.append(plugin)\n except ImportError:\n continue\n except OSError:\n pass\n return plugins\n\n\ndef plugin_data(module_path):\n try:\n if resource_exists(module_path, BUILD_CONFIG):\n plugin_path = os.path.dirname(resource_filename(module_path, BUILD_CONFIG))\n try:\n version = get_distribution(module_path).version\n except (DistributionNotFound, AttributeError):\n try:\n module = importlib.import_module(module_path)\n version = module.__version__\n except (ImportError, AttributeError):\n import kolibri\n\n version = kolibri.__version__\n if module_path.startswith(\"kolibri.\"):\n import kolibri\n\n locale_data_folder = os.path.join(\n os.path.dirname(kolibri.__file__), \"locale\", \"en\", \"LC_MESSAGES\"\n )\n # Is an external plugin, do otherwise!\n else:\n locale_data_folder = os.path.join(\n plugin_path, \"locale\", \"en\", \"LC_MESSAGES\"\n )\n return {\n \"locale_data_folder\": locale_data_folder,\n \"plugin_path\": plugin_path,\n \"version\": version,\n }\n # Python 3.{4,5,6} raises a NotImplementedError for an empty directory\n # Python 3.7 raises a TypeError for an empty directory\n except (NotImplementedError, TypeError):\n pass\n raise ImportError(\"No frontend build assets\")\n\n\ndef initialize_plugins(build_list):\n plugins = []\n for build_item in build_list:\n if \"*\" in build_item:\n plugins += expand_glob(build_item)\n elif build_item:\n # No '*' in the module path, so just add it naively\n plugin = plugin_data(build_item)\n if plugin is not None:\n plugins.append(plugin)\n return plugins\n\n\ndef 
main():\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\n \"--plugin_file\",\n help=\"the filepath to which you'd like to run plugins from\",\n type=str,\n default=None,\n )\n parser.add_argument(\n \"--plugins\",\n help=\"provide a space separated list of plugins you'd like to run\",\n type=str,\n nargs=\"*\",\n default=None,\n )\n parser.add_argument(\n \"--plugin_path\",\n help=\"provide a path to add to the Python path to enable import of the plugins\",\n type=str,\n default=os.getcwd(),\n )\n parser.add_argument(\n \"-o\", \"--output_file\", type=str, default=None, dest=\"output_file\"\n )\n parser.add_argument(\"-v\", \"--verbose\", default=False, action=\"store_true\")\n args = parser.parse_args()\n build_list = []\n\n if args.verbose:\n logger.setLevel(logging.DEBUG)\n\n plugin_path = os.path.realpath(args.plugin_path)\n\n # Add our plugin_path to the path\n sys.path.append(plugin_path)\n\n # Put environment variable setting first to allow customized builds within buildkite through env vars\n if \"BUILD_TIME_PLUGINS\" in os.environ and os.environ[\"BUILD_TIME_PLUGINS\"]:\n build_list = load_plugins_from_file(os.environ[\"BUILD_TIME_PLUGINS\"])\n elif args.plugin_file:\n build_list = load_plugins_from_file(args.plugin_file)\n elif args.plugins:\n build_list = args.plugins\n\n logger.info(\"Gathering relevant modules from {}\".format(build_list))\n\n result = initialize_plugins(build_list)\n\n if args.output_file:\n logger.info(\"Writing webpack_json output to {}\".format(args.output_file))\n with open(args.output_file, \"w\") as f:\n json.dump(result, f)\n else:\n logger.info(\"No output file argument; writing webpack_json output to stdout.\")\n logger.info(json.dumps(result))\n\n # Remove the plugin_path from the path to clean up\n sys.path.remove(plugin_path)\n\n\nif __name__ == \"__main__\":\n main()\n",
"path": "packages/kolibri-tools/lib/webpack_json.py"
}
] | diff --git a/kolibri/core/package.json b/kolibri/core/package.json
index 89b1a9847aa..407ca6f379e 100644
--- a/kolibri/core/package.json
+++ b/kolibri/core/package.json
@@ -40,7 +40,7 @@
"vuex": "^3.1.0"
},
"devDependencies": {
- "kolibri-tools": "0.12.0-beta.3.2",
+ "kolibri-tools": "0.13.0-dev.3",
"responselike": "^1.0.2"
}
-}
+}
\ No newline at end of file
diff --git a/package.json b/package.json
index f32f1d4fc70..057a24fd3ff 100644
--- a/package.json
+++ b/package.json
@@ -54,7 +54,7 @@
"devDependencies": {
"@types/jest": "^24.0.12",
"black-fmt": "https://github.com/learningequality/black-fmt#v0.1.3",
- "kolibri-tools": "0.12.0-beta.3.2",
+ "kolibri-tools": "0.13.0-dev.3",
"yarn-run-all": "^3.1.1"
},
"optionalDependencies": {
@@ -65,4 +65,4 @@
"node": "10.x",
"yarn": ">= 1.12.3"
}
-}
+}
\ No newline at end of file
diff --git a/packages/eslint-plugin-kolibri/package.json b/packages/eslint-plugin-kolibri/package.json
index 1e88fbbcfac..dd57c58f7ca 100644
--- a/packages/eslint-plugin-kolibri/package.json
+++ b/packages/eslint-plugin-kolibri/package.json
@@ -1,6 +1,6 @@
{
"name": "eslint-plugin-kolibri",
- "version": "0.12.0-beta.3.2",
+ "version": "0.13.0-dev.3",
"description": "Custom rules.",
"author": "Learning Equality",
"main": "lib/index.js",
@@ -14,4 +14,4 @@
"node": ">=0.10.0"
},
"license": "MIT"
-}
+}
\ No newline at end of file
diff --git a/packages/kolibri-core-for-export/package.json b/packages/kolibri-core-for-export/package.json
index 04b9050f3ff..830d8eb2024 100644
--- a/packages/kolibri-core-for-export/package.json
+++ b/packages/kolibri-core-for-export/package.json
@@ -1,6 +1,6 @@
{
"name": "kolibri",
- "version": "0.12.0-beta.3.2",
+ "version": "0.13.0-dev.3",
"description": "The Kolibri core API",
"repository": "github.com/learningequality/kolibri",
"author": "Learning Equality",
@@ -8,6 +8,6 @@
"private": false,
"dependencies": {},
"devDependencies": {
- "kolibri-tools": "0.12.0-beta.3.2"
+ "kolibri-tools": "0.13.0-dev.3"
}
-}
+}
\ No newline at end of file
diff --git a/packages/kolibri-tools/.npmignore b/packages/kolibri-tools/.npmignore
index 8b38d75008a..2b60090d23d 100644
--- a/packages/kolibri-tools/.npmignore
+++ b/packages/kolibri-tools/.npmignore
@@ -1 +1 @@
-build.js
+build_kolibri_tools.js
diff --git a/packages/kolibri-tools/lib/read_webpack_json.js b/packages/kolibri-tools/lib/read_webpack_json.js
index 0cd09255ade..4098a251d11 100644
--- a/packages/kolibri-tools/lib/read_webpack_json.js
+++ b/packages/kolibri-tools/lib/read_webpack_json.js
@@ -5,31 +5,11 @@ const temp = require('temp').track();
const webpack_json = path.resolve(path.dirname(__filename), './webpack_json.py');
-function parseConfig(buildConfig, pythonData) {
+function parseConfig(buildConfig, pythonData, configPath, index = null) {
// Set the main entry for this module, set the name based on the data.name and the path to the
// entry file from the data.src_file
const bundleId = buildConfig.bundle_id;
- const webpackConfig = buildConfig.webpack_config;
const pluginPath = pythonData.plugin_path;
- if (typeof webpackConfig.entry === 'string') {
- webpackConfig.entry = {
- [bundleId]: path.join(pluginPath, webpackConfig.entry),
- };
- } else {
- Object.keys(webpackConfig.entry).forEach(key => {
- function makePathAbsolute(entryPath) {
- if (entryPath.startsWith('./') || entryPath.startsWith('../')) {
- return path.join(pluginPath, entryPath);
- }
- return entryPath;
- }
- if (Array.isArray(webpackConfig.entry[key])) {
- webpackConfig.entry[key] = webpackConfig.entry[key].map(makePathAbsolute);
- } else {
- webpackConfig.entry[key] = makePathAbsolute(webpackConfig.entry[key]);
- }
- });
- }
return {
name: bundleId,
static_dir: path.join(pluginPath, 'static'),
@@ -37,7 +17,8 @@ function parseConfig(buildConfig, pythonData) {
locale_data_folder: pythonData.locale_data_folder,
plugin_path: pluginPath,
version: pythonData.version,
- config: webpackConfig,
+ config_path: configPath,
+ index,
};
}
@@ -84,13 +65,14 @@ module.exports = function({ pluginFile, plugins, pluginPath }) {
const parsedResult = JSON.parse(result);
const output = [];
parsedResult.forEach(pythonData => {
- const buildConfig = require(path.join(pythonData.plugin_path, 'buildConfig.js'));
+ const configPath = path.join(pythonData.plugin_path, 'buildConfig.js');
+ const buildConfig = require(configPath);
if (Array.isArray(buildConfig)) {
- buildConfig.forEach(configObj => {
- output.push(parseConfig(configObj, pythonData));
+ buildConfig.forEach((configObj, i) => {
+ output.push(parseConfig(configObj, pythonData, configPath, i));
});
} else {
- output.push(parseConfig(buildConfig, pythonData));
+ output.push(parseConfig(buildConfig, pythonData, configPath));
}
});
return output;
diff --git a/packages/kolibri-tools/lib/webpack.config.base.js b/packages/kolibri-tools/lib/webpack.config.base.js
index 141090bcb2a..86169fc5a5f 100644
--- a/packages/kolibri-tools/lib/webpack.config.base.js
+++ b/packages/kolibri-tools/lib/webpack.config.base.js
@@ -38,7 +38,7 @@ const WebpackMessages = require('./webpackMessages');
module.exports = (data, { mode = 'development', hot = false } = {}) => {
if (
typeof data.name === 'undefined' ||
- typeof data.config === 'undefined' ||
+ typeof data.config_path === 'undefined' ||
typeof data.static_dir === 'undefined' ||
typeof data.stats_file === 'undefined' ||
typeof data.locale_data_folder === 'undefined' ||
@@ -48,7 +48,32 @@ module.exports = (data, { mode = 'development', hot = false } = {}) => {
logging.error(data.name + ' plugin is misconfigured, missing parameter(s)');
return;
}
-
+ const configData = require(data.config_path);
+ let webpackConfig;
+ if (data.index !== null) {
+ webpackConfig = configData[data.index].webpack_config;
+ } else {
+ webpackConfig = configData.webpack_config;
+ }
+ if (typeof webpackConfig.entry === 'string') {
+ webpackConfig.entry = {
+ [data.name]: path.join(data.plugin_path, webpackConfig.entry),
+ };
+ } else {
+ Object.keys(webpackConfig.entry).forEach(key => {
+ function makePathAbsolute(entryPath) {
+ if (entryPath.startsWith('./') || entryPath.startsWith('../')) {
+ return path.join(data.plugin_path, entryPath);
+ }
+ return entryPath;
+ }
+ if (Array.isArray(webpackConfig.entry[key])) {
+ webpackConfig.entry[key] = webpackConfig.entry[key].map(makePathAbsolute);
+ } else {
+ webpackConfig.entry[key] = makePathAbsolute(webpackConfig.entry[key]);
+ }
+ });
+ }
const production = mode === 'production';
const cssInsertionLoader = hot ? 'style-loader' : MiniCssExtractPlugin.loader;
@@ -82,7 +107,7 @@ module.exports = (data, { mode = 'development', hot = false } = {}) => {
let externals;
- if (!data.config.output || data.config.output.library !== kolibriName) {
+ if (!webpackConfig.output || webpackConfig.output.library !== kolibriName) {
// If this is not the core bundle, then we need to add the external library mappings.
externals = coreExternals;
} else {
@@ -236,7 +261,7 @@ module.exports = (data, { mode = 'development', hot = false } = {}) => {
stats: 'minimal',
};
- bundle = merge.smart(bundle, data.config);
+ bundle = merge.smart(bundle, webpackConfig);
return bundle;
};
diff --git a/packages/kolibri-tools/lib/webpack_json.py b/packages/kolibri-tools/lib/webpack_json.py
index 976d6a6de22..b1fbd757008 100644
--- a/packages/kolibri-tools/lib/webpack_json.py
+++ b/packages/kolibri-tools/lib/webpack_json.py
@@ -15,6 +15,9 @@
logger = logging.getLogger("webpack_json")
logger.setLevel(level=logging.INFO)
+handler = logging.StreamHandler()
+handler.setLevel(logging.INFO)
+logger.addHandler(handler)
BUILD_CONFIG = "buildConfig.js"
diff --git a/packages/kolibri-tools/package.json b/packages/kolibri-tools/package.json
index 0f615cd15d7..4f52b5b7e62 100644
--- a/packages/kolibri-tools/package.json
+++ b/packages/kolibri-tools/package.json
@@ -1,6 +1,6 @@
{
"name": "kolibri-tools",
- "version": "0.12.0-beta.3.2",
+ "version": "0.13.0-dev.3",
"description": "Tools for building Kolibri frontend plugins",
"main": "lib/cli.js",
"repository": "github.com/learningequality/kolibri",
@@ -32,7 +32,7 @@
"eslint-config-prettier": "^2.9.0",
"eslint-plugin-import": "^2.14.0",
"eslint-plugin-jest": "^21.26.1",
- "eslint-plugin-kolibri": "0.12.0-beta.3.2",
+ "eslint-plugin-kolibri": "0.13.0-dev.3",
"eslint-plugin-vue": "^5.2.2",
"espree": "^5.0.1",
"esquery": "^1.0.1",
@@ -93,6 +93,6 @@
"readline-sync": "^1.4.9"
},
"optionalDependencies": {
- "kolibri": "0.12.0-beta.3.2"
+ "kolibri": "0.13.0-dev.3"
}
-}
+}
\ No newline at end of file
diff --git a/packages/kolibri-tools/test/test_webpack.config.base.spec.js b/packages/kolibri-tools/test/test_webpack.config.base.spec.js
index d4d471865b1..fe77c30aef8 100644
--- a/packages/kolibri-tools/test/test_webpack.config.base.spec.js
+++ b/packages/kolibri-tools/test/test_webpack.config.base.spec.js
@@ -16,6 +16,16 @@ jest.mock('../lib/logging', () => ({
},
}));
+jest.mock(
+ 'test',
+ () => ({
+ webpack_config: {
+ entry: 'test',
+ },
+ }),
+ { virtual: true }
+);
+
const baseData = {
name: 'kolibri.plugin.test.test_plugin',
stats_file: 'output.json',
@@ -24,11 +34,8 @@ const baseData = {
locale_data_folder: 'kolibri/locale/test',
version: 'test',
plugin_path: 'kolibri/plugin',
- config: {
- entry: {
- test_plugin: 'src/file.js',
- },
- },
+ config_path: 'test',
+ index: null,
};
describe('webpackConfigBase', function() {
@@ -86,9 +93,9 @@ describe('webpackConfigBase', function() {
expectParsedDataIsUndefined(data);
});
});
- describe('input is missing config, bundles output', function() {
+ describe('input is missing config_path, bundles output', function() {
it('should be undefined', function() {
- delete data.config;
+ delete data.config_path;
expectParsedDataIsUndefined(data);
});
});
diff --git a/requirements/base.txt b/requirements/base.txt
index f2df3082486..f3274de4b98 100644
--- a/requirements/base.txt
+++ b/requirements/base.txt
@@ -17,7 +17,7 @@ porter2stemmer==1.0
unicodecsv==0.14.1
metafone==0.5
le-utils==0.1.17
-kolibri_exercise_perseus_plugin==1.2.0
+kolibri_exercise_perseus_plugin==1.2.1
jsonfield==2.0.2
morango==0.4.9
requests-toolbelt==0.8.0
|
oppia__oppia-8773 | All the Frontend services should be documented with jsdoc.
**This starter issue is currently on hold because we do not have the capacity to support new contributors working on it.**
--------------
We aim to document all the files listed below.
Each of the below-listed files should have a file overview signifying the purpose of the file,
and each function should have its meaning, arguments and return statement documented with the help of jsdoc decorators like `@fileoverview`, `@param`, `@return`.
You can go through these services to get some reference:
- graph-input-rules.service.ts
- exploration-html-formatter.service.ts
- graph-utils.service.ts
- alerts.service.ts
- playthrough-issues.service.ts
**Deducing variable's significance and the meaning from the code:**
Try to execute the code by running a dev server locally, log the variable type (you can use typeof for this), and try to find out the purpose of the variable (what is the variable storing, what is it being used for, what would break if we removed it?). To figure out how to execute the code, grep to see what methods call the function, and add console logs to ensure that the code is being executed when you perform the corresponding action in the UI. (As a sanity check, you might also want to ensure that the suspected variable type is consistent with any TypeScript types that are already provided.)
**Overview of the function:**
Finding or deducing the overview or purpose of a function can sometimes be a bit tricky; some general advice is to think about the following:
- Why is this function even required, and what does it help us achieve? Try to think from the perspective of the person who created the function and mimic the thought process of the original author.
- Look at the callers of the function: see all the places where this function is being called and try to get a better understanding of it.
- If you are unable to understand the purpose of the function, feel free to reach out to your mentor (always happy to help).
Please go through this [doc](https://docs.google.com/document/d/1jr8X3oqW7WqKxOgsK8b4TxIraODAV23vDJgYso1R7Pk/edit?usp=sharing) for a deeper context.
**Please don't include types in the JSDoc, use the TypeScript annotations for that.**
PRs for reference: [#8773](https://github.com/oppia/oppia/pull/8773)
**To be assigned to a file or for any queries, comment on the thread and tag @nithusha21.**
The service files listed below need to be documented:
- [ ] admin-config-tab-backend-api.service.ts
- [ ] admin-data.service.ts
- [ ] admin-router.service.ts @anumehaagrawal
- [ ] admin-task-manager.service.ts @larakhdavies
- [ ] alerts.service.ts
- [ ] angular-name.service.ts @parulpriyedarshani
- [ ] answer-classification.service.ts
- [ ] answer-groups-cache.service.ts
- [ ] assets-backend-api.service.ts
- [ ] audio-player.service.ts
- [ ] audio-preloader.service.ts
- [ ] audio-translation-language.service.ts @kaylahardie
- [ ] audio-translation-manager.service.ts
- [ ] autogenerated-audio-player.service.ts @BlakeHan01
- [ ] autoplayed-videos.service.ts @darkpsychic
- [ ] autosave-info-modals.service.ts
- [ ] background-mask.service.ts
- [ ] base-undo-redo.service.ts
- [ ] browser-checker.service.ts
- [ ] change-list.service.ts
- [ ] changes-in-human-readable-form.service.ts
- [ ] classroom-backend-api.service.ts @ReshuKumari
- [ ] code-normalizer.service.ts
- [ ] collection-creation-backend-api.service.ts
- [ ] collection-creation.service.ts
- [ ] collection-editor-state.service.ts
- [ ] collection-linearizer.service.ts
- [ ] collection-rights-backend-api.service.ts
- [ ] collection-update.service.ts
- [ ] collection-validation.service.ts
- [ ] compare-versions.service.ts
- [ ] compute-graph.service.ts
- [ ] concept-card-backend-api.service.ts
- [ ] construct-translation-ids.service.ts @BlakeHan01
- [ ] context.service.ts
- [ ] contribution-and-review.service.ts @lelouchB
- [ ] contribution-opportunities-backend-api.service.ts
- [ ] contribution-opportunities.service.ts
- [ ] creator-dashboard-backend-api.service.ts
- [ ] csrf-token.service.ts
- [ ] current-interaction.service.ts
- [ ] date-time-format.service.ts @linnhallonqvist
- [ ] debouncer.service.ts
- [ ] debug-info-tracker.service.ts
- [ ] device-info.service.ts
- [ ] document-attribute-customization.service.ts
- [ ] editability.service.ts
- [ ] editable-collection-backend-api.service.ts
- [ ] editable-exploration-backend-api.service.ts
- [ ] editable-question-backend-api.service.ts
- [ ] editable-skill-backend-api.service.ts
- [ ] editable-story-backend-api.service.ts
- [ ] editable-topic-backend-api.service.ts
- [ ] editor-first-time-events.service.ts
- [ ] email-dashboard-data.service.ts
- [ ] exploration-automatic-text-to-speech.service.ts
- [ ] exploration-category.service.ts
- [ ] exploration-correctness-feedback.service.ts
- [ ] exploration-creation.service.ts
- [ ] exploration-data.service.ts
- [ ] exploration-diff.service.ts
- [ ] exploration-embed-button.service.ts
- [ ] exploration-engine.service.ts
- [ ] exploration-features-backend-api.service.ts
- [ ] exploration-features.service.ts @parulpriyedarshani
- [ ] exploration-html-formatter.service.ts
- [ ] exploration-init-state-name.service.ts
- [ ] exploration-language-code.service.ts
- [ ] exploration-objective.service.ts
- [ ] exploration-param-changes.service.ts
- [ ] exploration-param-specs.service.ts
- [ ] exploration-player-state.service.ts
- [ ] exploration-property.service.ts
- [ ] exploration-recommendations.service.ts
- [ ] exploration-rights.service.ts
- [ ] exploration-save.service.ts
- [ ] exploration-states.service.ts
- [ ] exploration-summary-backend-api.service.ts
- [ ] exploration-tags.service.ts @shrutisatish00
- [ ] exploration-title.service.ts
- [ ] exploration-warnings.service.ts
- [ ] expression-evaluator.service.ts
- [ ] expression-interpolation.service.ts
- [ ] expression-parser.service.ts
- [ ] expression-syntax-tree.service.ts
- [ ] expression-type-parser.service.ts
- [ ] extension-tag-assembler.service.ts
- [ ] extract-image-filenames-from-state.service.ts
- [ ] fatigue-detection.service.ts
- [ ] focus-manager.service.ts
- [ ] generate-content-id.service.ts
- [ ] graph-data.service.ts
- [ ] graph-layout.service.ts
- [ ] guest-collection-progress.service.ts
- [ ] hint-and-solution-modal.service.ts
- [ ] hints-and-solution-manager.service.ts
- [ ] html-escaper.service.ts @tianqi-wu
- [ ] id-generation.service.ts
- [ ] image-preloader.service.ts
- [ ] image-upload-helper.service.ts
- [ ] improvement-modal.service.ts
- [ ] improvement-task.service.ts
- [ ] improvements-display.service.ts
- [ ] improvements.service.ts
- [ ] interaction-details-cache.service.ts
- [ ] language-util.service.ts
- [ ] learner-action-render.service.ts
- [ ] learner-answer-details-backend-api.service.ts
- [ ] learner-answer-details-data.service.ts
- [ ] learner-answer-info.service.ts
- [ ] learner-dashboard-backend-api.service.ts
- [ ] learner-dashboard-ids-backend-api.service.ts
- [ ] learner-params.service.ts
- [ ] learner-playlist.service.ts
- [ ] learner-view-rating.service.ts
- [ ] local-storage.service.ts
- [ ] logger.service.ts @remigourdon
- [ ] messenger.service.ts @remigourdon
- [ ] meta-tag-customization.service.ts
- [ ] navigation.service.ts
- [ ] nested-directives-recursion-timeout-prevention.service.ts
- [ ] number-attempts.service.ts @gp201
- [ ] page-title.service.ts
- [ ] parameter-metadata.service.ts
- [ ] player-correctness-feedback-enabled.service.ts
- [ ] player-position.service.ts @tianqi-wu
- [ ] player-transcript.service.ts
- [ ] playthrough-issues-backend-api.service.ts
- [ ] playthrough-issues.service.ts
- [ ] playthrough.service.ts
- [ ] prediction-algorithm-registry.service.ts
- [ ] pretest-question-backend-api.service.ts
- [ ] promo-bar.service.ts
- [ ] question-backend-api.service.ts
- [ ] question-creation.service.ts
- [ ] question-player-engine.service.ts
- [ ] question-player-state.service.ts
- [ ] question-suggestion.service.ts
- [ ] question-undo-redo.service.ts
- [ ] question-update.service.ts
- [ ] questions-list.service.ts
- [ ] rating-computation.service.ts
- [ ] read-only-collection-backend-api.service.ts
- [ ] read-only-exploration-backend-api.service.ts
- [ ] refresher-exploration-confirmation-modal.service.ts
- [ ] request-interceptor.service.ts
- [ ] responses.service.ts
- [ ] review-test-backend-api.service.ts
- [ ] review-test-engine.service.ts
- [ ] router.service.ts
- [ ] rte-helper.service.ts
- [ ] schema-default-value.service.ts
- [ ] schema-undefined-last-element.service.ts
- [ ] search-explorations-backend-api.service.ts
- [ ] search.service.ts
- [ ] sidebar-status.service.ts
- [ ] site-analytics.service.ts
- [ ] skill-creation.service.ts
- [ ] skill-editor-routing.service.ts
- [ ] skill-editor-state.service.ts
- [ ] skill-mastery-backend-api.service.ts
- [ ] skill-rights-backend-api.service.ts
- [ ] skill-update.service.ts
- [ ] solution-validity.service.ts
- [ ] solution-verification.service.ts
- [ ] speech-synthesis-chunker.service.ts
- [ ] state-classifier-mapping.service.ts
- [ ] state-content.service.ts
- [ ] state-customization-args.service.ts
- [ ] state-editor.service.ts
- [ ] state-hints.service.ts
- [ ] state-improvement-suggestion.service.ts @bobbychen1999
- [ ] state-interaction-id.service.ts
- [ ] state-name.service.ts
- [ ] state-param-changes.service.ts
- [ ] state-property.service.ts
- [ ] state-recorded-voiceovers.service.ts
- [ ] state-rules-stats.service.ts
- [ ] state-solicit-answer-details.service.ts
- [ ] state-solution.service.ts
- [ ] state-top-answers-stats-backend-api.service.ts
- [ ] state-top-answers-stats.service.ts
- [ ] state-tutorial-first-time.service.ts @akeeoaobh
- [ ] state-written-translations.service.ts
- [ ] stats-reporting.service.ts
- [ ] story-creation.service.ts
- [ ] story-editor-state.service.ts @pengcheng95
- [ ] story-update.service.ts
- [ ] story-viewer-backend-api.service.ts
- [ ] subtopic-viewer-backend-api.service.ts
- [ ] suggestion-modal-for-creator-view.service.ts
- [ ] suggestion-modal-for-exploration-editor.service.ts
- [ ] suggestion-modal-for-exploration-player.service.ts
- [ ] suggestion-modal-for-learner-dashboard.service.ts
- [ ] suggestion-modal.service.ts
- [ ] thread-data.service.ts
- [ ] thread-status-display.service.ts
- [ ] topic-creation.service.ts
- [ ] topic-editor-routing.service.ts
- [ ] topic-editor-state.service.ts
- [ ] topic-rights-backend-api.service.ts
- [ ] topic-update.service.ts
- [ ] topic-viewer-backend-api.service.ts
- [ ] topics-and-skills-dashboard-backend-api.service.ts
- [ ] training-data-editor-panel.service.ts
- [ ] training-data.service.ts @felicityzhao99
- [ ] training-modal.service.ts @varuncj02
- [ ] translate-text.service.ts
- [ ] translation-file-hash-loader.service.ts
- [ ] translation-language.service.ts
- [ ] translation-status.service.ts
- [ ] translation-tab-active-content-id.service.ts
- [ ] translation-tab-active-mode.service.ts
- [ ] undo-redo.service.ts
- [ ] url-interpolation.service.ts @qinghaoyang
- [ ] url.service.ts @tianqi-wu
- [ ] user-email-preferences.service.ts @felicityzhao99
- [ ] user-exploration-permissions.service.ts
- [ ] user.service.ts
- [ ] utils.service.ts @rriyaldhi
- [ ] validators.service.ts
- [ ] version-tree.service.ts
- [ ] voiceover-recording.service.ts
- [ ] window-dimensions.service.ts @asafprivman
- [ ] window-ref.service.ts @larakhdavies
Note: For a guide on how to access Oppia's webpages, see [this](https://github.com/oppia/oppia/wiki/How-to-access-Oppia-webpages).
| [
{
"content": "# Copyright 2019 The Oppia Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"This script produces the expression parser.\"\"\"\n\nfrom __future__ import absolute_import # pylint: disable=import-only-modules\nfrom __future__ import unicode_literals # pylint: disable=import-only-modules\n\nimport argparse\nimport fileinput\nimport os\nimport re\nimport subprocess\n\nimport python_utils\n\nfrom . import common\nfrom . import setup\n\n_PARSER = argparse.ArgumentParser(description=\"\"\"\nRun this script from the oppia root folder:\n python -m scripts.create_expression_parser\nThe root folder MUST be named 'oppia'.\n\"\"\")\n\n\ndef main(args=None):\n \"\"\"Produces the expression parser.\"\"\"\n unused_parsed_args = _PARSER.parse_args(args=args)\n setup.main(args=[])\n\n expression_parser_definition = os.path.join(\n 'core', 'templates', 'expressions', 'parser.pegjs')\n expression_parser_js = os.path.join(\n 'core', 'templates', 'expressions', 'parser.js')\n\n common.install_npm_library('pegjs', '0.8.0', common.OPPIA_TOOLS_DIR)\n\n subprocess.check_call([\n os.path.join(common.NODE_MODULES_PATH, 'pegjs', 'bin', 'pegjs'),\n expression_parser_definition, expression_parser_js])\n\n python_utils.PRINT('Done!')\n\n\nif __name__ == '__main__':\n main()\n",
"path": "scripts/create_expression_parser.py"
}
] | [
{
"content": "# Copyright 2019 The Oppia Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"This script produces the expression parser.\"\"\"\n\nfrom __future__ import absolute_import # pylint: disable=import-only-modules\nfrom __future__ import unicode_literals # pylint: disable=import-only-modules\n\nimport argparse\nimport os\nimport subprocess\n\nimport python_utils\n\nfrom . import common\nfrom . import setup\n\n_PARSER = argparse.ArgumentParser(description=\"\"\"\nRun this script from the oppia root folder:\n python -m scripts.create_expression_parser\nThe root folder MUST be named 'oppia'.\n\"\"\")\n\n\ndef main(args=None):\n \"\"\"Produces the expression parser.\"\"\"\n unused_parsed_args = _PARSER.parse_args(args=args)\n setup.main(args=[])\n\n expression_parser_definition = os.path.join(\n 'core', 'templates', 'expressions', 'parser.pegjs')\n expression_parser_js = os.path.join(\n 'core', 'templates', 'expressions', 'parser.js')\n\n common.install_npm_library('pegjs', '0.8.0', common.OPPIA_TOOLS_DIR)\n\n subprocess.check_call([\n os.path.join(common.NODE_MODULES_PATH, 'pegjs', 'bin', 'pegjs'),\n expression_parser_definition, expression_parser_js])\n\n python_utils.PRINT('Done!')\n\n\nif __name__ == '__main__':\n main()\n",
"path": "scripts/create_expression_parser.py"
}
] | diff --git a/core/templates/pages/exploration-editor-page/services/user-email-preferences.service.ts b/core/templates/pages/exploration-editor-page/services/user-email-preferences.service.ts
index 52e55b80ca706..aedb11d3fc0f0 100644
--- a/core/templates/pages/exploration-editor-page/services/user-email-preferences.service.ts
+++ b/core/templates/pages/exploration-editor-page/services/user-email-preferences.service.ts
@@ -34,24 +34,42 @@ angular.module('oppia').factory('UserEmailPreferencesService', [
this.feedbackNotificationsMuted = feedbackNotificationsMuted;
this.suggestionNotificationsMuted = suggestionNotificationsMuted;
},
+ /**
+ * @return {boolean} Whether the feedback notification is muted.
+ */
areFeedbackNotificationsMuted: function() {
return this.feedbackNotificationsMuted;
},
+ /**
+ * @return {boolean} Whether the suggestion notification is muted.
+ */
areSuggestionNotificationsMuted: function() {
return this.suggestionNotificationsMuted;
},
+ /**
+ * Set the message type to feedback and mute to true or false.
+ * @param {boolean} mute - Whether the feedback notification is muted.
+ */
setFeedbackNotificationPreferences: function(mute) {
this.saveChangeToBackend({
message_type: MESSAGE_TYPE_FEEDBACK,
mute: mute
});
},
+ /**
+ * Set the message type to suggestion and mute to true or false.
+ * @param {boolean} mute - Whether the suggestion notification is muted.
+ */
setSuggestionNotificationPreferences: function(mute) {
this.saveChangeToBackend({
message_type: MESSAGE_TYPE_SUGGESTION,
mute: mute
});
},
+ /**
+ * Save the change of message_type and mute to backend.
+ * @param {object} requestParams - Info about message_type and mute.
+ */
saveChangeToBackend: function(requestParams) {
var that = this;
var emailPreferencesUrl = UrlInterpolationService.interpolateUrl(
diff --git a/scripts/create_expression_parser.py b/scripts/create_expression_parser.py
index d34d2ebd5039f..0695d6835fd41 100644
--- a/scripts/create_expression_parser.py
+++ b/scripts/create_expression_parser.py
@@ -18,9 +18,7 @@
from __future__ import unicode_literals # pylint: disable=import-only-modules
import argparse
-import fileinput
import os
-import re
import subprocess
import python_utils
|
freedomofpress__securedrop-6051 | Alembic operations fail with multiple head revisions
## Description
All Alembic operations fail with Alembic error:
ERROR [alembic.util.messaging] Multiple head revisions are present for given argument 'head'; please specify a specific target revision, '<branchname>@head' to narrow to a specific head, or 'heads' for all heads
Cf. consistent recent failures of CI jobs `app-tests` and `staging-test-with-rebase` since #5974.
## Steps to Reproduce
`make test` on `develop`; open or push to a PR; etc.
## Expected Behavior
Alembic operations succeed and Alembic-based tests pass.
## Actual Behavior
All Alembic operations and tests fail with Alembic error:
ERROR [alembic.util.messaging] Multiple head revisions are present for given argument 'head'; please specify a specific target revision, '<branchname>@head' to narrow to a specific head, or 'heads' for all heads
## Comments
This is essentially an Alembic-level merge-conflict. PR forthcoming with the one-line fix.
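For illustration, a minimal sketch of the kind of one-line fix involved (mirroring the change in the accompanying patch, not an authoritative description of the final PR): the newer migration's `down_revision` is re-pointed at the other branch's head, so the revision graph becomes linear again.

```python
# Sketch: revision identifiers at the top of the 1ddb81fb88c2 migration module.
# Re-pointing down_revision at b060f38c0c31 removes the second head
# from the Alembic revision graph.
revision = '1ddb81fb88c2'
down_revision = 'b060f38c0c31'  # was '92fba0be98e9', which left two heads
branch_labels = None
depends_on = None
```

With the history linear again, `alembic heads` should report a single head and operations targeting `head` should resolve unambiguously.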
| [
{
"content": "\"\"\"unique_index_for_instanceconfig_valid_until\n\nRevision ID: 1ddb81fb88c2\nRevises: 92fba0be98e9\nCreate Date: 2021-06-04 17:28:25.725563\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '1ddb81fb88c2'\ndown_revision = '92fba0be98e9'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n with op.batch_alter_table('instance_config', schema=None) as batch_op:\n batch_op.create_index('ix_one_active_instance_config', [sa.text('valid_until IS NULL')], unique=True, sqlite_where=sa.text('valid_until IS NULL'))\n\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n with op.batch_alter_table('instance_config', schema=None) as batch_op:\n batch_op.drop_index('ix_one_active_instance_config')\n\n # ### end Alembic commands ###\n",
"path": "securedrop/alembic/versions/1ddb81fb88c2_unique_index_for_instanceconfig_valid_.py"
}
] | [
{
"content": "\"\"\"unique_index_for_instanceconfig_valid_until\n\nRevision ID: 1ddb81fb88c2\nRevises: 92fba0be98e9\nCreate Date: 2021-06-04 17:28:25.725563\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '1ddb81fb88c2'\ndown_revision = 'b060f38c0c31'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n with op.batch_alter_table('instance_config', schema=None) as batch_op:\n batch_op.create_index('ix_one_active_instance_config', [sa.text('valid_until IS NULL')], unique=True, sqlite_where=sa.text('valid_until IS NULL'))\n\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n with op.batch_alter_table('instance_config', schema=None) as batch_op:\n batch_op.drop_index('ix_one_active_instance_config')\n\n # ### end Alembic commands ###\n",
"path": "securedrop/alembic/versions/1ddb81fb88c2_unique_index_for_instanceconfig_valid_.py"
}
] | diff --git a/securedrop/alembic/versions/1ddb81fb88c2_unique_index_for_instanceconfig_valid_.py b/securedrop/alembic/versions/1ddb81fb88c2_unique_index_for_instanceconfig_valid_.py
index 74342aed1e..ec65d3a49d 100644
--- a/securedrop/alembic/versions/1ddb81fb88c2_unique_index_for_instanceconfig_valid_.py
+++ b/securedrop/alembic/versions/1ddb81fb88c2_unique_index_for_instanceconfig_valid_.py
@@ -11,7 +11,7 @@
# revision identifiers, used by Alembic.
revision = '1ddb81fb88c2'
-down_revision = '92fba0be98e9'
+down_revision = 'b060f38c0c31'
branch_labels = None
depends_on = None
|
spack__spack-18268 | Installation issue: dbus (missing libsm dependency)
I am trying to install visit, and I am hitting an error when it tries to install dbus. This appears
to be due to dbus depending on libSM (and through that libuuid), but not declaring that dependency in Spack. So in my build of visit, the libuuid dependency is picked up and set to use the spack installed libuuid via some other package visit depends on, but dbus ends up using the system installed libSM, and there is a mismatch between the two. But the dbus
package should not be linking against system libSM.
### Steps to reproduce the issue
I am trying to install visit, and I am hitting an error when it tries to install dbus:
spack install [email protected]%[email protected] ^[email protected]
eventually aborts with
CCLD dbus-run-session
/lib/../lib64/libSM.so: undefined reference to `uuid_unparse_lower@UUID_1.0'
/lib/../lib64/libSM.so: undefined reference to `uuid_generate@UUID_1.0'
collect2: error: ld returned 1 exit status
Error appears due to the attempt to link the system /lib64/libSM.so
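A minimal sketch of the kind of fix this points to (an excerpt mirroring the dependency added for this issue, not the full recipe): declare libsm as a dependency of the dbus package so that Spack's libSM, and through it Spack's libuuid, gets linked instead of the system copies.

```python
# var/spack/repos/builtin/packages/dbus/package.py (excerpt, sketch of the fix)
from spack import *


class Dbus(Package):
    """D-Bus message bus system."""

    homepage = "http://dbus.freedesktop.org/"
    url = "http://dbus.freedesktop.org/releases/dbus/dbus-1.8.8.tar.gz"

    depends_on('pkgconfig', type='build')
    depends_on('expat')
    depends_on('glib')
    depends_on('libsm')  # previously missing, so the system /lib64/libSM.so was picked up
```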
### Information on your system
spack debug report
* **Spack:** 0.14.2
* **Python:** 2.7.16
* **Platform:** linux-rhel7-broadwell
### Additional information
[spack-build-env.txt](https://github.com/spack/spack/files/5125717/spack-build-env.txt)
[spack-build-out.txt](https://github.com/spack/spack/files/5125718/spack-build-out.txt)
No maintainers for dbus
### General information
- [x] I have run `spack debug report` and reported the version of Spack/Python/Platform
- [x] I have run `spack maintainers <name-of-the-package>` and @mentioned any maintainers
- [x] I have uploaded the build log and environment files
- [x] I have searched the issues of this repo and believe this is not a duplicate
| [
{
"content": "# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\nfrom spack import *\n\n\nclass Dbus(Package):\n \"\"\"D-Bus is a message bus system, a simple way for applications to\n talk to one another. D-Bus supplies both a system daemon (for\n events such new hardware device printer queue ) and a\n per-user-login-session daemon (for general IPC needs among user\n applications). Also, the message bus is built on top of a\n general one-to-one message passing framework, which can be used\n by any two applications to communicate directly (without going\n through the message bus daemon).\"\"\"\n\n homepage = \"http://dbus.freedesktop.org/\"\n url = \"http://dbus.freedesktop.org/releases/dbus/dbus-1.8.8.tar.gz\"\n\n version('1.12.8', sha256='e2dc99e7338303393b6663a98320aba6a63421bcdaaf571c8022f815e5896eb3')\n version('1.11.2', sha256='5abc4c57686fa82669ad0039830788f9b03fdc4fff487f0ccf6c9d56ba2645c9')\n version('1.9.0', sha256='38ebc695b5cbbd239e0f149aa5d5395f0051a0fec1b74f21ff2921b22a31c171')\n version('1.8.8', sha256='dfab263649a979d0fff64a30cac374891a8e9940350e41f3bbd7679af32bd1fd')\n version('1.8.6', sha256='eded83ca007b719f32761e60fd8b9ffd0f5796a4caf455b01b5a5ef740ebd23f')\n version('1.8.4', sha256='3ef63dc8d0111042071ee7f7bafa0650c6ce2d7be957ef0b7ec269495a651ff8')\n version('1.8.2', sha256='5689f7411165adc953f37974e276a3028db94447c76e8dd92efe910c6d3bae08')\n\n depends_on('pkgconfig', type='build')\n depends_on('expat')\n depends_on('glib')\n\n def install(self, spec, prefix):\n configure(\n \"--prefix=%s\" % prefix,\n \"--disable-systemd\",\n \"--disable-launchd\")\n make()\n make(\"install\")\n\n # dbus needs a machine id generated after install\n dbus_uuidgen = Executable(join_path(prefix.bin, 'dbus-uuidgen'))\n dbus_uuidgen('--ensure')\n",
"path": "var/spack/repos/builtin/packages/dbus/package.py"
}
] | [
{
"content": "# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\nfrom spack import *\n\n\nclass Dbus(Package):\n \"\"\"D-Bus is a message bus system, a simple way for applications to\n talk to one another. D-Bus supplies both a system daemon (for\n events such new hardware device printer queue ) and a\n per-user-login-session daemon (for general IPC needs among user\n applications). Also, the message bus is built on top of a\n general one-to-one message passing framework, which can be used\n by any two applications to communicate directly (without going\n through the message bus daemon).\"\"\"\n\n homepage = \"http://dbus.freedesktop.org/\"\n url = \"http://dbus.freedesktop.org/releases/dbus/dbus-1.8.8.tar.gz\"\n\n version('1.12.8', sha256='e2dc99e7338303393b6663a98320aba6a63421bcdaaf571c8022f815e5896eb3')\n version('1.11.2', sha256='5abc4c57686fa82669ad0039830788f9b03fdc4fff487f0ccf6c9d56ba2645c9')\n version('1.9.0', sha256='38ebc695b5cbbd239e0f149aa5d5395f0051a0fec1b74f21ff2921b22a31c171')\n version('1.8.8', sha256='dfab263649a979d0fff64a30cac374891a8e9940350e41f3bbd7679af32bd1fd')\n version('1.8.6', sha256='eded83ca007b719f32761e60fd8b9ffd0f5796a4caf455b01b5a5ef740ebd23f')\n version('1.8.4', sha256='3ef63dc8d0111042071ee7f7bafa0650c6ce2d7be957ef0b7ec269495a651ff8')\n version('1.8.2', sha256='5689f7411165adc953f37974e276a3028db94447c76e8dd92efe910c6d3bae08')\n\n depends_on('pkgconfig', type='build')\n depends_on('expat')\n depends_on('glib')\n depends_on('libsm')\n\n def install(self, spec, prefix):\n configure(\n \"--prefix=%s\" % prefix,\n \"--disable-systemd\",\n \"--disable-launchd\")\n make()\n make(\"install\")\n\n # dbus needs a machine id generated after install\n dbus_uuidgen = Executable(join_path(prefix.bin, 'dbus-uuidgen'))\n dbus_uuidgen('--ensure')\n",
"path": "var/spack/repos/builtin/packages/dbus/package.py"
}
] | diff --git a/var/spack/repos/builtin/packages/dbus/package.py b/var/spack/repos/builtin/packages/dbus/package.py
index 31495a06b5510a..f47f7f4b16265d 100644
--- a/var/spack/repos/builtin/packages/dbus/package.py
+++ b/var/spack/repos/builtin/packages/dbus/package.py
@@ -30,6 +30,7 @@ class Dbus(Package):
depends_on('pkgconfig', type='build')
depends_on('expat')
depends_on('glib')
+ depends_on('libsm')
def install(self, spec, prefix):
configure(
|
oobabooga__text-generation-webui-4905 | coqui_tts fails to load as assumes interactive sessions to accept ToS
### Describe the bug
When enabled, coqui_tts prevents textgen from starting because it expects an interactive session in which a user accepts a ToS agreement.
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Reproduction
- Enable coqui_tts
- Restart textgen
- Note that textgen never starts
- Check console logs
```
2023-12-12 22:13:22 INFO:Loading the extension "coqui_tts"...
[XTTS] Loading XTTS...
> You must agree to the terms of service to use this model.
| > Please see the terms of service at https://coqui.ai/cpml.txt
| > "I have read, understood and agreed to the Terms and Conditions." - [y/n]
```
- No way to accept non-interactively
### Screenshot
_No response_
### Logs
```shell
INFO: Started server process [37]
INFO: Waiting for application startup.
INFO: Application startup complete.
INFO: Uvicorn running on http://0.0.0.0:5001 (Press CTRL+C to quit)
2023-12-12 22:13:18 DEBUG:Intercepting all calls to posthog.
2023-12-12 22:13:19 DEBUG:Creating Sentence Embedder...
2023-12-12 22:13:20 WARNING:Using embedded DuckDB without persistence: data will be transient
2023-12-12 22:13:22 DEBUG:Loading hyperparameters...
2023-12-12 22:13:22 INFO:Loading the extension "coqui_tts"...
[XTTS] Loading XTTS...
> You must agree to the terms of service to use this model.
| > Please see the terms of service at https://coqui.ai/cpml.txt
| > "I have read, understood and agreed to the Terms and Conditions." - [y/n]
```
### System Info
```shell
Latest official docker image running on server.
```
Note that a workaround for this is to remove coqui_tts and install "alltalk_tts" instead, which seems to work without issue.
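Alternatively, a minimal sketch of a non-interactive workaround (assuming, as the extension's later fix does, that the Coqui TTS library honours the `COQUI_TOS_AGREED` environment variable): set it before the XTTS model is loaded so the ToS prompt never appears.

```python
import os

# Must be set before TTS() downloads/loads the XTTS model; otherwise the
# interactive ToS prompt blocks startup in non-interactive sessions.
os.environ["COQUI_TOS_AGREED"] = "1"

from TTS.api import TTS

model = TTS("tts_models/multilingual/multi-dataset/xtts_v2").to("cuda")
```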
| [
{
"content": "import html\nimport json\nimport random\nimport time\nfrom pathlib import Path\n\nimport gradio as gr\n\nfrom modules import chat, shared, ui_chat\nfrom modules.logging_colors import logger\nfrom modules.ui import create_refresh_button\nfrom modules.utils import gradio\n\ntry:\n from TTS.api import TTS\n from TTS.utils.synthesizer import Synthesizer\nexcept ModuleNotFoundError:\n logger.error(\n \"Could not find the TTS module. Make sure to install the requirements for the coqui_tts extension.\"\n \"\\n\"\n \"\\nLinux / Mac:\\npip install -r extensions/coqui_tts/requirements.txt\\n\"\n \"\\nWindows:\\npip install -r extensions\\\\coqui_tts\\\\requirements.txt\\n\"\n \"\\n\"\n \"If you used the one-click installer, paste the command above in the terminal window launched after running the \\\"cmd_\\\" script. On Windows, that's \\\"cmd_windows.bat\\\".\"\n )\n\n raise\n\n\nparams = {\n \"activate\": True,\n \"autoplay\": True,\n \"show_text\": False,\n \"remove_trailing_dots\": False,\n \"voice\": \"female_01.wav\",\n \"language\": \"English\",\n \"model_name\": \"tts_models/multilingual/multi-dataset/xtts_v2\",\n \"device\": \"cuda\"\n}\n\nthis_dir = str(Path(__file__).parent.resolve())\nmodel = None\nwith open(Path(f\"{this_dir}/languages.json\"), encoding='utf8') as f:\n languages = json.load(f)\n\n\ndef get_available_voices():\n return sorted([voice.name for voice in Path(f\"{this_dir}/voices\").glob(\"*.wav\")])\n\n\ndef preprocess(raw_input):\n raw_input = html.unescape(raw_input)\n # raw_input = raw_input.strip(\"\\\"\")\n return raw_input\n\n\ndef new_split_into_sentences(self, text):\n sentences = self.seg.segment(text)\n if params['remove_trailing_dots']:\n sentences_without_dots = []\n for sentence in sentences:\n if sentence.endswith('.') and not sentence.endswith('...'):\n sentence = sentence[:-1]\n\n sentences_without_dots.append(sentence)\n\n return sentences_without_dots\n else:\n return sentences\n\n\nSynthesizer.split_into_sentences = new_split_into_sentences\n\n\ndef load_model():\n model = TTS(params[\"model_name\"]).to(params[\"device\"])\n return model\n\n\ndef remove_tts_from_history(history):\n for i, entry in enumerate(history['internal']):\n history['visible'][i] = [history['visible'][i][0], entry[1]]\n\n return history\n\n\ndef toggle_text_in_history(history):\n for i, entry in enumerate(history['visible']):\n visible_reply = entry[1]\n if visible_reply.startswith('<audio'):\n if params['show_text']:\n reply = history['internal'][i][1]\n history['visible'][i] = [history['visible'][i][0], f\"{visible_reply.split('</audio>')[0]}</audio>\\n\\n{reply}\"]\n else:\n history['visible'][i] = [history['visible'][i][0], f\"{visible_reply.split('</audio>')[0]}</audio>\"]\n\n return history\n\n\ndef random_sentence():\n with open(Path(\"extensions/coqui_tts/harvard_sentences.txt\")) as f:\n return random.choice(list(f))\n\n\ndef voice_preview(string):\n string = html.unescape(string) or random_sentence()\n\n output_file = Path('extensions/coqui_tts/outputs/voice_preview.wav')\n model.tts_to_file(\n text=string,\n file_path=output_file,\n speaker_wav=[f\"{this_dir}/voices/{params['voice']}\"],\n language=languages[params[\"language\"]]\n )\n\n return f'<audio src=\"file/{output_file.as_posix()}?{int(time.time())}\" controls autoplay></audio>'\n\n\ndef history_modifier(history):\n # Remove autoplay from the last reply\n if len(history['internal']) > 0:\n history['visible'][-1] = [\n history['visible'][-1][0],\n history['visible'][-1][1].replace('controls autoplay>', 
'controls>')\n ]\n\n return history\n\n\ndef state_modifier(state):\n if not params['activate']:\n return state\n\n state['stream'] = False\n return state\n\n\ndef input_modifier(string, state):\n if not params['activate']:\n return string\n\n shared.processing_message = \"*Is recording a voice message...*\"\n return string\n\n\ndef output_modifier(string, state):\n if not params['activate']:\n return string\n\n original_string = string\n string = preprocess(html.unescape(string))\n if string == '':\n string = '*Empty reply, try regenerating*'\n else:\n output_file = Path(f'extensions/coqui_tts/outputs/{state[\"character_menu\"]}_{int(time.time())}.wav')\n model.tts_to_file(\n text=string,\n file_path=output_file,\n speaker_wav=[f\"{this_dir}/voices/{params['voice']}\"],\n language=languages[params[\"language\"]]\n )\n\n autoplay = 'autoplay' if params['autoplay'] else ''\n string = f'<audio src=\"file/{output_file.as_posix()}\" controls {autoplay}></audio>'\n if params['show_text']:\n string += f'\\n\\n{original_string}'\n\n shared.processing_message = \"*Is typing...*\"\n return string\n\n\ndef custom_css():\n path_to_css = Path(f\"{this_dir}/style.css\")\n return open(path_to_css, 'r').read()\n\n\ndef setup():\n global model\n print(\"[XTTS] Loading XTTS...\")\n model = load_model()\n print(\"[XTTS] Done!\")\n Path(f\"{this_dir}/outputs\").mkdir(parents=True, exist_ok=True)\n\n\ndef ui():\n with gr.Accordion(\"Coqui TTS (XTTSv2)\"):\n with gr.Row():\n activate = gr.Checkbox(value=params['activate'], label='Activate TTS')\n autoplay = gr.Checkbox(value=params['autoplay'], label='Play TTS automatically')\n\n with gr.Row():\n show_text = gr.Checkbox(value=params['show_text'], label='Show message text under audio player')\n remove_trailing_dots = gr.Checkbox(value=params['remove_trailing_dots'], label='Remove trailing \".\" from text segments before converting to audio')\n\n with gr.Row():\n with gr.Row():\n voice = gr.Dropdown(get_available_voices(), label=\"Voice wav\", value=params[\"voice\"])\n create_refresh_button(voice, lambda: None, lambda: {'choices': get_available_voices(), 'value': params[\"voice\"]}, 'refresh-button')\n\n language = gr.Dropdown(languages.keys(), label=\"Language\", value=params[\"language\"])\n\n with gr.Row():\n preview_text = gr.Text(show_label=False, placeholder=\"Preview text\", elem_id=\"silero_preview_text\")\n preview_play = gr.Button(\"Preview\")\n preview_audio = gr.HTML(visible=False)\n\n with gr.Row():\n convert = gr.Button('Permanently replace audios with the message texts')\n convert_cancel = gr.Button('Cancel', visible=False)\n convert_confirm = gr.Button('Confirm (cannot be undone)', variant=\"stop\", visible=False)\n\n # Convert history with confirmation\n convert_arr = [convert_confirm, convert, convert_cancel]\n convert.click(lambda: [gr.update(visible=True), gr.update(visible=False), gr.update(visible=True)], None, convert_arr)\n convert_confirm.click(\n lambda: [gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)], None, convert_arr).then(\n remove_tts_from_history, gradio('history'), gradio('history')).then(\n chat.save_history, gradio('history', 'unique_id', 'character_menu', 'mode'), None).then(\n chat.redraw_html, gradio(ui_chat.reload_arr), gradio('display'))\n\n convert_cancel.click(lambda: [gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)], None, convert_arr)\n\n # Toggle message text in history\n show_text.change(\n lambda x: params.update({\"show_text\": x}), show_text, None).then(\n 
toggle_text_in_history, gradio('history'), gradio('history')).then(\n chat.save_history, gradio('history', 'unique_id', 'character_menu', 'mode'), None).then(\n chat.redraw_html, gradio(ui_chat.reload_arr), gradio('display'))\n\n # Event functions to update the parameters in the backend\n activate.change(lambda x: params.update({\"activate\": x}), activate, None)\n autoplay.change(lambda x: params.update({\"autoplay\": x}), autoplay, None)\n remove_trailing_dots.change(lambda x: params.update({\"remove_trailing_dots\": x}), remove_trailing_dots, None)\n voice.change(lambda x: params.update({\"voice\": x}), voice, None)\n language.change(lambda x: params.update({\"language\": x}), language, None)\n\n # Play preview\n preview_text.submit(voice_preview, preview_text, preview_audio)\n preview_play.click(voice_preview, preview_text, preview_audio)\n",
"path": "extensions/coqui_tts/script.py"
}
] | [
{
"content": "import os\nimport html\nimport json\nimport random\nimport time\nfrom pathlib import Path\n\nimport gradio as gr\n\nfrom modules import chat, shared, ui_chat\nfrom modules.logging_colors import logger\nfrom modules.ui import create_refresh_button\nfrom modules.utils import gradio\n\ntry:\n from TTS.api import TTS\n from TTS.utils.synthesizer import Synthesizer\nexcept ModuleNotFoundError:\n logger.error(\n \"Could not find the TTS module. Make sure to install the requirements for the coqui_tts extension.\"\n \"\\n\"\n \"\\nLinux / Mac:\\npip install -r extensions/coqui_tts/requirements.txt\\n\"\n \"\\nWindows:\\npip install -r extensions\\\\coqui_tts\\\\requirements.txt\\n\"\n \"\\n\"\n \"If you used the one-click installer, paste the command above in the terminal window launched after running the \\\"cmd_\\\" script. On Windows, that's \\\"cmd_windows.bat\\\".\"\n )\n\n raise\n\nos.environ[\"COQUI_TOS_AGREED\"] = \"1\"\n\nparams = {\n \"activate\": True,\n \"autoplay\": True,\n \"show_text\": False,\n \"remove_trailing_dots\": False,\n \"voice\": \"female_01.wav\",\n \"language\": \"English\",\n \"model_name\": \"tts_models/multilingual/multi-dataset/xtts_v2\",\n \"device\": \"cuda\"\n}\n\nthis_dir = str(Path(__file__).parent.resolve())\nmodel = None\nwith open(Path(f\"{this_dir}/languages.json\"), encoding='utf8') as f:\n languages = json.load(f)\n\n\ndef get_available_voices():\n return sorted([voice.name for voice in Path(f\"{this_dir}/voices\").glob(\"*.wav\")])\n\n\ndef preprocess(raw_input):\n raw_input = html.unescape(raw_input)\n # raw_input = raw_input.strip(\"\\\"\")\n return raw_input\n\n\ndef new_split_into_sentences(self, text):\n sentences = self.seg.segment(text)\n if params['remove_trailing_dots']:\n sentences_without_dots = []\n for sentence in sentences:\n if sentence.endswith('.') and not sentence.endswith('...'):\n sentence = sentence[:-1]\n\n sentences_without_dots.append(sentence)\n\n return sentences_without_dots\n else:\n return sentences\n\n\nSynthesizer.split_into_sentences = new_split_into_sentences\n\n\ndef load_model():\n model = TTS(params[\"model_name\"]).to(params[\"device\"])\n return model\n\n\ndef remove_tts_from_history(history):\n for i, entry in enumerate(history['internal']):\n history['visible'][i] = [history['visible'][i][0], entry[1]]\n\n return history\n\n\ndef toggle_text_in_history(history):\n for i, entry in enumerate(history['visible']):\n visible_reply = entry[1]\n if visible_reply.startswith('<audio'):\n if params['show_text']:\n reply = history['internal'][i][1]\n history['visible'][i] = [history['visible'][i][0], f\"{visible_reply.split('</audio>')[0]}</audio>\\n\\n{reply}\"]\n else:\n history['visible'][i] = [history['visible'][i][0], f\"{visible_reply.split('</audio>')[0]}</audio>\"]\n\n return history\n\n\ndef random_sentence():\n with open(Path(\"extensions/coqui_tts/harvard_sentences.txt\")) as f:\n return random.choice(list(f))\n\n\ndef voice_preview(string):\n string = html.unescape(string) or random_sentence()\n\n output_file = Path('extensions/coqui_tts/outputs/voice_preview.wav')\n model.tts_to_file(\n text=string,\n file_path=output_file,\n speaker_wav=[f\"{this_dir}/voices/{params['voice']}\"],\n language=languages[params[\"language\"]]\n )\n\n return f'<audio src=\"file/{output_file.as_posix()}?{int(time.time())}\" controls autoplay></audio>'\n\n\ndef history_modifier(history):\n # Remove autoplay from the last reply\n if len(history['internal']) > 0:\n history['visible'][-1] = [\n history['visible'][-1][0],\n 
history['visible'][-1][1].replace('controls autoplay>', 'controls>')\n ]\n\n return history\n\n\ndef state_modifier(state):\n if not params['activate']:\n return state\n\n state['stream'] = False\n return state\n\n\ndef input_modifier(string, state):\n if not params['activate']:\n return string\n\n shared.processing_message = \"*Is recording a voice message...*\"\n return string\n\n\ndef output_modifier(string, state):\n if not params['activate']:\n return string\n\n original_string = string\n string = preprocess(html.unescape(string))\n if string == '':\n string = '*Empty reply, try regenerating*'\n else:\n output_file = Path(f'extensions/coqui_tts/outputs/{state[\"character_menu\"]}_{int(time.time())}.wav')\n model.tts_to_file(\n text=string,\n file_path=output_file,\n speaker_wav=[f\"{this_dir}/voices/{params['voice']}\"],\n language=languages[params[\"language\"]]\n )\n\n autoplay = 'autoplay' if params['autoplay'] else ''\n string = f'<audio src=\"file/{output_file.as_posix()}\" controls {autoplay}></audio>'\n if params['show_text']:\n string += f'\\n\\n{original_string}'\n\n shared.processing_message = \"*Is typing...*\"\n return string\n\n\ndef custom_css():\n path_to_css = Path(f\"{this_dir}/style.css\")\n return open(path_to_css, 'r').read()\n\n\ndef setup():\n global model\n print(\"[XTTS] Loading XTTS...\")\n model = load_model()\n print(\"[XTTS] Done!\")\n Path(f\"{this_dir}/outputs\").mkdir(parents=True, exist_ok=True)\n\n\ndef ui():\n with gr.Accordion(\"Coqui TTS (XTTSv2)\"):\n with gr.Row():\n activate = gr.Checkbox(value=params['activate'], label='Activate TTS')\n autoplay = gr.Checkbox(value=params['autoplay'], label='Play TTS automatically')\n\n with gr.Row():\n show_text = gr.Checkbox(value=params['show_text'], label='Show message text under audio player')\n remove_trailing_dots = gr.Checkbox(value=params['remove_trailing_dots'], label='Remove trailing \".\" from text segments before converting to audio')\n\n with gr.Row():\n with gr.Row():\n voice = gr.Dropdown(get_available_voices(), label=\"Voice wav\", value=params[\"voice\"])\n create_refresh_button(voice, lambda: None, lambda: {'choices': get_available_voices(), 'value': params[\"voice\"]}, 'refresh-button')\n\n language = gr.Dropdown(languages.keys(), label=\"Language\", value=params[\"language\"])\n\n with gr.Row():\n preview_text = gr.Text(show_label=False, placeholder=\"Preview text\", elem_id=\"silero_preview_text\")\n preview_play = gr.Button(\"Preview\")\n preview_audio = gr.HTML(visible=False)\n\n with gr.Row():\n convert = gr.Button('Permanently replace audios with the message texts')\n convert_cancel = gr.Button('Cancel', visible=False)\n convert_confirm = gr.Button('Confirm (cannot be undone)', variant=\"stop\", visible=False)\n\n # Convert history with confirmation\n convert_arr = [convert_confirm, convert, convert_cancel]\n convert.click(lambda: [gr.update(visible=True), gr.update(visible=False), gr.update(visible=True)], None, convert_arr)\n convert_confirm.click(\n lambda: [gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)], None, convert_arr).then(\n remove_tts_from_history, gradio('history'), gradio('history')).then(\n chat.save_history, gradio('history', 'unique_id', 'character_menu', 'mode'), None).then(\n chat.redraw_html, gradio(ui_chat.reload_arr), gradio('display'))\n\n convert_cancel.click(lambda: [gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)], None, convert_arr)\n\n # Toggle message text in history\n show_text.change(\n lambda x: 
params.update({\"show_text\": x}), show_text, None).then(\n toggle_text_in_history, gradio('history'), gradio('history')).then(\n chat.save_history, gradio('history', 'unique_id', 'character_menu', 'mode'), None).then(\n chat.redraw_html, gradio(ui_chat.reload_arr), gradio('display'))\n\n # Event functions to update the parameters in the backend\n activate.change(lambda x: params.update({\"activate\": x}), activate, None)\n autoplay.change(lambda x: params.update({\"autoplay\": x}), autoplay, None)\n remove_trailing_dots.change(lambda x: params.update({\"remove_trailing_dots\": x}), remove_trailing_dots, None)\n voice.change(lambda x: params.update({\"voice\": x}), voice, None)\n language.change(lambda x: params.update({\"language\": x}), language, None)\n\n # Play preview\n preview_text.submit(voice_preview, preview_text, preview_audio)\n preview_play.click(voice_preview, preview_text, preview_audio)\n",
"path": "extensions/coqui_tts/script.py"
}
] | diff --git a/extensions/coqui_tts/script.py b/extensions/coqui_tts/script.py
index 81e85117d4..682cb94ca4 100644
--- a/extensions/coqui_tts/script.py
+++ b/extensions/coqui_tts/script.py
@@ -1,3 +1,4 @@
+import os
import html
import json
import random
@@ -26,6 +27,7 @@
raise
+os.environ["COQUI_TOS_AGREED"] = "1"
params = {
"activate": True,
|
boto__botocore-1117 | Support Python 3.6
Python 3.6 got released, and some distros (like Fedora) are switching to it.
| [
{
"content": "#!/usr/bin/env python\nimport botocore\nimport sys\n\nfrom setuptools import setup, find_packages\n\n\nrequires = ['jmespath>=0.7.1,<1.0.0',\n 'python-dateutil>=2.1,<3.0.0',\n 'docutils>=0.10']\n\n\nif sys.version_info[:2] == (2, 6):\n # For python2.6 we have a few other dependencies.\n # First we need an ordered dictionary so we use the\n # 2.6 backport.\n requires.append('ordereddict==1.1')\n # Then we need simplejson. This is because we need\n # a json version that allows us to specify we want to\n # use an ordereddict instead of a normal dict for the\n # JSON objects. The 2.7 json module has this. For 2.6\n # we need simplejson.\n requires.append('simplejson==3.3.0')\n\n\nsetup(\n name='botocore',\n version=botocore.__version__,\n description='Low-level, data-driven core of boto 3.',\n long_description=open('README.rst').read(),\n author='Amazon Web Services',\n url='https://github.com/boto/botocore',\n scripts=[],\n packages=find_packages(exclude=['tests*']),\n package_data={'botocore': ['data/*.json', 'data/*/*.json'],\n 'botocore.vendored.requests': ['*.pem']},\n include_package_data=True,\n install_requires=requires,\n extras_require={\n ':python_version==\"2.6\"': [\n 'ordereddict==1.1',\n 'simplejson==3.3.0',\n ]\n },\n license=\"Apache License 2.0\",\n classifiers=(\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Natural Language :: English',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n ),\n)\n",
"path": "setup.py"
}
] | [
{
"content": "#!/usr/bin/env python\nimport botocore\nimport sys\n\nfrom setuptools import setup, find_packages\n\n\nrequires = ['jmespath>=0.7.1,<1.0.0',\n 'python-dateutil>=2.1,<3.0.0',\n 'docutils>=0.10']\n\n\nif sys.version_info[:2] == (2, 6):\n # For python2.6 we have a few other dependencies.\n # First we need an ordered dictionary so we use the\n # 2.6 backport.\n requires.append('ordereddict==1.1')\n # Then we need simplejson. This is because we need\n # a json version that allows us to specify we want to\n # use an ordereddict instead of a normal dict for the\n # JSON objects. The 2.7 json module has this. For 2.6\n # we need simplejson.\n requires.append('simplejson==3.3.0')\n\n\nsetup(\n name='botocore',\n version=botocore.__version__,\n description='Low-level, data-driven core of boto 3.',\n long_description=open('README.rst').read(),\n author='Amazon Web Services',\n url='https://github.com/boto/botocore',\n scripts=[],\n packages=find_packages(exclude=['tests*']),\n package_data={'botocore': ['data/*.json', 'data/*/*.json'],\n 'botocore.vendored.requests': ['*.pem']},\n include_package_data=True,\n install_requires=requires,\n extras_require={\n ':python_version==\"2.6\"': [\n 'ordereddict==1.1',\n 'simplejson==3.3.0',\n ]\n },\n license=\"Apache License 2.0\",\n classifiers=(\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Natural Language :: English',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ),\n)\n",
"path": "setup.py"
}
] | diff --git a/.travis.yml b/.travis.yml
index b4c6cd4f63..75633c23f1 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -5,7 +5,7 @@ python:
- "3.3"
- "3.4"
- "3.5"
- - "3.6-dev"
+ - "3.6"
sudo: false
before_install:
- if [ "$TRAVIS_PULL_REQUEST" != "false" ] && [ "$TRAVIS_BRANCH" == "master" ]; then
diff --git a/requirements.txt b/requirements.txt
index 88d780206d..772ad4129c 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,4 +1,4 @@
-tox>=2.3.1,<3.0.0
+tox>=2.5.0,<3.0.0
python-dateutil>=2.1,<3.0.0
nose==1.3.0
mock==1.3.0
diff --git a/setup.py b/setup.py
index ec976c7656..e16b6245f9 100644
--- a/setup.py
+++ b/setup.py
@@ -57,5 +57,6 @@
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
+ 'Programming Language :: Python :: 3.6',
),
)
diff --git a/tox.ini b/tox.ini
index 8e0fb21423..17e3fb91a9 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,5 +1,5 @@
[tox]
-envlist = py26,py27,py33,py34,py35
+envlist = py26,py27,py33,py34,py35,py36
skipsdist = True
|
huggingface__dataset-viewer-2409 | Retry jobs that finish with `ClientConnection` error?
Maybe here: https://github.com/huggingface/datasets-server/blob/f311a9212aaa91dd0373e5c2d4f5da9b6bdabcb5/chart/env/prod.yaml#L209
Internal conversation on Slack: https://huggingface.slack.com/archives/C0311GZ7R6K/p1698224875005729
Anyway: I'm wondering if we can still get this error now that the dataset scripts are disabled by default.
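For illustration, a hypothetical sketch (not the datasets-server code) of how a retryable-error set is typically consulted when a job finishes; the error codes and retry cap mirror constants used in this repo, while `should_retry` itself is an assumption:
```python
# Illustrative only: the error codes and MAX_FAILED_RUNS echo the repo's constants,
# but this helper is an assumption, not the actual queue logic.
ERROR_CODES_TO_RETRY = {
    "ConnectionError",
    "ExternalServerError",
    "JobManagerCrashedError",
}
MAX_FAILED_RUNS = 3

def should_retry(error_code: str, failed_runs: int) -> bool:
    # Re-queue only known-transient failures, and stop after the retry cap.
    return error_code in ERROR_CODES_TO_RETRY and failed_runs < MAX_FAILED_RUNS
```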
| [
{
"content": "# SPDX-License-Identifier: Apache-2.0\n# Copyright 2022 The HuggingFace Authors.\n\nCACHE_COLLECTION_RESPONSES = \"cachedResponsesBlue\"\nCACHE_MONGOENGINE_ALIAS = \"cache\"\nHF_DATASETS_CACHE_APPNAME = \"hf_datasets_cache\"\nPARQUET_METADATA_CACHE_APPNAME = \"datasets_server_parquet_metadata\"\nDESCRIPTIVE_STATISTICS_CACHE_APPNAME = \"datasets_server_descriptive_statistics\"\nDUCKDB_INDEX_CACHE_APPNAME = \"datasets_server_duckdb_index\"\nDUCKDB_INDEX_DOWNLOADS_SUBDIRECTORY = \"downloads\"\nDUCKDB_INDEX_JOB_RUNNER_SUBDIRECTORY = \"job_runner\"\nCACHE_METRICS_COLLECTION = \"cacheTotalMetric\"\nQUEUE_METRICS_COLLECTION = \"jobTotalMetric\"\nMETRICS_MONGOENGINE_ALIAS = \"metrics\"\nQUEUE_COLLECTION_JOBS = \"jobsBlue\"\nQUEUE_COLLECTION_LOCKS = \"locks\"\nQUEUE_MONGOENGINE_ALIAS = \"queue\"\nQUEUE_TTL_SECONDS = 600 # 10 minutes\nLOCK_TTL_SECONDS_NO_OWNER = 600 # 10 minutes\nLOCK_TTL_SECONDS_TO_START_JOB = 600 # 10 minutes\nLOCK_TTL_SECONDS_TO_WRITE_ON_GIT_BRANCH = 3600 # 1 hour\n\nMAX_FAILED_RUNS = 3\nDATASET_SEPARATOR = \"--\"\nDEFAULT_DIFFICULTY = 50\nDEFAULT_DIFFICULTY_MAX = 100\nDEFAULT_DIFFICULTY_MIN = 0\nDEFAULT_INPUT_TYPE = \"dataset\"\nDEFAULT_JOB_RUNNER_VERSION = 1\nDIFFICULTY_BONUS_BY_FAILED_RUNS = 20\nMIN_BYTES_FOR_BONUS_DIFFICULTY = 3_000_000_000\n\nPROCESSING_STEP_CONFIG_PARQUET_AND_INFO_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS = 100\nPROCESSING_STEP_CONFIG_PARQUET_AND_INFO_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS = 100\nPROCESSING_STEP_CONFIG_PARQUET_AND_INFO_ROW_GROUP_SIZE_FOR_BINARY_DATASETS = 100\nPARQUET_REVISION = \"refs/convert/parquet\"\n\nERROR_CODES_TO_RETRY = {\n \"CreateCommitError\",\n \"ExternalServerError\",\n \"JobManagerCrashedError\",\n \"LockedDatasetTimeoutError\",\n \"StreamingRowsError\",\n}\n\nEXTERNAL_DATASET_SCRIPT_PATTERN = \"datasets_modules/datasets\"\n\n# Arrays are not immutable, we have to take care of not modifying them\n# Anyway: in all this file, we allow constant reassignment (no use of Final)\nCONFIG_HAS_VIEWER_KINDS = [\"config-size\"]\nCONFIG_INFO_KINDS = [\"config-info\"]\nCONFIG_PARQUET_METADATA_KINDS = [\"config-parquet-metadata\"]\nCONFIG_PARQUET_AND_METADATA_KINDS = [\"config-parquet\", \"config-parquet-metadata\"]\nCONFIG_SPLIT_NAMES_KINDS = [\"config-split-names-from-info\", \"config-split-names-from-streaming\"]\nDATASET_CONFIG_NAMES_KINDS = [\"dataset-config-names\"]\nDATASET_INFO_KINDS = [\"dataset-info\"]\nSPLIT_DUCKDB_INDEX_KINDS = [\"split-duckdb-index\"]\nSPLIT_HAS_PREVIEW_KINDS = [\"split-first-rows-from-streaming\", \"split-first-rows-from-parquet\"]\nSPLIT_HAS_SEARCH_KINDS = [\"split-duckdb-index\"]\nPARALLEL_STEPS_LISTS = [\n CONFIG_SPLIT_NAMES_KINDS,\n SPLIT_HAS_PREVIEW_KINDS,\n]\n\nCROISSANT_MAX_CONFIGS = 100\nMAX_NUM_ROWS_PER_PAGE = 100\n",
"path": "libs/libcommon/src/libcommon/constants.py"
}
] | [
{
"content": "# SPDX-License-Identifier: Apache-2.0\n# Copyright 2022 The HuggingFace Authors.\n\nCACHE_COLLECTION_RESPONSES = \"cachedResponsesBlue\"\nCACHE_MONGOENGINE_ALIAS = \"cache\"\nHF_DATASETS_CACHE_APPNAME = \"hf_datasets_cache\"\nPARQUET_METADATA_CACHE_APPNAME = \"datasets_server_parquet_metadata\"\nDESCRIPTIVE_STATISTICS_CACHE_APPNAME = \"datasets_server_descriptive_statistics\"\nDUCKDB_INDEX_CACHE_APPNAME = \"datasets_server_duckdb_index\"\nDUCKDB_INDEX_DOWNLOADS_SUBDIRECTORY = \"downloads\"\nDUCKDB_INDEX_JOB_RUNNER_SUBDIRECTORY = \"job_runner\"\nCACHE_METRICS_COLLECTION = \"cacheTotalMetric\"\nQUEUE_METRICS_COLLECTION = \"jobTotalMetric\"\nMETRICS_MONGOENGINE_ALIAS = \"metrics\"\nQUEUE_COLLECTION_JOBS = \"jobsBlue\"\nQUEUE_COLLECTION_LOCKS = \"locks\"\nQUEUE_MONGOENGINE_ALIAS = \"queue\"\nQUEUE_TTL_SECONDS = 600 # 10 minutes\nLOCK_TTL_SECONDS_NO_OWNER = 600 # 10 minutes\nLOCK_TTL_SECONDS_TO_START_JOB = 600 # 10 minutes\nLOCK_TTL_SECONDS_TO_WRITE_ON_GIT_BRANCH = 3600 # 1 hour\n\nMAX_FAILED_RUNS = 3\nDATASET_SEPARATOR = \"--\"\nDEFAULT_DIFFICULTY = 50\nDEFAULT_DIFFICULTY_MAX = 100\nDEFAULT_DIFFICULTY_MIN = 0\nDEFAULT_INPUT_TYPE = \"dataset\"\nDEFAULT_JOB_RUNNER_VERSION = 1\nDIFFICULTY_BONUS_BY_FAILED_RUNS = 20\nMIN_BYTES_FOR_BONUS_DIFFICULTY = 3_000_000_000\n\nPROCESSING_STEP_CONFIG_PARQUET_AND_INFO_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS = 100\nPROCESSING_STEP_CONFIG_PARQUET_AND_INFO_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS = 100\nPROCESSING_STEP_CONFIG_PARQUET_AND_INFO_ROW_GROUP_SIZE_FOR_BINARY_DATASETS = 100\nPARQUET_REVISION = \"refs/convert/parquet\"\n\nERROR_CODES_TO_RETRY = {\n \"ConnectionError\",\n \"CreateCommitError\",\n \"ExternalServerError\",\n \"JobManagerCrashedError\",\n \"LockedDatasetTimeoutError\",\n \"StreamingRowsError\",\n}\n\nEXTERNAL_DATASET_SCRIPT_PATTERN = \"datasets_modules/datasets\"\n\n# Arrays are not immutable, we have to take care of not modifying them\n# Anyway: in all this file, we allow constant reassignment (no use of Final)\nCONFIG_HAS_VIEWER_KINDS = [\"config-size\"]\nCONFIG_INFO_KINDS = [\"config-info\"]\nCONFIG_PARQUET_METADATA_KINDS = [\"config-parquet-metadata\"]\nCONFIG_PARQUET_AND_METADATA_KINDS = [\"config-parquet\", \"config-parquet-metadata\"]\nCONFIG_SPLIT_NAMES_KINDS = [\"config-split-names-from-info\", \"config-split-names-from-streaming\"]\nDATASET_CONFIG_NAMES_KINDS = [\"dataset-config-names\"]\nDATASET_INFO_KINDS = [\"dataset-info\"]\nSPLIT_DUCKDB_INDEX_KINDS = [\"split-duckdb-index\"]\nSPLIT_HAS_PREVIEW_KINDS = [\"split-first-rows-from-streaming\", \"split-first-rows-from-parquet\"]\nSPLIT_HAS_SEARCH_KINDS = [\"split-duckdb-index\"]\nPARALLEL_STEPS_LISTS = [\n CONFIG_SPLIT_NAMES_KINDS,\n SPLIT_HAS_PREVIEW_KINDS,\n]\n\nMAX_NUM_ROWS_PER_PAGE = 100\n",
"path": "libs/libcommon/src/libcommon/constants.py"
}
] | diff --git a/libs/libcommon/src/libcommon/constants.py b/libs/libcommon/src/libcommon/constants.py
index 0a17db2ce6..075c5529af 100644
--- a/libs/libcommon/src/libcommon/constants.py
+++ b/libs/libcommon/src/libcommon/constants.py
@@ -36,6 +36,7 @@
PARQUET_REVISION = "refs/convert/parquet"
ERROR_CODES_TO_RETRY = {
+ "ConnectionError",
"CreateCommitError",
"ExternalServerError",
"JobManagerCrashedError",
|
getredash__redash-4189 | JIRA setup: change password field name to "API Token"
While a password can be used there, it's not recommended and eventually will be deprecated.
| [
{
"content": "import re\nfrom collections import OrderedDict\n\nfrom redash.query_runner import *\nfrom redash.utils import json_dumps, json_loads\n\n\n# TODO: make this more general and move into __init__.py\nclass ResultSet(object):\n def __init__(self):\n self.columns = OrderedDict()\n self.rows = []\n\n def add_row(self, row):\n for key in row.keys():\n self.add_column(key)\n\n self.rows.append(row)\n\n def add_column(self, column, column_type=TYPE_STRING):\n if column not in self.columns:\n self.columns[column] = {'name': column, 'type': column_type, 'friendly_name': column}\n\n def to_json(self):\n return json_dumps({'rows': self.rows, 'columns': self.columns.values()})\n\n def merge(self, set):\n self.rows = self.rows + set.rows\n\n\ndef parse_issue(issue, field_mapping):\n result = OrderedDict()\n result['key'] = issue['key']\n\n for k, v in issue['fields'].iteritems():#\n output_name = field_mapping.get_output_field_name(k)\n member_names = field_mapping.get_dict_members(k)\n\n if isinstance(v, dict):\n if len(member_names) > 0:\n # if field mapping with dict member mappings defined get value of each member\n for member_name in member_names:\n if member_name in v:\n result[field_mapping.get_dict_output_field_name(k, member_name)] = v[member_name]\n\n else:\n # these special mapping rules are kept for backwards compatibility\n if 'key' in v:\n result['{}_key'.format(output_name)] = v['key']\n if 'name' in v:\n result['{}_name'.format(output_name)] = v['name']\n\n if k in v:\n result[output_name] = v[k]\n\n if 'watchCount' in v:\n result[output_name] = v['watchCount']\n\n elif isinstance(v, list):\n if len(member_names) > 0:\n # if field mapping with dict member mappings defined get value of each member\n for member_name in member_names:\n listValues = []\n for listItem in v:\n if isinstance(listItem, dict):\n if member_name in listItem:\n listValues.append(listItem[member_name])\n if len(listValues) > 0:\n result[field_mapping.get_dict_output_field_name(k, member_name)] = ','.join(listValues)\n\n else:\n # otherwise support list values only for non-dict items\n listValues = []\n for listItem in v:\n if not isinstance(listItem, dict):\n listValues.append(listItem)\n if len(listValues) > 0:\n result[output_name] = ','.join(listValues)\n\n else:\n result[output_name] = v\n\n return result\n\n\ndef parse_issues(data, field_mapping):\n results = ResultSet()\n\n for issue in data['issues']:\n results.add_row(parse_issue(issue, field_mapping))\n\n return results\n\n\ndef parse_count(data):\n results = ResultSet()\n results.add_row({'count': data['total']})\n return results\n\n\nclass FieldMapping:\n\n def __init__(cls, query_field_mapping):\n cls.mapping = []\n for k, v in query_field_mapping.iteritems():\n field_name = k\n member_name = None\n\n # check for member name contained in field name\n member_parser = re.search('(\\w+)\\.(\\w+)', k)\n if (member_parser):\n field_name = member_parser.group(1)\n member_name = member_parser.group(2)\n\n cls.mapping.append({\n 'field_name': field_name,\n 'member_name': member_name,\n 'output_field_name': v\n })\n\n def get_output_field_name(cls, field_name):\n for item in cls.mapping:\n if item['field_name'] == field_name and not item['member_name']:\n return item['output_field_name']\n return field_name\n\n def get_dict_members(cls, field_name):\n member_names = []\n for item in cls.mapping:\n if item['field_name'] == field_name and item['member_name']:\n member_names.append(item['member_name'])\n return member_names\n\n def 
get_dict_output_field_name(cls, field_name, member_name):\n for item in cls.mapping:\n if item['field_name'] == field_name and item['member_name'] == member_name:\n return item['output_field_name']\n return None\n\n\nclass JiraJQL(BaseHTTPQueryRunner):\n noop_query = '{\"queryType\": \"count\"}'\n response_error = \"JIRA returned unexpected status code\"\n requires_authentication = True\n url_title = 'JIRA URL'\n username_title = 'Username'\n password_title = 'Password'\n\n @classmethod\n def name(cls):\n return \"JIRA (JQL)\"\n\n def __init__(self, configuration):\n super(JiraJQL, self).__init__(configuration)\n self.syntax = 'json'\n\n def run_query(self, query, user):\n jql_url = '{}/rest/api/2/search'.format(self.configuration[\"url\"])\n\n try:\n query = json_loads(query)\n query_type = query.pop('queryType', 'select')\n field_mapping = FieldMapping(query.pop('fieldMapping', {}))\n\n if query_type == 'count':\n query['maxResults'] = 1\n query['fields'] = ''\n else:\n query['maxResults'] = query.get('maxResults', 1000)\n\n response, error = self.get_response(jql_url, params=query)\n if error is not None:\n return None, error\n\n data = response.json()\n\n if query_type == 'count':\n results = parse_count(data)\n else:\n results = parse_issues(data, field_mapping)\n index = data['startAt'] + data['maxResults']\n\n while data['total'] > index:\n query['startAt'] = index\n response, error = self.get_response(jql_url, params=query)\n if error is not None:\n return None, error\n\n data = response.json()\n index = data['startAt'] + data['maxResults']\n\n addl_results = parse_issues(data, field_mapping)\n results.merge(addl_results)\n\n return results.to_json(), None\n except KeyboardInterrupt:\n return None, \"Query cancelled by user.\"\n\n\nregister(JiraJQL)\n",
"path": "redash/query_runner/jql.py"
}
] | [
{
"content": "import re\nfrom collections import OrderedDict\n\nfrom redash.query_runner import *\nfrom redash.utils import json_dumps, json_loads\n\n\n# TODO: make this more general and move into __init__.py\nclass ResultSet(object):\n def __init__(self):\n self.columns = OrderedDict()\n self.rows = []\n\n def add_row(self, row):\n for key in row.keys():\n self.add_column(key)\n\n self.rows.append(row)\n\n def add_column(self, column, column_type=TYPE_STRING):\n if column not in self.columns:\n self.columns[column] = {'name': column, 'type': column_type, 'friendly_name': column}\n\n def to_json(self):\n return json_dumps({'rows': self.rows, 'columns': self.columns.values()})\n\n def merge(self, set):\n self.rows = self.rows + set.rows\n\n\ndef parse_issue(issue, field_mapping):\n result = OrderedDict()\n result['key'] = issue['key']\n\n for k, v in issue['fields'].iteritems():#\n output_name = field_mapping.get_output_field_name(k)\n member_names = field_mapping.get_dict_members(k)\n\n if isinstance(v, dict):\n if len(member_names) > 0:\n # if field mapping with dict member mappings defined get value of each member\n for member_name in member_names:\n if member_name in v:\n result[field_mapping.get_dict_output_field_name(k, member_name)] = v[member_name]\n\n else:\n # these special mapping rules are kept for backwards compatibility\n if 'key' in v:\n result['{}_key'.format(output_name)] = v['key']\n if 'name' in v:\n result['{}_name'.format(output_name)] = v['name']\n\n if k in v:\n result[output_name] = v[k]\n\n if 'watchCount' in v:\n result[output_name] = v['watchCount']\n\n elif isinstance(v, list):\n if len(member_names) > 0:\n # if field mapping with dict member mappings defined get value of each member\n for member_name in member_names:\n listValues = []\n for listItem in v:\n if isinstance(listItem, dict):\n if member_name in listItem:\n listValues.append(listItem[member_name])\n if len(listValues) > 0:\n result[field_mapping.get_dict_output_field_name(k, member_name)] = ','.join(listValues)\n\n else:\n # otherwise support list values only for non-dict items\n listValues = []\n for listItem in v:\n if not isinstance(listItem, dict):\n listValues.append(listItem)\n if len(listValues) > 0:\n result[output_name] = ','.join(listValues)\n\n else:\n result[output_name] = v\n\n return result\n\n\ndef parse_issues(data, field_mapping):\n results = ResultSet()\n\n for issue in data['issues']:\n results.add_row(parse_issue(issue, field_mapping))\n\n return results\n\n\ndef parse_count(data):\n results = ResultSet()\n results.add_row({'count': data['total']})\n return results\n\n\nclass FieldMapping:\n\n def __init__(cls, query_field_mapping):\n cls.mapping = []\n for k, v in query_field_mapping.iteritems():\n field_name = k\n member_name = None\n\n # check for member name contained in field name\n member_parser = re.search('(\\w+)\\.(\\w+)', k)\n if (member_parser):\n field_name = member_parser.group(1)\n member_name = member_parser.group(2)\n\n cls.mapping.append({\n 'field_name': field_name,\n 'member_name': member_name,\n 'output_field_name': v\n })\n\n def get_output_field_name(cls, field_name):\n for item in cls.mapping:\n if item['field_name'] == field_name and not item['member_name']:\n return item['output_field_name']\n return field_name\n\n def get_dict_members(cls, field_name):\n member_names = []\n for item in cls.mapping:\n if item['field_name'] == field_name and item['member_name']:\n member_names.append(item['member_name'])\n return member_names\n\n def 
get_dict_output_field_name(cls, field_name, member_name):\n for item in cls.mapping:\n if item['field_name'] == field_name and item['member_name'] == member_name:\n return item['output_field_name']\n return None\n\n\nclass JiraJQL(BaseHTTPQueryRunner):\n noop_query = '{\"queryType\": \"count\"}'\n response_error = \"JIRA returned unexpected status code\"\n requires_authentication = True\n url_title = 'JIRA URL'\n username_title = 'Username'\n password_title = 'API Token'\n\n @classmethod\n def name(cls):\n return \"JIRA (JQL)\"\n\n def __init__(self, configuration):\n super(JiraJQL, self).__init__(configuration)\n self.syntax = 'json'\n\n def run_query(self, query, user):\n jql_url = '{}/rest/api/2/search'.format(self.configuration[\"url\"])\n\n try:\n query = json_loads(query)\n query_type = query.pop('queryType', 'select')\n field_mapping = FieldMapping(query.pop('fieldMapping', {}))\n\n if query_type == 'count':\n query['maxResults'] = 1\n query['fields'] = ''\n else:\n query['maxResults'] = query.get('maxResults', 1000)\n\n response, error = self.get_response(jql_url, params=query)\n if error is not None:\n return None, error\n\n data = response.json()\n\n if query_type == 'count':\n results = parse_count(data)\n else:\n results = parse_issues(data, field_mapping)\n index = data['startAt'] + data['maxResults']\n\n while data['total'] > index:\n query['startAt'] = index\n response, error = self.get_response(jql_url, params=query)\n if error is not None:\n return None, error\n\n data = response.json()\n index = data['startAt'] + data['maxResults']\n\n addl_results = parse_issues(data, field_mapping)\n results.merge(addl_results)\n\n return results.to_json(), None\n except KeyboardInterrupt:\n return None, \"Query cancelled by user.\"\n\n\nregister(JiraJQL)\n",
"path": "redash/query_runner/jql.py"
}
] | diff --git a/redash/query_runner/jql.py b/redash/query_runner/jql.py
index 76e707e3a3..47a47b2fe6 100644
--- a/redash/query_runner/jql.py
+++ b/redash/query_runner/jql.py
@@ -144,7 +144,7 @@ class JiraJQL(BaseHTTPQueryRunner):
requires_authentication = True
url_title = 'JIRA URL'
username_title = 'Username'
- password_title = 'Password'
+ password_title = 'API Token'
@classmethod
def name(cls):
|
conda__conda-3740 | conda env create giving ImportError for yaml package
`conda env create` suddenly started giving `"ImportError: No module named 'yaml'"` with latest miniconda on my TravisCI builds: https://travis-ci.org/leouieda/website/builds/170917743
I changed nothing significant in my code. Tried rebuilding previous passing builds and started getting the same error.
Is this something from a recent release?
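For illustration, a minimal sketch of the kind of guarded, lazily resolved yaml import that avoids this failure mode; the module names are assumptions, not conda's actual implementation:
```python
def get_yaml():
    # Resolve a yaml implementation lazily instead of importing "yaml" at module
    # import time, so commands do not crash when PyYAML is missing from the
    # environment that conda itself runs in.
    try:
        import ruamel_yaml as yaml  # assumed bundled parser
    except ImportError:
        try:
            import yaml  # fall back to an externally installed PyYAML
        except ImportError:
            raise ImportError("No yaml library available: install ruamel_yaml or pyyaml")
    return yaml
```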
| [
{
"content": "\"\"\"\nWrapper around yaml to ensure that everything is ordered correctly.\n\nThis is based on the answer at http://stackoverflow.com/a/16782282\n\"\"\"\nfrom __future__ import absolute_import, print_function\nfrom collections import OrderedDict\nimport yaml\n\n\ndef represent_ordereddict(dumper, data):\n value = []\n\n for item_key, item_value in data.items():\n node_key = dumper.represent_data(item_key)\n node_value = dumper.represent_data(item_value)\n\n value.append((node_key, node_value))\n\n return yaml.nodes.MappingNode(u'tag:yaml.org,2002:map', value)\n\nyaml.add_representer(OrderedDict, represent_ordereddict)\n\ndump = yaml.dump\nload = yaml.load\ndict = OrderedDict\n",
"path": "conda_env/yaml.py"
}
] | [
{
"content": "\"\"\"\nWrapper around yaml to ensure that everything is ordered correctly.\n\nThis is based on the answer at http://stackoverflow.com/a/16782282\n\"\"\"\nfrom __future__ import absolute_import, print_function\nfrom collections import OrderedDict\n\nfrom conda.common.yaml import get_yaml\nyaml = get_yaml()\n\n\ndef represent_ordereddict(dumper, data):\n value = []\n\n for item_key, item_value in data.items():\n node_key = dumper.represent_data(item_key)\n node_value = dumper.represent_data(item_value)\n\n value.append((node_key, node_value))\n\n return yaml.nodes.MappingNode(u'tag:yaml.org,2002:map', value)\n\nyaml.add_representer(OrderedDict, represent_ordereddict)\n\ndump = yaml.dump\nload = yaml.load\ndict = OrderedDict\n",
"path": "conda_env/yaml.py"
}
] | diff --git a/conda_env/yaml.py b/conda_env/yaml.py
index 74a462f3579..fbe8dc7e088 100644
--- a/conda_env/yaml.py
+++ b/conda_env/yaml.py
@@ -5,7 +5,9 @@
"""
from __future__ import absolute_import, print_function
from collections import OrderedDict
-import yaml
+
+from conda.common.yaml import get_yaml
+yaml = get_yaml()
def represent_ordereddict(dumper, data):
|
typeddjango__django-stubs-1343 | Next release planning (1.14.0)
We are tracking a few regressions in [version 1.13.2](https://github.com/typeddjango/django-stubs/releases/tag/1.13.2) and should get a release out quickly once they are resolved.
* #1335
  * Fixes #1333
  * Fixes #1336
* https://github.com/typeddjango/django-stubs/pull/1331
  * Fixes #1330
* #1345
  * Fixes #1327
We can update to mypy 0.991 thanks to @flaeppe, and will call the next version 1.14.0.
* #1329
  * Unblocked #1260
  * Fixes #1261
Also, some nice-to-have PRs are still waiting for community reviewers:
* #1309
* #1308
* #1315
| [
{
"content": "import os\nfrom typing import List\n\nfrom setuptools import find_packages, setup\n\n\ndef find_stub_files(name: str) -> List[str]:\n result = []\n for root, _dirs, files in os.walk(name):\n for file in files:\n if file.endswith(\".pyi\"):\n if os.path.sep in root:\n sub_root = root.split(os.path.sep, 1)[-1]\n file = os.path.join(sub_root, file)\n result.append(file)\n return result\n\n\nwith open(\"README.md\") as f:\n readme = f.read()\n\ndependencies = [\n \"mypy>=0.980\",\n \"django\",\n \"django-stubs-ext>=0.7.0\",\n \"tomli\",\n # Types:\n \"typing-extensions\",\n \"types-pytz\",\n \"types-PyYAML\",\n]\n\nextras_require = {\n \"compatible-mypy\": [\"mypy>=0.991,<1.0\"],\n}\n\nsetup(\n name=\"django-stubs\",\n version=\"1.13.2\",\n description=\"Mypy stubs for Django\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n license=\"MIT\",\n url=\"https://github.com/typeddjango/django-stubs\",\n author=\"Maksim Kurnikov\",\n author_email=\"[email protected]\",\n maintainer=\"Nikita Sobolev\",\n maintainer_email=\"[email protected]\",\n py_modules=[],\n python_requires=\">=3.7\",\n install_requires=dependencies,\n extras_require=extras_require,\n packages=[\"django-stubs\", *find_packages(exclude=[\"scripts\"])],\n package_data={\n \"django-stubs\": find_stub_files(\"django-stubs\"),\n \"mypy_django_plugin\": [\"py.typed\"],\n },\n classifiers=[\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Typing :: Typed\",\n \"Framework :: Django\",\n \"Framework :: Django :: 2.2\",\n \"Framework :: Django :: 3.0\",\n \"Framework :: Django :: 3.1\",\n \"Framework :: Django :: 3.2\",\n \"Framework :: Django :: 4.0\",\n \"Framework :: Django :: 4.1\",\n ],\n project_urls={\n \"Release notes\": \"https://github.com/typeddjango/django-stubs/releases\",\n },\n)\n",
"path": "setup.py"
}
] | [
{
"content": "import os\nfrom typing import List\n\nfrom setuptools import find_packages, setup\n\n\ndef find_stub_files(name: str) -> List[str]:\n result = []\n for root, _dirs, files in os.walk(name):\n for file in files:\n if file.endswith(\".pyi\"):\n if os.path.sep in root:\n sub_root = root.split(os.path.sep, 1)[-1]\n file = os.path.join(sub_root, file)\n result.append(file)\n return result\n\n\nwith open(\"README.md\") as f:\n readme = f.read()\n\ndependencies = [\n \"mypy>=0.980\",\n \"django\",\n \"django-stubs-ext>=0.7.0\",\n \"tomli\",\n # Types:\n \"typing-extensions\",\n \"types-pytz\",\n \"types-PyYAML\",\n]\n\nextras_require = {\n \"compatible-mypy\": [\"mypy>=0.991,<1.0\"],\n}\n\nsetup(\n name=\"django-stubs\",\n version=\"1.14.0\",\n description=\"Mypy stubs for Django\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n license=\"MIT\",\n url=\"https://github.com/typeddjango/django-stubs\",\n author=\"Maksim Kurnikov\",\n author_email=\"[email protected]\",\n maintainer=\"Nikita Sobolev\",\n maintainer_email=\"[email protected]\",\n py_modules=[],\n python_requires=\">=3.7\",\n install_requires=dependencies,\n extras_require=extras_require,\n packages=[\"django-stubs\", *find_packages(exclude=[\"scripts\"])],\n package_data={\n \"django-stubs\": find_stub_files(\"django-stubs\"),\n \"mypy_django_plugin\": [\"py.typed\"],\n },\n classifiers=[\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Typing :: Typed\",\n \"Framework :: Django\",\n \"Framework :: Django :: 2.2\",\n \"Framework :: Django :: 3.0\",\n \"Framework :: Django :: 3.1\",\n \"Framework :: Django :: 3.2\",\n \"Framework :: Django :: 4.0\",\n \"Framework :: Django :: 4.1\",\n ],\n project_urls={\n \"Release notes\": \"https://github.com/typeddjango/django-stubs/releases\",\n },\n)\n",
"path": "setup.py"
}
] | diff --git a/README.md b/README.md
index 65c83a392..f75a1e57a 100644
--- a/README.md
+++ b/README.md
@@ -51,6 +51,7 @@ We rely on different `django` and `mypy` versions:
| django-stubs | mypy version | django version | python version
|--------------| ---- | ---- | ---- |
+| 1.14.0 | 0.990+ | 3.2.x or 4.0.x or 4.1.x | ^3.7
| 1.13.0 | 0.980+ | 3.2.x or 4.0.x or 4.1.x | ^3.7
| 1.12.0 | 0.931+ | 3.2.x or 4.0.x | ^3.7
| 1.11.0 | 0.931+ | 3.2.x | ^3.7
diff --git a/setup.py b/setup.py
index f685e7d4e..19ec016db 100644
--- a/setup.py
+++ b/setup.py
@@ -36,7 +36,7 @@ def find_stub_files(name: str) -> List[str]:
setup(
name="django-stubs",
- version="1.13.2",
+ version="1.14.0",
description="Mypy stubs for Django",
long_description=readme,
long_description_content_type="text/markdown",
|
pytorch__TensorRT-371 | 🐛 [Bug] An error occurs in CompileGraph when gpu_id == 1
When I tried to compile on the second GPU in a multi-GPU environment, an error occurred. The code sample used is as follows.
```cpp
void load(const std::string& model_path, int64_t gpu_id, int64_t opt_batch_size) {
    torch::jit::Module module = torch::jit::load(model_path);
    torch::Device device = (torch::cuda::is_available() ? torch::Device(torch::kCUDA, gpu_id) : torch::Device(torch::kCPU));
    module.to(device, torch::kHalf);
    module.eval();
    std::vector<int64_t> in_opt = { opt_batch_size, INPUT_CHANNEL_NUM, BOARD_WIDTH, BOARD_WIDTH };
    trtorch::CompileSpec::InputRange range(in_opt);
    trtorch::CompileSpec info({ range });
    info.op_precision = torch::kHalf;
    info.device.gpu_id = gpu_id;
    module = trtorch::CompileGraph(module, info);
}
```
#### Error1
I called this function with gpu_id = 1. I got the following error:
```
terminate called after throwing an instance of 'trtorch::Error'
what(): [enforce fail at core/conversion/conversionctx/ConversionCtx.cpp:107] Expected cudaSetDevice(settings.device.gpu_id) to be true but got false
Unable to set gpu id: 1
```
I think this line is the cause.
https://github.com/NVIDIA/TRTorch/blob/1d4b967a28e36beee048703f5645ee6fcc95793d/core/conversion/conversionctx/ConversionCtx.cpp#L112
`cudaSetDevice` returns `cudaSuccess` (= 0) on success. However, `TRTORCH_CHECK` evaluates its condition as a Boolean, so the successful return value 0 is treated as a failure.
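A minimal standalone sketch of the pitfall against the CUDA runtime API (this is not TRTorch code):
```cpp
#include <cuda_runtime_api.h>
#include <cassert>

int main() {
  // cudaSetDevice returns a cudaError_t status code; cudaSuccess is 0.
  cudaError_t status = cudaSetDevice(0);

  // Wrong: a bare truthiness check inverts the meaning, because the successful
  // status (0) is falsy while every error code is truthy.
  // assert(status);  // would fire on success and pass on failure

  // Correct: compare the status against cudaSuccess explicitly.
  assert(status == cudaSuccess);
  return 0;
}
```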
I fixed it as follows and rebuilt it so that this error disappeared.
```diff
diff --git a/core/conversion/conversionctx/ConversionCtx.cpp b/core/conversion/conversionctx/ConversionCtx.cpp
index ff23692..bc5bf68 100644
--- a/core/conversion/conversionctx/ConversionCtx.cpp
+++ b/core/conversion/conversionctx/ConversionCtx.cpp
@@ -109,7 +109,7 @@ ConversionCtx::ConversionCtx(BuilderSettings build_settings)
cfg->setEngineCapability(settings.capability);
if (settings.device.gpu_id) {
- TRTORCH_CHECK(cudaSetDevice(settings.device.gpu_id), "Unable to set gpu id: " << settings.device.gpu_id);
+ TRTORCH_CHECK(cudaSetDevice(settings.device.gpu_id) == cudaSuccess, "Unable to set gpu id: " << settings.device.gpu_id);
}
if (settings.device.device_type == nvinfer1::DeviceType::kDLA) {
```
You may also use `set_device`.
https://github.com/NVIDIA/TRTorch/blob/1d4b967a28e36beee048703f5645ee6fcc95793d/core/compiler.cpp#L176-L178
#### Error2
After making the above fix, I get the following error:
```
ERROR: [TRTorch Conversion Context] - Builder was created on device different than current device.
```
I moved the `cudaSetDevice` call to the beginning of the function and it worked fine.
```diff
diff --git a/core/conversion/conversionctx/ConversionCtx.cpp b/core/conversion/conversionctx/ConversionCtx.cpp
index ff23692..09a419c 100644
--- a/core/conversion/conversionctx/ConversionCtx.cpp
+++ b/core/conversion/conversionctx/ConversionCtx.cpp
@@ -47,6 +47,10 @@ ConversionCtx::ConversionCtx(BuilderSettings build_settings)
util::logging::get_logger().get_reportable_severity(),
util::logging::get_logger().get_is_colored_output_on()) {
// TODO: Support FP16 and FP32 from JIT information
+ if (settings.device.gpu_id) {
+ TRTORCH_CHECK(cudaSetDevice(settings.device.gpu_id) == cudaSuccess, "Unable to set gpu id: " << settings.device.gpu_id);
+ }
+
builder = nvinfer1::createInferBuilder(logger);
net = builder->createNetworkV2(1U << static_cast<uint32_t>(nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH));
@@ -108,10 +112,6 @@ ConversionCtx::ConversionCtx(BuilderSettings build_settings)
cfg->setDefaultDeviceType(settings.device.device_type);
cfg->setEngineCapability(settings.capability);
- if (settings.device.gpu_id) {
- TRTORCH_CHECK(cudaSetDevice(settings.device.gpu_id), "Unable to set gpu id: " << settings.device.gpu_id);
- }
-
if (settings.device.device_type == nvinfer1::DeviceType::kDLA) {
auto nbDLACores = builder->getNbDLACores();
TRTORCH_CHECK(
```
It's working, but I'm not sure if this is a good fix as there may be other side effects as well.
I would appreciate it if you could respond appropriately.
| [
{
"content": "from typing import List, Dict, Any\nimport torch\nfrom torch import nn\n\nimport trtorch._C\nfrom trtorch._compile_spec import _parse_compile_spec\nfrom trtorch._version import __version__\nfrom types import FunctionType\n\n\ndef compile(module: torch.jit.ScriptModule, compile_spec: Any) -> torch.jit.ScriptModule:\n \"\"\"Compile a TorchScript module for NVIDIA GPUs using TensorRT\n\n Takes a existing TorchScript module and a set of settings to configure the compiler\n and will convert methods to JIT Graphs which call equivalent TensorRT engines\n\n Converts specifically the forward method of a TorchScript Module\n\n Args:\n module (torch.jit.ScriptModule): Source module, a result of tracing or scripting a PyTorch\n ``torch.nn.Module``\n compile_spec (dict): Compilation settings including operating precision, target device, etc.\n One key is required which is ``input_shapes``, describing the input sizes or ranges for inputs\n to the graph. All other keys are optional\n\n .. code-block:: py\n\n compile_spec = {\n \"input_shapes\": [\n (1, 3, 224, 224), # Static input shape for input #1\n {\n \"min\": (1, 3, 224, 224),\n \"opt\": (1, 3, 512, 512),\n \"max\": (1, 3, 1024, 1024)\n } # Dynamic input shape for input #2\n ],\n \"device\": {\n \"device_type\": torch.device(\"cuda\"), # Type of device to run engine on (for DLA use trtorch.DeviceType.DLA)\n \"gpu_id\": 0, # Target gpu id to run engine (Use Xavier as gpu id for DLA)\n \"dla_core\": 0, # (DLA only) Target dla core id to run engine\n \"allow_gpu_fallback\": false, # (DLA only) Allow layers unsupported on DLA to run on GPU\n },\n \"op_precision\": torch.half, # Operating precision set to FP16\n \"refit\": false, # enable refit\n \"debug\": false, # enable debuggable engine\n \"strict_types\": false, # kernels should strictly run in operating precision\n \"capability\": trtorch.EngineCapability.DEFAULT, # Restrict kernel selection to safe gpu kernels or safe dla kernels\n \"num_min_timing_iters\": 2, # Number of minimization timing iterations used to select kernels\n \"num_avg_timing_iters\": 1, # Number of averaging timing iterations used to select kernels\n \"workspace_size\": 0, # Maximum size of workspace given to TensorRT\n \"max_batch_size\": 0, # Maximum batch size (must be >= 1 to be set, 0 means not set)\n }\n\n Input Sizes can be specified as torch sizes, tuples or lists. 
Op precisions can be specified using\n torch datatypes or trtorch datatypes and you can use either torch devices or the trtorch device type enum\n to select device type.\n\n Returns:\n torch.jit.ScriptModule: Compiled TorchScript Module, when run it will execute via TensorRT\n \"\"\"\n\n if isinstance(module, torch.jit.ScriptFunction):\n raise TypeError(\n \"torch.jit.ScriptFunction currently is not directly supported, wrap the function in a module to compile\")\n\n compiled_cpp_mod = trtorch._C.compile_graph(module._c, _parse_compile_spec(compile_spec))\n compiled_module = torch.jit._recursive.wrap_cpp_module(compiled_cpp_mod)\n return compiled_module\n\n\ndef convert_method_to_trt_engine(module: torch.jit.ScriptModule, method_name: str, compile_spec: Any) -> str:\n \"\"\"Convert a TorchScript module method to a serialized TensorRT engine\n\n Converts a specified method of a module to a serialized TensorRT engine given a dictionary of conversion settings\n\n Args:\n module (torch.jit.ScriptModule): Source module, a result of tracing or scripting a PyTorch\n ``torch.nn.Module``\n method_name (str): Name of method to convert\n compile_spec (dict): Compilation settings including operating precision, target device, etc.\n One key is required which is ``input_shapes``, describing the input sizes or ranges for inputs\n to the graph. All other keys are optional\n\n .. code-block:: py\n\n CompileSpec = {\n \"input_shapes\": [\n (1, 3, 224, 224), # Static input shape for input #1\n {\n \"min\": (1, 3, 224, 224),\n \"opt\": (1, 3, 512, 512),\n \"max\": (1, 3, 1024, 1024)\n } # Dynamic input shape for input #2\n ],\n \"device\": {\n \"device_type\": torch.device(\"cuda\"), # Type of device to run engine on (for DLA use trtorch.DeviceType.DLA)\n \"gpu_id\": 0, # Target gpu id to run engine (Use Xavier as gpu id for DLA)\n \"dla_core\": 0, # (DLA only) Target dla core id to run engine\n \"allow_gpu_fallback\": false, # (DLA only) Allow layers unsupported on DLA to run on GPU\n },\n \"op_precision\": torch.half, # Operating precision set to FP16\n \"disable_tf32\": False, # Force FP32 layers to use traditional as FP32 format vs the default behavior of rounding the inputs to 10-bit mantissas before multiplying, but accumulates the sum using 23-bit mantissas\n \"refit\": false, # enable refit\n \"debug\": false, # enable debuggable engine\n \"strict_types\": false, # kernels should strictly run in operating precision\n \"capability\": trtorch.EngineCapability.DEFAULT, # Restrict kernel selection to safe gpu kernels or safe dla kernels\n \"num_min_timing_iters\": 2, # Number of minimization timing iterations used to select kernels\n \"num_avg_timing_iters\": 1, # Number of averaging timing iterations used to select kernels\n \"workspace_size\": 0, # Maximum size of workspace given to TensorRT\n \"max_batch_size\": 0, # Maximum batch size (must be >= 1 to be set, 0 means not set)\n }\n\n Input Sizes can be specified as torch sizes, tuples or lists. 
Op precisions can be specified using\n torch datatypes or trtorch datatypes and you can use either torch devices or the trtorch device type enum\n to select device type.\n\n Returns:\n bytes: Serialized TensorRT engine, can either be saved to a file or deserialized via TensorRT APIs\n \"\"\"\n if isinstance(module, torch.jit.ScriptFunction):\n raise TypeError(\n \"torch.jit.ScriptFunctions currently are not directly supported, wrap the function in a module to compile\")\n\n return trtorch._C.convert_graph_to_trt_engine(module._c, method_name, _parse_compile_spec(compile_spec))\n\n\ndef check_method_op_support(module: torch.jit.ScriptModule, method_name: str) -> bool:\n \"\"\"Checks to see if a method is fully supported by TRTorch\n\n Checks if a method of a TorchScript module can be compiled by TRTorch, if not, a list of operators\n that are not supported are printed out and the function returns false, else true.\n\n Args:\n module (torch.jit.ScriptModule): Source module, a result of tracing or scripting a PyTorch\n ``torch.nn.Module``\n method_name (str): Name of method to check\n\n Returns:\n bool: True if supported Method\n \"\"\"\n return trtorch._C.check_method_op_support(module._c, method_name)\n\n\ndef dump_build_info():\n \"\"\"Prints build information about the TRTorch distribution to stdout\n \"\"\"\n print(get_build_info())\n\n\ndef get_build_info() -> str:\n \"\"\"Returns a string containing the build information of TRTorch distribution\n\n Returns:\n str: String containing the build information for TRTorch distribution\n \"\"\"\n build_info = trtorch._C.get_build_info()\n build_info = \"TRTorch Version: \" + str(__version__) + '\\n' + build_info\n return build_info\n",
"path": "py/trtorch/_compiler.py"
}
] | [
{
"content": "from typing import List, Dict, Any\nimport torch\nfrom torch import nn\n\nimport trtorch._C\nfrom trtorch._compile_spec import _parse_compile_spec\nfrom trtorch._version import __version__\nfrom types import FunctionType\n\n\ndef compile(module: torch.jit.ScriptModule, compile_spec: Any) -> torch.jit.ScriptModule:\n \"\"\"Compile a TorchScript module for NVIDIA GPUs using TensorRT\n\n Takes a existing TorchScript module and a set of settings to configure the compiler\n and will convert methods to JIT Graphs which call equivalent TensorRT engines\n\n Converts specifically the forward method of a TorchScript Module\n\n Args:\n module (torch.jit.ScriptModule): Source module, a result of tracing or scripting a PyTorch\n ``torch.nn.Module``\n compile_spec (dict): Compilation settings including operating precision, target device, etc.\n One key is required which is ``input_shapes``, describing the input sizes or ranges for inputs\n to the graph. All other keys are optional\n\n .. code-block:: py\n\n compile_spec = {\n \"input_shapes\": [\n (1, 3, 224, 224), # Static input shape for input #1\n {\n \"min\": (1, 3, 224, 224),\n \"opt\": (1, 3, 512, 512),\n \"max\": (1, 3, 1024, 1024)\n } # Dynamic input shape for input #2\n ],\n \"device\": {\n \"device_type\": torch.device(\"cuda\"), # Type of device to run engine on (for DLA use trtorch.DeviceType.DLA)\n \"gpu_id\": 0, # Target gpu id to run engine (Use Xavier as gpu id for DLA)\n \"dla_core\": 0, # (DLA only) Target dla core id to run engine\n \"allow_gpu_fallback\": false, # (DLA only) Allow layers unsupported on DLA to run on GPU\n },\n \"op_precision\": torch.half, # Operating precision set to FP16\n \"refit\": false, # enable refit\n \"debug\": false, # enable debuggable engine\n \"strict_types\": false, # kernels should strictly run in operating precision\n \"capability\": trtorch.EngineCapability.DEFAULT, # Restrict kernel selection to safe gpu kernels or safe dla kernels\n \"num_min_timing_iters\": 2, # Number of minimization timing iterations used to select kernels\n \"num_avg_timing_iters\": 1, # Number of averaging timing iterations used to select kernels\n \"workspace_size\": 0, # Maximum size of workspace given to TensorRT\n \"max_batch_size\": 0, # Maximum batch size (must be >= 1 to be set, 0 means not set)\n }\n\n Input Sizes can be specified as torch sizes, tuples or lists. 
Op precisions can be specified using\n torch datatypes or trtorch datatypes and you can use either torch devices or the trtorch device type enum\n to select device type.\n\n Returns:\n torch.jit.ScriptModule: Compiled TorchScript Module, when run it will execute via TensorRT\n \"\"\"\n\n if isinstance(module, torch.jit.ScriptFunction):\n raise TypeError(\n \"torch.jit.ScriptFunction currently is not directly supported, wrap the function in a module to compile\")\n\n compiled_cpp_mod = trtorch._C.compile_graph(module._c, _parse_compile_spec(compile_spec))\n compiled_module = torch.jit._recursive.wrap_cpp_module(compiled_cpp_mod)\n return compiled_module\n\n\ndef convert_method_to_trt_engine(module: torch.jit.ScriptModule, method_name: str, compile_spec: Any) -> str:\n \"\"\"Convert a TorchScript module method to a serialized TensorRT engine\n\n Converts a specified method of a module to a serialized TensorRT engine given a dictionary of conversion settings\n\n Args:\n module (torch.jit.ScriptModule): Source module, a result of tracing or scripting a PyTorch\n ``torch.nn.Module``\n method_name (str): Name of method to convert\n compile_spec (dict): Compilation settings including operating precision, target device, etc.\n One key is required which is ``input_shapes``, describing the input sizes or ranges for inputs\n to the graph. All other keys are optional\n\n .. code-block:: py\n\n CompileSpec = {\n \"input_shapes\": [\n (1, 3, 224, 224), # Static input shape for input #1\n {\n \"min\": (1, 3, 224, 224),\n \"opt\": (1, 3, 512, 512),\n \"max\": (1, 3, 1024, 1024)\n } # Dynamic input shape for input #2\n ],\n \"device\": {\n \"device_type\": torch.device(\"cuda\"), # Type of device to run engine on (for DLA use trtorch.DeviceType.DLA)\n \"gpu_id\": 0, # Target gpu id to run engine (Use Xavier as gpu id for DLA)\n \"dla_core\": 0, # (DLA only) Target dla core id to run engine\n \"allow_gpu_fallback\": false, # (DLA only) Allow layers unsupported on DLA to run on GPU\n },\n \"op_precision\": torch.half, # Operating precision set to FP16\n \"disable_tf32\": False, # Force FP32 layers to use traditional as FP32 format vs the default behavior of rounding the inputs to 10-bit mantissas before multiplying, but accumulates the sum using 23-bit mantissas\n \"refit\": false, # enable refit\n \"debug\": false, # enable debuggable engine\n \"strict_types\": false, # kernels should strictly run in operating precision\n \"capability\": trtorch.EngineCapability.DEFAULT, # Restrict kernel selection to safe gpu kernels or safe dla kernels\n \"num_min_timing_iters\": 2, # Number of minimization timing iterations used to select kernels\n \"num_avg_timing_iters\": 1, # Number of averaging timing iterations used to select kernels\n \"workspace_size\": 0, # Maximum size of workspace given to TensorRT\n \"max_batch_size\": 0, # Maximum batch size (must be >= 1 to be set, 0 means not set)\n }\n\n Input Sizes can be specified as torch sizes, tuples or lists. 
Op precisions can be specified using\n torch datatypes or trtorch datatypes and you can use either torch devices or the trtorch device type enum\n to select device type.\n\n Returns:\n bytes: Serialized TensorRT engine, can either be saved to a file or deserialized via TensorRT APIs\n \"\"\"\n if isinstance(module, torch.jit.ScriptFunction):\n raise TypeError(\n \"torch.jit.ScriptFunctions currently are not directly supported, wrap the function in a module to compile\")\n\n return trtorch._C.convert_graph_to_trt_engine(module._c, method_name, _parse_compile_spec(compile_spec))\n\n\ndef check_method_op_support(module: torch.jit.ScriptModule, method_name: str) -> bool:\n \"\"\"Checks to see if a method is fully supported by TRTorch\n\n Checks if a method of a TorchScript module can be compiled by TRTorch, if not, a list of operators\n that are not supported are printed out and the function returns false, else true.\n\n Args:\n module (torch.jit.ScriptModule): Source module, a result of tracing or scripting a PyTorch\n ``torch.nn.Module``\n method_name (str): Name of method to check\n\n Returns:\n bool: True if supported Method\n \"\"\"\n return trtorch._C.check_method_op_support(module._c, method_name)\n\n\ndef dump_build_info():\n \"\"\"Prints build information about the TRTorch distribution to stdout\n \"\"\"\n print(get_build_info())\n\n\ndef get_build_info() -> str:\n \"\"\"Returns a string containing the build information of TRTorch distribution\n\n Returns:\n str: String containing the build information for TRTorch distribution\n \"\"\"\n build_info = trtorch._C.get_build_info()\n build_info = \"TRTorch Version: \" + str(__version__) + '\\n' + build_info\n return build_info\n\ndef set_device(gpu_id):\n trtorch._C.set_device(gpu_id)\n",
"path": "py/trtorch/_compiler.py"
}
] | diff --git a/core/conversion/conversionctx/ConversionCtx.cpp b/core/conversion/conversionctx/ConversionCtx.cpp
index ff236921c2..9d47026c60 100644
--- a/core/conversion/conversionctx/ConversionCtx.cpp
+++ b/core/conversion/conversionctx/ConversionCtx.cpp
@@ -47,6 +47,11 @@ ConversionCtx::ConversionCtx(BuilderSettings build_settings)
util::logging::get_logger().get_reportable_severity(),
util::logging::get_logger().get_is_colored_output_on()) {
// TODO: Support FP16 and FP32 from JIT information
+ if (settings.device.gpu_id) {
+ TRTORCH_CHECK(
+ cudaSetDevice(settings.device.gpu_id) == cudaSuccess, "Unable to set gpu id: " << settings.device.gpu_id);
+ }
+
builder = nvinfer1::createInferBuilder(logger);
net = builder->createNetworkV2(1U << static_cast<uint32_t>(nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH));
@@ -108,10 +113,6 @@ ConversionCtx::ConversionCtx(BuilderSettings build_settings)
cfg->setDefaultDeviceType(settings.device.device_type);
cfg->setEngineCapability(settings.capability);
- if (settings.device.gpu_id) {
- TRTORCH_CHECK(cudaSetDevice(settings.device.gpu_id), "Unable to set gpu id: " << settings.device.gpu_id);
- }
-
if (settings.device.device_type == nvinfer1::DeviceType::kDLA) {
auto nbDLACores = builder->getNbDLACores();
TRTORCH_CHECK(
diff --git a/docsrc/py_api/trtorch.rst b/docsrc/py_api/trtorch.rst
index d7376cb2f0..6063b4c69c 100644
--- a/docsrc/py_api/trtorch.rst
+++ b/docsrc/py_api/trtorch.rst
@@ -11,6 +11,8 @@ trtorch
Functions
------------
+.. autofunction:: set_device
+
.. autofunction:: compile
.. autofunction:: convert_method_to_trt_engine
diff --git a/py/trtorch/_compiler.py b/py/trtorch/_compiler.py
index 76a7923ca2..fdb6fc2e80 100644
--- a/py/trtorch/_compiler.py
+++ b/py/trtorch/_compiler.py
@@ -156,3 +156,6 @@ def get_build_info() -> str:
build_info = trtorch._C.get_build_info()
build_info = "TRTorch Version: " + str(__version__) + '\n' + build_info
return build_info
+
+def set_device(gpu_id):
+ trtorch._C.set_device(gpu_id)
diff --git a/py/trtorch/csrc/trtorch_py.cpp b/py/trtorch/csrc/trtorch_py.cpp
index 420e27cccc..418423db41 100644
--- a/py/trtorch/csrc/trtorch_py.cpp
+++ b/py/trtorch/csrc/trtorch_py.cpp
@@ -15,6 +15,10 @@ namespace py = pybind11;
namespace trtorch {
namespace pyapi {
+void set_device(const int device_id) {
+ core::set_device(device_id);
+}
+
torch::jit::Module CompileGraph(const torch::jit::Module& mod, CompileSpec& info) {
py::gil_scoped_acquire gil;
auto trt_mod = core::CompileGraph(mod, info.toInternalCompileSpec());
@@ -146,6 +150,7 @@ PYBIND11_MODULE(_C, m) {
m.def("_get_is_colored_output_on", &logging::get_is_colored_output_on, "Get if the logging output will be colored");
m.def("_set_is_colored_output_on", &logging::set_is_colored_output_on, "Set if the logging output should be colored");
m.def("_log", &logging::log, "Add a message to the logger");
+ m.def("set_device", &trtorch::pyapi::set_device, "Set CUDA device id");
py::enum_<core::util::logging::LogLevel>(m, "LogLevel", py::arithmetic())
.value("INTERNAL_ERROR", core::util::logging::LogLevel::kINTERNAL_ERROR)
diff --git a/tests/py/BUILD b/tests/py/BUILD
index d8798f0175..2f20daaf67 100644
--- a/tests/py/BUILD
+++ b/tests/py/BUILD
@@ -15,9 +15,23 @@ py_test(
"test_api.py",
"model_test_case.py"
] + select({
- ":aarch64_linux": [
- "test_api_dla.py"
- ],
+ ":aarch64_linux": [
+ "test_api_dla.py"
+ ],
+ "//conditions:default" : []
+ }),
+ deps = [
+ requirement("torchvision")
+ ]
+)
+
+# Following multi_gpu test is only targeted for multi-gpu configurations. It is not included in the test suite by default.
+py_test(
+ name = "test_multi_gpu",
+ srcs = [
+ "test_multi_gpu.py",
+ "model_test_case.py"
+ ],
"//conditions:default" : []
}),
deps = [
diff --git a/tests/py/test_multi_gpu.py b/tests/py/test_multi_gpu.py
new file mode 100644
index 0000000000..4fb433f441
--- /dev/null
+++ b/tests/py/test_multi_gpu.py
@@ -0,0 +1,69 @@
+import unittest
+import trtorch
+import torch
+import torchvision.models as models
+
+from model_test_case import ModelTestCase
+
+class TestMultiGpuSwitching(ModelTestCase):
+ def setUp(self):
+ if torch.cuda.device_count() < 2:
+ self.fail("Test is not relevant for this platform since number of available CUDA devices is less than 2")
+
+ trtorch.set_device(0)
+ self.target_gpu = 1
+ self.input = torch.randn((1, 3, 224, 224)).to("cuda:1")
+ self.model = self.model.to("cuda:1")
+ self.traced_model = torch.jit.trace(self.model, [self.input])
+ self.scripted_model = torch.jit.script(self.model)
+
+ def test_compile_traced(self):
+ trtorch.set_device(0)
+ compile_spec = {
+ "input_shapes": [self.input.shape],
+ "device": {
+ "device_type": trtorch.DeviceType.GPU,
+ "gpu_id": self.target_gpu,
+ "dla_core": 0,
+ "allow_gpu_fallback": False,
+ "disable_tf32": False
+ }
+ }
+
+ trt_mod = trtorch.compile(self.traced_model, compile_spec)
+ trtorch.set_device(self.target_gpu)
+ same = (trt_mod(self.input) - self.traced_model(self.input)).abs().max()
+ trtorch.set_device(0)
+ self.assertTrue(same < 2e-3)
+
+ def test_compile_script(self):
+ trtorch.set_device(0)
+ compile_spec = {
+ "input_shapes": [self.input.shape],
+ "device": {
+ "device_type": trtorch.DeviceType.GPU,
+ "gpu_id": self.target_gpu,
+ "dla_core": 0,
+ "allow_gpu_fallback": False,
+ "disable_tf32": False
+ }
+ }
+
+ trt_mod = trtorch.compile(self.scripted_model, compile_spec)
+ trtorch.set_device(self.target_gpu)
+ same = (trt_mod(self.input) - self.scripted_model(self.input)).abs().max()
+ trtorch.set_device(0)
+ self.assertTrue(same < 2e-3)
+
+def test_suite():
+ suite = unittest.TestSuite()
+ suite.addTest(TestMultiGpuSwitching.parametrize(TestMultiGpuSwitching, model=models.resnet18(pretrained=True)))
+
+ return suite
+
+suite = test_suite()
+
+runner = unittest.TextTestRunner()
+result = runner.run(suite)
+
+exit(int(not result.wasSuccessful()))
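
The record above adds a `trtorch.set_device` binding plus multi-GPU handling in the conversion context. The snippet below is a hedged usage sketch, not taken from the PR itself: it assumes a machine with at least two CUDA devices and torchvision installed, and mirrors the compile spec used in `test_multi_gpu.py` in the diff above.

```python
# Hedged usage sketch for the trtorch.set_device API added in the diff above.
# Assumes >= 2 CUDA devices and torchvision; mirrors tests/py/test_multi_gpu.py.
import torch
import torchvision.models as models
import trtorch

trtorch.set_device(1)  # make GPU 1 the active CUDA device before compiling

model = models.resnet18(pretrained=True).eval().to("cuda:1")
example = torch.randn((1, 3, 224, 224)).to("cuda:1")
traced = torch.jit.trace(model, [example])

compile_spec = {
    "input_shapes": [example.shape],
    "device": {
        "device_type": trtorch.DeviceType.GPU,
        "gpu_id": 1,              # engine is built for and runs on GPU 1
        "dla_core": 0,
        "allow_gpu_fallback": False,
        "disable_tf32": False,
    },
}
trt_mod = trtorch.compile(traced, compile_spec)
out = trt_mod(example)  # runs on the selected GPU
```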
|
Gallopsled__pwntools-1716 | Importing pwntools breaks carriage return
Normally, a loop like below
```
for i in range(0, 5):
print(str(i), end="\r")
```
should print each number in the same space. However, when I import pwntools, this behavior breaks and each one is printed on a new line or sequentially.
System is ubuntu 18.04. Latest pwntools version.
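
For reference, a minimal reproduction sketch (hypothetical, not from the original report): it assumes an interactive terminal, and the only addition to the loop above is the pwntools import, whose terminal layer (see `pwnlib/term/term.py` later in this record) may wrap `sys.stdout` when attached to a tty.

```python
# Hypothetical reproduction sketch (assumes an interactive terminal / tty).
# The only change from the loop in the report is the pwntools import.
from pwn import *  # pwnlib's terminal layer may wrap sys.stdout on a tty

import time

for i in range(0, 5):
    print(str(i), end="\r")  # expected: digits overwrite each other in place
    time.sleep(0.2)          # small delay so the overwrite is visible
print()                      # move to a fresh line when done
```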
| [
{
"content": "from __future__ import absolute_import\nfrom __future__ import division\n\nimport atexit\nimport os\nimport re\nimport signal\nimport six\nimport struct\nimport sys\nimport threading\nimport traceback\n\nif sys.platform != 'win32':\n import fcntl\n import termios\n\nfrom pwnlib.context import ContextType\nfrom pwnlib.term import termcap\n\n__all__ = ['output', 'init']\n\n# we assume no terminal can display more lines than this\nMAX_TERM_HEIGHT = 200\n\n# default values\nwidth = 80\nheight = 25\n\n# list of callbacks triggered on SIGWINCH\non_winch = []\n\n\n\nsettings = None\n_graphics_mode = False\n\nfd = sys.stdout\n\ndef show_cursor():\n do('cnorm')\n\ndef hide_cursor():\n do('civis')\n\ndef update_geometry():\n global width, height\n hw = fcntl.ioctl(fd.fileno(), termios.TIOCGWINSZ, '1234')\n h, w = struct.unpack('hh', hw)\n # if the window shrunk and theres still free space at the bottom move\n # everything down\n if h < height and scroll == 0:\n if cells and cells[-1].end[0] < 0:\n delta = min(height - h, 1 - cells[-1].end[0])\n for cell in cells:\n cell.end = (cell.end[0] + delta, cell.end[1])\n cell.start = (cell.start[0] + delta, cell.start[1])\n height, width = h, w\n\ndef handler_sigwinch(signum, stack):\n update_geometry()\n redraw()\n for cb in on_winch:\n cb()\n\ndef handler_sigstop(signum, stack):\n resetterm()\n os.kill(os.getpid(), signal.SIGSTOP)\n\ndef handler_sigcont(signum, stack):\n setupterm()\n redraw()\n\ndef setupterm():\n global settings\n update_geometry()\n hide_cursor()\n do('smkx') # keypad mode\n if not settings:\n settings = termios.tcgetattr(fd.fileno())\n mode = termios.tcgetattr(fd.fileno())\n IFLAG = 0\n OFLAG = 1\n CFLAG = 2\n LFLAG = 3\n ISPEED = 4\n OSPEED = 5\n CC = 6\n mode[LFLAG] = mode[LFLAG] & ~(termios.ECHO | termios.ICANON | termios.IEXTEN)\n mode[CC][termios.VMIN] = 1\n mode[CC][termios.VTIME] = 0\n termios.tcsetattr(fd, termios.TCSAFLUSH, mode)\n\ndef resetterm():\n if settings:\n termios.tcsetattr(fd.fileno(), termios.TCSADRAIN, settings)\n show_cursor()\n do('rmkx')\n fd.write(' \\x08') # XXX: i don't know why this is needed...\n # only necessary when suspending the process\n\ndef init():\n atexit.register(resetterm)\n setupterm()\n signal.signal(signal.SIGWINCH, handler_sigwinch)\n signal.signal(signal.SIGTSTP, handler_sigstop)\n signal.signal(signal.SIGCONT, handler_sigcont)\n # we start with one empty cell at the current cursor position\n put('\\x1b[6n')\n fd.flush()\n s = ''\n while True:\n c = os.read(fd.fileno(), 1)\n if not isinstance(c, six.string_types):\n c = c.decode('utf-8')\n s += c\n if c == 'R':\n break\n row, col = re.findall('\\x1b' + r'\\[(\\d*);(\\d*)R', s)[0]\n row = int(row) - height\n col = int(col) - 1\n cell = Cell()\n cell.start = (row, col)\n cell.end = (row, col)\n cell.content = []\n cell.frozen = True\n cell.float = 0\n cell.indent = 0\n cells.append(cell)\n class Wrapper:\n def __init__(self, fd):\n self._fd = fd\n def write(self, s):\n output(s, frozen = True)\n def __getattr__(self, k):\n return self._fd.__getattribute__(k)\n if sys.stdout.isatty():\n sys.stdout = Wrapper(sys.stdout)\n if sys.stderr.isatty():\n sys.stderr = Wrapper(sys.stderr)\n\n console = ContextType.defaults['log_console']\n if console.isatty():\n ContextType.defaults['log_console'] = Wrapper(console)\n\n # freeze all cells if an exception is thrown\n orig_hook = sys.excepthook\n def hook(*args):\n resetterm()\n for c in cells:\n c.frozen = True\n c.float = 0\n if orig_hook:\n orig_hook(*args)\n else:\n 
traceback.print_exception(*args)\n # this is a bit esoteric\n # look here for details: https://stackoverflow.com/questions/12790328/how-to-silence-sys-excepthook-is-missing-error\n if fd.fileno() == 2:\n os.close(fd.fileno())\n sys.excepthook = hook\n\ndef put(s):\n if not isinstance(s, six.string_types):\n s = s.decode('utf-8')\n fd.write(s)\n\ndef flush(): fd.flush()\n\ndef do(c, *args):\n s = termcap.get(c, *args)\n if s:\n put(s)\n\ndef goto(r, c):\n do('cup', r - scroll + height - 1, c)\n\ncells = []\nscroll = 0\n\nclass Cell(object):\n pass\n\nclass Handle:\n def __init__(self, cell, is_floating):\n self.h = id(cell)\n self.is_floating = is_floating\n def update(self, s):\n update(self.h, s)\n def freeze(self):\n freeze(self.h)\n def delete(self):\n delete(self.h)\n\nSTR, CSI, LF, BS, CR, SOH, STX, OOB = range(8)\ndef parse_csi(buf, offset):\n i = offset\n while i < len(buf):\n c = buf[i]\n if c >= 0x40 and c < 0x80:\n break\n i += 1\n if i >= len(buf):\n return\n end = i\n cmd = [c, None, None]\n i = offset\n in_num = False\n args = []\n if buf[i] >= ord('<') and buf[i] <= ord('?'):\n cmd[1] = buf[i]\n i += 1\n while i < end:\n c = buf[i]\n if c >= ord('0') and c <= ord('9'):\n if not in_num:\n args.append(c - ord('0'))\n in_num = True\n else:\n args[-1] = args[-1] * 10 + c - ord('0')\n elif c == ord(';'):\n if not in_num:\n args.append(None)\n in_num = False\n if len(args) > 16:\n break\n elif c >= 0x20 and c <= 0x2f:\n cmd[2] = c\n break\n i += 1\n return cmd, args, end + 1\n\ndef parse_utf8(buf, offset):\n c0 = buf[offset]\n n = 0\n if c0 & 0b11100000 == 0b11000000:\n n = 2\n elif c0 & 0b11110000 == 0b11100000:\n n = 3\n elif c0 & 0b11111000 == 0b11110000:\n n = 4\n elif c0 & 0b11111100 == 0b11111000:\n n = 5\n elif c0 & 0b11111110 == 0b11111100:\n n = 6\n if n:\n return offset + n\n\ndef parse(s):\n global _graphics_mode\n if isinstance(s, six.text_type):\n s = s.encode('utf8')\n out = []\n buf = bytearray(s)\n i = 0\n while i < len(buf):\n x = None\n c = buf[i]\n if c >= 0x20 and c <= 0x7e:\n x = (STR, [six.int2byte(c)])\n i += 1\n elif c & 0xc0:\n j = parse_utf8(buf, i)\n if j:\n x = (STR, [b''.join(map(six.int2byte, buf[i : j]))])\n i = j\n elif c == 0x1b and len(buf) > i + 1:\n c1 = buf[i + 1]\n if c1 == ord('['):\n ret = parse_csi(buf, i + 2)\n if ret:\n cmd, args, j = ret\n x = (CSI, (cmd, args, b''.join(map(six.int2byte, buf[i : j]))))\n i = j\n elif c1 == ord(']'):\n # XXX: this is a dirty hack:\n # we still need to do our homework on this one, but what we do\n # here is supporting setting the terminal title and updating\n # the color map. 
we promise to do it properly in the next\n # iteration of this terminal emulation/compatibility layer\n # related: https://unix.stackexchange.com/questions/5936/can-i-set-my-local-machines-terminal-colors-to-use-those-of-the-machine-i-ssh-i\n try:\n j = s.index('\\x07', i)\n except Exception:\n try:\n j = s.index('\\x1b\\\\', i)\n except Exception:\n j = 1\n x = (OOB, s[i:j + 1])\n i = j + 1\n elif c1 in map(ord, '()'): # select G0 or G1\n i += 3\n continue\n elif c1 in map(ord, '>='): # set numeric/application keypad mode\n i += 2\n continue\n elif c1 == ord('P'):\n _graphics_mode = True\n i += 2\n continue\n elif c1 == ord('\\\\'):\n _graphics_mode = False\n i += 2\n continue\n elif c == 0x01:\n x = (SOH, None)\n i += 1\n elif c == 0x02:\n x = (STX, None)\n i += 1\n elif c == 0x08:\n x = (BS, None)\n i += 1\n elif c == 0x09:\n x = (STR, [b' ']) # who the **** uses tabs anyway?\n i += 1\n elif c == 0x0a:\n x = (LF, None)\n i += 1\n elif c == 0x0d:\n x = (CR, None)\n i += 1\n else:\n i += 1\n\n if _graphics_mode:\n continue\n if x is None:\n x = (STR, [six.int2byte(c) for c in bytearray(b'\\\\x%02x' % c)])\n i += 1\n if x[0] == STR and out and out[-1][0] == STR:\n out[-1][1].extend(x[1])\n else:\n out.append(x)\n return out\n\nsaved_cursor = None\n# XXX: render cells that is half-way on the screen\ndef render_cell(cell, clear_after = False):\n global scroll, saved_cursor\n row, col = cell.start\n row = row - scroll + height - 1\n if row < 0:\n return\n indent = min(cell.indent, width - 1)\n for t, x in cell.content:\n if t == STR:\n i = 0\n while i < len(x):\n if col >= width:\n col = 0\n row += 1\n if col < indent:\n put(' ' * (indent - col))\n col = indent\n c = x[i]\n if not hasattr(c, 'encode'):\n c = c.decode('utf-8', 'backslashreplace')\n put(c)\n col += 1\n i += 1\n elif t == CSI:\n cmd, args, c = x\n put(c)\n # figure out if the cursor moved (XXX: here probably be bugs)\n if cmd[1] is None and cmd[2] is None:\n c = cmd[0]\n if len(args) >= 1:\n n = args[0]\n else:\n n = None\n if len(args) >= 2:\n m = args[1]\n else:\n m = None\n if c == ord('A'):\n n = n or 1\n row = max(0, row - n)\n elif c == ord('B'):\n n = n or 1\n row = min(height - 1, row + n)\n elif c == ord('C'):\n n = n or 1\n col = min(width - 1, col + n)\n elif c == ord('D'):\n n = n or 1\n col = max(0, col - n)\n elif c == ord('E'):\n n = n or 1\n row = min(height - 1, row + n)\n col = 0\n elif c == ord('F'):\n n = n or 1\n row = max(0, row - n)\n col = 0\n elif c == ord('G'):\n n = n or 1\n col = min(width - 1, n - 1)\n elif c == ord('H') or c == ord('f'):\n n = n or 1\n m = m or 1\n row = min(height - 1, n - 1)\n col = min(width - 1, m - 1)\n elif c == ord('S'):\n n = n or 1\n scroll += n\n row = max(0, row - n)\n elif c == ord('T'):\n n = n or 1\n scroll -= n\n row = min(height - 1, row + n)\n elif c == ord('s'):\n saved_cursor = row, col\n elif c == ord('u'):\n if saved_cursor:\n row, col = saved_cursor\n elif t == LF:\n if clear_after and col <= width - 1:\n put('\\x1b[K') # clear line\n put('\\n')\n col = 0\n row += 1\n elif t == BS:\n if col > 0:\n put('\\x08')\n col -= 1\n elif t == CR:\n# put('\\r')\n col = 0\n elif t == SOH:\n put('\\x01')\n elif t == STX:\n put('\\x02')\n elif t == OOB:\n put(x)\n if row >= height:\n d = row - height + 1\n scroll += d\n row -= d\n row = row + scroll - height + 1\n cell.end = (row, col)\n\ndef render_from(i, force = False, clear_after = False):\n e = None\n # `i` should always be a valid cell, but in case i f***ed up somewhere, I'll\n # check it and just do nothing if 
something went wrong.\n if i < 0 or i >= len(cells):\n return\n goto(*cells[i].start)\n for c in cells[i:]:\n if not force and c.start == e:\n goto(*cells[-1].end)\n break\n elif e:\n c.start = e\n render_cell(c, clear_after = clear_after)\n e = c.end\n if clear_after and (e[0] < scroll or e[1] < width - 1):\n put('\\x1b[J')\n flush()\n\ndef redraw():\n for i in reversed(range(len(cells))):\n row = cells[i].start[0]\n if row - scroll + height <= 0:\n # XXX: remove this line when render_cell is fixed\n i += 1\n break\n else:\n if not cells:\n return\n render_from(i, force = True, clear_after = True)\n\nlock = threading.Lock()\ndef output(s = '', float = False, priority = 10, frozen = False,\n indent = 0, before = None, after = None):\n with lock:\n rel = before or after\n if rel:\n i, _ = find_cell(rel.h)\n is_floating = rel.is_floating\n float = cells[i].float\n if before:\n i -= 1\n elif float and priority:\n is_floating = True\n float = priority\n for i in reversed(range(len(cells))):\n if cells[i].float <= float:\n break\n else:\n is_floating = False\n i = len(cells) - 1\n while i > 0 and cells[i].float:\n i -= 1\n # put('xx %d\\n' % i)\n cell = Cell()\n cell.content = parse(s)\n cell.frozen = frozen\n cell.float = float\n cell.indent = indent\n cell.start = cells[i].end\n i += 1\n cells.insert(i, cell)\n h = Handle(cell, is_floating)\n if not s:\n cell.end = cell.start\n return h\n # the invariant is that the cursor is placed after the last cell\n if i == len(cells) - 1:\n render_cell(cell, clear_after = True)\n flush()\n else:\n render_from(i, clear_after = True)\n return h\n\ndef find_cell(h):\n for i, c in enumerate(cells):\n if id(c) == h:\n return i, c\n raise KeyError\n\ndef discard_frozen():\n # we assume that no cell will shrink very much and that noone has space\n # for more than MAX_TERM_HEIGHT lines in their terminal\n while len(cells) > 1 and scroll - cells[0].end[0] > MAX_TERM_HEIGHT:\n c = cells.pop(0)\n del c # trigger GC maybe, kthxbai\n\ndef update(h, s):\n with lock:\n try:\n i, c = find_cell(h)\n except KeyError:\n return\n if not c.frozen and c.content != s:\n c.content = parse(s)\n render_from(i, clear_after = True)\n\ndef freeze(h):\n try:\n i, c = find_cell(h)\n c.frozen = True\n c.float = 0\n if c.content == []:\n cells.pop(i)\n discard_frozen()\n except KeyError:\n return\n\ndef delete(h):\n update(h, '')\n freeze(h)\n",
"path": "pwnlib/term/term.py"
}
] | [
{
"content": "from __future__ import absolute_import\nfrom __future__ import division\n\nimport atexit\nimport os\nimport re\nimport signal\nimport six\nimport struct\nimport sys\nimport threading\nimport traceback\n\nif sys.platform != 'win32':\n import fcntl\n import termios\n\nfrom pwnlib.context import ContextType\nfrom pwnlib.term import termcap\n\n__all__ = ['output', 'init']\n\n# we assume no terminal can display more lines than this\nMAX_TERM_HEIGHT = 200\n\n# default values\nwidth = 80\nheight = 25\n\n# list of callbacks triggered on SIGWINCH\non_winch = []\n\n\n\nsettings = None\n_graphics_mode = False\n\nfd = sys.stdout\n\ndef show_cursor():\n do('cnorm')\n\ndef hide_cursor():\n do('civis')\n\ndef update_geometry():\n global width, height\n hw = fcntl.ioctl(fd.fileno(), termios.TIOCGWINSZ, '1234')\n h, w = struct.unpack('hh', hw)\n # if the window shrunk and theres still free space at the bottom move\n # everything down\n if h < height and scroll == 0:\n if cells and cells[-1].end[0] < 0:\n delta = min(height - h, 1 - cells[-1].end[0])\n for cell in cells:\n cell.end = (cell.end[0] + delta, cell.end[1])\n cell.start = (cell.start[0] + delta, cell.start[1])\n height, width = h, w\n\ndef handler_sigwinch(signum, stack):\n update_geometry()\n redraw()\n for cb in on_winch:\n cb()\n\ndef handler_sigstop(signum, stack):\n resetterm()\n os.kill(os.getpid(), signal.SIGSTOP)\n\ndef handler_sigcont(signum, stack):\n setupterm()\n redraw()\n\ndef setupterm():\n global settings\n update_geometry()\n hide_cursor()\n do('smkx') # keypad mode\n if not settings:\n settings = termios.tcgetattr(fd.fileno())\n mode = termios.tcgetattr(fd.fileno())\n IFLAG = 0\n OFLAG = 1\n CFLAG = 2\n LFLAG = 3\n ISPEED = 4\n OSPEED = 5\n CC = 6\n mode[LFLAG] = mode[LFLAG] & ~(termios.ECHO | termios.ICANON | termios.IEXTEN)\n mode[CC][termios.VMIN] = 1\n mode[CC][termios.VTIME] = 0\n termios.tcsetattr(fd, termios.TCSAFLUSH, mode)\n\ndef resetterm():\n if settings:\n termios.tcsetattr(fd.fileno(), termios.TCSADRAIN, settings)\n show_cursor()\n do('rmkx')\n fd.write(' \\x08') # XXX: i don't know why this is needed...\n # only necessary when suspending the process\n\ndef init():\n atexit.register(resetterm)\n setupterm()\n signal.signal(signal.SIGWINCH, handler_sigwinch)\n signal.signal(signal.SIGTSTP, handler_sigstop)\n signal.signal(signal.SIGCONT, handler_sigcont)\n # we start with one empty cell at the current cursor position\n put('\\x1b[6n')\n fd.flush()\n s = ''\n while True:\n c = os.read(fd.fileno(), 1)\n if not isinstance(c, six.string_types):\n c = c.decode('utf-8')\n s += c\n if c == 'R':\n break\n row, col = re.findall('\\x1b' + r'\\[(\\d*);(\\d*)R', s)[0]\n row = int(row) - height\n col = int(col) - 1\n cell = Cell()\n cell.start = (row, col)\n cell.end = (row, col)\n cell.content = []\n cell.frozen = True\n cell.float = 0\n cell.indent = 0\n cells.append(cell)\n class Wrapper:\n def __init__(self, fd):\n self._fd = fd\n def write(self, s):\n output(s, frozen = True)\n def __getattr__(self, k):\n return self._fd.__getattribute__(k)\n if sys.stdout.isatty():\n sys.stdout = Wrapper(sys.stdout)\n if sys.stderr.isatty():\n sys.stderr = Wrapper(sys.stderr)\n\n console = ContextType.defaults['log_console']\n if console.isatty():\n ContextType.defaults['log_console'] = Wrapper(console)\n\n # freeze all cells if an exception is thrown\n orig_hook = sys.excepthook\n def hook(*args):\n resetterm()\n for c in cells:\n c.frozen = True\n c.float = 0\n if orig_hook:\n orig_hook(*args)\n else:\n 
traceback.print_exception(*args)\n # this is a bit esoteric\n # look here for details: https://stackoverflow.com/questions/12790328/how-to-silence-sys-excepthook-is-missing-error\n if fd.fileno() == 2:\n os.close(fd.fileno())\n sys.excepthook = hook\n\ndef put(s):\n if not isinstance(s, six.string_types):\n s = s.decode('utf-8')\n fd.write(s)\n\ndef flush(): fd.flush()\n\ndef do(c, *args):\n s = termcap.get(c, *args)\n if s:\n put(s)\n\ndef goto(r, c):\n do('cup', r - scroll + height - 1, c)\n\ncells = []\nscroll = 0\n\nclass Cell(object):\n pass\n\nclass Handle:\n def __init__(self, cell, is_floating):\n self.h = id(cell)\n self.is_floating = is_floating\n def update(self, s):\n update(self.h, s)\n def freeze(self):\n freeze(self.h)\n def delete(self):\n delete(self.h)\n\nSTR, CSI, LF, BS, CR, SOH, STX, OOB = range(8)\ndef parse_csi(buf, offset):\n i = offset\n while i < len(buf):\n c = buf[i]\n if c >= 0x40 and c < 0x80:\n break\n i += 1\n if i >= len(buf):\n return\n end = i\n cmd = [c, None, None]\n i = offset\n in_num = False\n args = []\n if buf[i] >= ord('<') and buf[i] <= ord('?'):\n cmd[1] = buf[i]\n i += 1\n while i < end:\n c = buf[i]\n if c >= ord('0') and c <= ord('9'):\n if not in_num:\n args.append(c - ord('0'))\n in_num = True\n else:\n args[-1] = args[-1] * 10 + c - ord('0')\n elif c == ord(';'):\n if not in_num:\n args.append(None)\n in_num = False\n if len(args) > 16:\n break\n elif c >= 0x20 and c <= 0x2f:\n cmd[2] = c\n break\n i += 1\n return cmd, args, end + 1\n\ndef parse_utf8(buf, offset):\n c0 = buf[offset]\n n = 0\n if c0 & 0b11100000 == 0b11000000:\n n = 2\n elif c0 & 0b11110000 == 0b11100000:\n n = 3\n elif c0 & 0b11111000 == 0b11110000:\n n = 4\n elif c0 & 0b11111100 == 0b11111000:\n n = 5\n elif c0 & 0b11111110 == 0b11111100:\n n = 6\n if n:\n return offset + n\n\ndef parse(s):\n global _graphics_mode\n if isinstance(s, six.text_type):\n s = s.encode('utf8')\n out = []\n buf = bytearray(s)\n i = 0\n while i < len(buf):\n x = None\n c = buf[i]\n if c >= 0x20 and c <= 0x7e:\n x = (STR, [six.int2byte(c)])\n i += 1\n elif c & 0xc0:\n j = parse_utf8(buf, i)\n if j:\n x = (STR, [b''.join(map(six.int2byte, buf[i : j]))])\n i = j\n elif c == 0x1b and len(buf) > i + 1:\n c1 = buf[i + 1]\n if c1 == ord('['):\n ret = parse_csi(buf, i + 2)\n if ret:\n cmd, args, j = ret\n x = (CSI, (cmd, args, b''.join(map(six.int2byte, buf[i : j]))))\n i = j\n elif c1 == ord(']'):\n # XXX: this is a dirty hack:\n # we still need to do our homework on this one, but what we do\n # here is supporting setting the terminal title and updating\n # the color map. 
we promise to do it properly in the next\n # iteration of this terminal emulation/compatibility layer\n # related: https://unix.stackexchange.com/questions/5936/can-i-set-my-local-machines-terminal-colors-to-use-those-of-the-machine-i-ssh-i\n try:\n j = s.index('\\x07', i)\n except Exception:\n try:\n j = s.index('\\x1b\\\\', i)\n except Exception:\n j = 1\n x = (OOB, s[i:j + 1])\n i = j + 1\n elif c1 in map(ord, '()'): # select G0 or G1\n i += 3\n continue\n elif c1 in map(ord, '>='): # set numeric/application keypad mode\n i += 2\n continue\n elif c1 == ord('P'):\n _graphics_mode = True\n i += 2\n continue\n elif c1 == ord('\\\\'):\n _graphics_mode = False\n i += 2\n continue\n elif c == 0x01:\n x = (SOH, None)\n i += 1\n elif c == 0x02:\n x = (STX, None)\n i += 1\n elif c == 0x08:\n x = (BS, None)\n i += 1\n elif c == 0x09:\n x = (STR, [b' ']) # who the **** uses tabs anyway?\n i += 1\n elif c == 0x0a:\n x = (LF, None)\n i += 1\n elif c == 0x0d:\n x = (CR, None)\n i += 1\n else:\n i += 1\n\n if _graphics_mode:\n continue\n if x is None:\n x = (STR, [six.int2byte(c) for c in bytearray(b'\\\\x%02x' % c)])\n i += 1\n if x[0] == STR and out and out[-1][0] == STR:\n out[-1][1].extend(x[1])\n else:\n out.append(x)\n return out\n\nsaved_cursor = None\n# XXX: render cells that is half-way on the screen\ndef render_cell(cell, clear_after = False):\n global scroll, saved_cursor\n row, col = cell.start\n row = row - scroll + height - 1\n if row < 0:\n return\n indent = min(cell.indent, width - 1)\n for t, x in cell.content:\n if t == STR:\n i = 0\n while i < len(x):\n if col >= width:\n col = 0\n row += 1\n if col < indent:\n put(' ' * (indent - col))\n col = indent\n c = x[i]\n if not hasattr(c, 'encode'):\n c = c.decode('utf-8', 'backslashreplace')\n put(c)\n col += 1\n i += 1\n elif t == CSI:\n cmd, args, c = x\n put(c)\n # figure out if the cursor moved (XXX: here probably be bugs)\n if cmd[1] is None and cmd[2] is None:\n c = cmd[0]\n if len(args) >= 1:\n n = args[0]\n else:\n n = None\n if len(args) >= 2:\n m = args[1]\n else:\n m = None\n if c == ord('A'):\n n = n or 1\n row = max(0, row - n)\n elif c == ord('B'):\n n = n or 1\n row = min(height - 1, row + n)\n elif c == ord('C'):\n n = n or 1\n col = min(width - 1, col + n)\n elif c == ord('D'):\n n = n or 1\n col = max(0, col - n)\n elif c == ord('E'):\n n = n or 1\n row = min(height - 1, row + n)\n col = 0\n elif c == ord('F'):\n n = n or 1\n row = max(0, row - n)\n col = 0\n elif c == ord('G'):\n n = n or 1\n col = min(width - 1, n - 1)\n elif c == ord('H') or c == ord('f'):\n n = n or 1\n m = m or 1\n row = min(height - 1, n - 1)\n col = min(width - 1, m - 1)\n elif c == ord('S'):\n n = n or 1\n scroll += n\n row = max(0, row - n)\n elif c == ord('T'):\n n = n or 1\n scroll -= n\n row = min(height - 1, row + n)\n elif c == ord('s'):\n saved_cursor = row, col\n elif c == ord('u'):\n if saved_cursor:\n row, col = saved_cursor\n elif t == LF:\n if clear_after and col <= width - 1:\n put('\\x1b[K') # clear line\n put('\\n')\n col = 0\n row += 1\n elif t == BS:\n if col > 0:\n put('\\x08')\n col -= 1\n elif t == CR:\n put('\\r')\n col = 0\n elif t == SOH:\n put('\\x01')\n elif t == STX:\n put('\\x02')\n elif t == OOB:\n put(x)\n if row >= height:\n d = row - height + 1\n scroll += d\n row -= d\n row = row + scroll - height + 1\n cell.end = (row, col)\n\ndef render_from(i, force = False, clear_after = False):\n e = None\n # `i` should always be a valid cell, but in case i f***ed up somewhere, I'll\n # check it and just do nothing if 
something went wrong.\n if i < 0 or i >= len(cells):\n return\n goto(*cells[i].start)\n for c in cells[i:]:\n if not force and c.start == e:\n goto(*cells[-1].end)\n break\n elif e:\n c.start = e\n render_cell(c, clear_after = clear_after)\n e = c.end\n if clear_after and (e[0] < scroll or e[1] < width - 1):\n put('\\x1b[J')\n flush()\n\ndef redraw():\n for i in reversed(range(len(cells))):\n row = cells[i].start[0]\n if row - scroll + height <= 0:\n # XXX: remove this line when render_cell is fixed\n i += 1\n break\n else:\n if not cells:\n return\n render_from(i, force = True, clear_after = True)\n\nlock = threading.Lock()\ndef output(s = '', float = False, priority = 10, frozen = False,\n indent = 0, before = None, after = None):\n with lock:\n rel = before or after\n if rel:\n i, _ = find_cell(rel.h)\n is_floating = rel.is_floating\n float = cells[i].float\n if before:\n i -= 1\n elif float and priority:\n is_floating = True\n float = priority\n for i in reversed(range(len(cells))):\n if cells[i].float <= float:\n break\n else:\n is_floating = False\n i = len(cells) - 1\n while i > 0 and cells[i].float:\n i -= 1\n # put('xx %d\\n' % i)\n cell = Cell()\n cell.content = parse(s)\n cell.frozen = frozen\n cell.float = float\n cell.indent = indent\n cell.start = cells[i].end\n i += 1\n cells.insert(i, cell)\n h = Handle(cell, is_floating)\n if not s:\n cell.end = cell.start\n return h\n # the invariant is that the cursor is placed after the last cell\n if i == len(cells) - 1:\n render_cell(cell, clear_after = True)\n flush()\n else:\n render_from(i, clear_after = True)\n return h\n\ndef find_cell(h):\n for i, c in enumerate(cells):\n if id(c) == h:\n return i, c\n raise KeyError\n\ndef discard_frozen():\n # we assume that no cell will shrink very much and that noone has space\n # for more than MAX_TERM_HEIGHT lines in their terminal\n while len(cells) > 1 and scroll - cells[0].end[0] > MAX_TERM_HEIGHT:\n c = cells.pop(0)\n del c # trigger GC maybe, kthxbai\n\ndef update(h, s):\n with lock:\n try:\n i, c = find_cell(h)\n except KeyError:\n return\n if not c.frozen and c.content != s:\n c.content = parse(s)\n render_from(i, clear_after = True)\n\ndef freeze(h):\n try:\n i, c = find_cell(h)\n c.frozen = True\n c.float = 0\n if c.content == []:\n cells.pop(i)\n discard_frozen()\n except KeyError:\n return\n\ndef delete(h):\n update(h, '')\n freeze(h)\n",
"path": "pwnlib/term/term.py"
}
] | diff --git a/pwnlib/term/term.py b/pwnlib/term/term.py
index df35e68a5..2d0f41d6c 100644
--- a/pwnlib/term/term.py
+++ b/pwnlib/term/term.py
@@ -425,7 +425,7 @@ def render_cell(cell, clear_after = False):
put('\x08')
col -= 1
elif t == CR:
-# put('\r')
+ put('\r')
col = 0
elif t == SOH:
put('\x01')
|
bokeh__bokeh-9368 | Use `npm ci` to force usage of the lock file
`npm ci` should be used in situations where any update of dependencies is undesired, especially in CI: it installs dependencies exactly as specified in `package-lock.json`. On the other hand, `npm install` can still perform minor updates.
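
As an illustration only (this helper is hypothetical and not part of Bokeh's build scripts), the intent of the change can be sketched the same way `_setup_support.py` shells out to the JS toolchain: prefer `npm ci` whenever `package-lock.json` is present so the install is reproducible.

```python
# Hypothetical helper, not part of the Bokeh codebase: illustrates preferring
# `npm ci` (exact installs from package-lock.json) over `npm install` in CI.
import os
import subprocess

def install_js_deps(bokehjs_dir):
    lockfile = os.path.join(bokehjs_dir, "package-lock.json")
    if os.path.exists(lockfile):
        # `npm ci` fails fast if the lock file and package.json disagree,
        # and never writes updated versions back to the lock file.
        cmd = ["npm", "ci", "--no-progress"]
    else:
        cmd = ["npm", "install", "--no-progress"]
    subprocess.check_call(cmd, cwd=bokehjs_dir)
```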
| [
{
"content": "'''\n\n'''\nimport shutil\nfrom glob import glob\nfrom os.path import dirname, exists, join, realpath, relpath\nimport os, re, subprocess, sys, time\n\nimport versioneer\n\n# provide fallbacks for highlights in case colorama is not installed\ntry:\n import colorama\n from colorama import Fore, Style\n\n def bright(text): return \"%s%s%s\" % (Style.BRIGHT, text, Style.RESET_ALL)\n def dim(text): return \"%s%s%s\" % (Style.DIM, text, Style.RESET_ALL)\n def red(text): return \"%s%s%s\" % (Fore.RED, text, Style.RESET_ALL)\n def green(text): return \"%s%s%s\" % (Fore.GREEN, text, Style.RESET_ALL)\n def yellow(text): return \"%s%s%s\" % (Fore.YELLOW, text, Style.RESET_ALL)\n sys.platform == \"win32\" and colorama.init()\nexcept ImportError:\n def bright(text): return text\n def dim(text): return text\n def red(text) : return text\n def green(text) : return text\n def yellow(text) : return text\n\n# -----------------------------------------------------------------------------\n# Module global variables\n# -----------------------------------------------------------------------------\n\nROOT = dirname(realpath(__file__))\nBOKEHJSROOT = join(ROOT, 'bokehjs')\nBOKEHJSBUILD = join(BOKEHJSROOT, 'build')\nJS = join(BOKEHJSBUILD, 'js')\nSERVER = join(ROOT, 'bokeh/server')\nTSLIB = join(BOKEHJSROOT , 'node_modules/typescript/lib')\n\n# -----------------------------------------------------------------------------\n# Helpers for command line operations\n# -----------------------------------------------------------------------------\n\ndef show_bokehjs(bokehjs_action, develop=False):\n ''' Print a useful report after setuptools output describing where and how\n BokehJS is installed.\n\n Args:\n bokehjs_action (str) : one of 'built', 'installed', or 'packaged'\n how (or if) BokehJS was installed into the python source tree\n\n develop (bool, optional) :\n whether the command was for \"develop\" mode (default: False)\n\n Returns:\n None\n\n '''\n print()\n if develop:\n print(\"Installed Bokeh for DEVELOPMENT:\")\n else:\n print(\"Installed Bokeh:\")\n if bokehjs_action in ['built', 'installed']:\n print(\" - using %s built BokehJS from bokehjs/build\\n\" % (bright(yellow(\"NEWLY\")) if bokehjs_action=='built' else bright(yellow(\"PREVIOUSLY\"))))\n else:\n print(\" - using %s BokehJS, located in 'bokeh.server.static'\\n\" % bright(yellow(\"PACKAGED\")))\n print()\n\ndef show_help(bokehjs_action):\n ''' Print information about extra Bokeh-specific command line options.\n\n Args:\n bokehjs_action (str) : one of 'built', 'installed', or 'packaged'\n how (or if) BokehJS was installed into the python source tree\n\n Returns:\n None\n\n '''\n print()\n if bokehjs_action in ['built', 'installed']:\n print(\"Bokeh-specific options available with 'install' or 'develop':\")\n print()\n print(\" --build-js build and install a fresh BokehJS\")\n print(\" --install-js install only last previously built BokehJS\")\n else:\n print(\"Bokeh is using PACKAGED BokehJS, located in 'bokeh.server.static'\")\n print()\n print(\"No extra Bokeh-specific options are available.\")\n print()\n\n# -----------------------------------------------------------------------------\n# Other functions used directly by setup.py\n# -----------------------------------------------------------------------------\n\ndef build_or_install_bokehjs():\n ''' Build a new BokehJS (and install it) or install a previously build\n BokehJS.\n\n If no options ``--build-js`` or ``--install-js`` are detected, the\n user is prompted for what to do.\n\n If 
``--existing-js`` is detected, then this setup.py is being run from a\n packaged sdist, no action is taken.\n\n Note that ``-build-js`` is only compatible with the following ``setup.py``\n commands: install, develop, sdist, egg_info, build\n\n Returns:\n str : one of 'built', 'installed', 'packaged'\n How (or if) BokehJS was installed into the python source tree\n\n '''\n\n # This happens when building from inside a published, pre-packaged sdist\n # The --existing-js option is not otherwise documented\n if '--existing-js' in sys.argv:\n sys.argv.remove('--existing-js')\n return \"packaged\"\n\n if '--build-js' not in sys.argv and '--install-js' not in sys.argv:\n jsbuild = jsbuild_prompt()\n\n elif '--build-js' in sys.argv:\n jsbuild = True\n sys.argv.remove('--build-js')\n\n # must be \"--install-js\"\n else:\n jsbuild = False\n sys.argv.remove('--install-js')\n\n jsbuild_ok = ('install', 'develop', 'sdist', 'egg_info', 'build')\n if jsbuild and not any(arg in sys.argv for arg in jsbuild_ok):\n print(\"Error: Option '--build-js' only valid with 'install', 'develop', 'sdist', or 'build', exiting.\")\n sys.exit(1)\n\n if jsbuild:\n build_js()\n install_js()\n return \"built\"\n else:\n install_js()\n return \"installed\"\n\ndef conda_rendering():\n return os.getenv(\"CONDA_BUILD_STATE\" ,\"junk\") == \"RENDER\"\n\n\ndef fixup_building_sdist():\n ''' Check for 'sdist' and ensure we always build BokehJS when packaging\n\n Source distributions do not ship with BokehJS source code, but must ship\n with a pre-built BokehJS library. This function modifies ``sys.argv`` as\n necessary so that ``--build-js`` IS present, and ``--install-js` is NOT.\n\n Returns:\n None\n\n '''\n if \"sdist\" in sys.argv:\n if \"--install-js\" in sys.argv:\n print(\"Removing '--install-js' incompatible with 'sdist'\")\n sys.argv.remove('--install-js')\n if \"--build-js\" not in sys.argv:\n print(\"Adding '--build-js' required for 'sdist'\")\n sys.argv.append('--build-js')\n\ndef fixup_for_packaged():\n ''' If we are installing FROM an sdist, then a pre-built BokehJS is\n already installed in the python source tree.\n\n The command line options ``--build-js`` or ``--install-js`` are\n removed from ``sys.argv``, with a warning.\n\n Also adds ``--existing-js`` to ``sys.argv`` to signal that BokehJS is\n already packaged.\n\n Returns:\n None\n\n '''\n if exists(join(ROOT, 'PKG-INFO')):\n if \"--build-js\" in sys.argv or \"--install-js\" in sys.argv:\n print(SDIST_BUILD_WARNING)\n if \"--build-js\" in sys.argv:\n sys.argv.remove('--build-js')\n if \"--install-js\" in sys.argv:\n sys.argv.remove('--install-js')\n if \"--existing-js\" not in sys.argv:\n sys.argv.append('--existing-js')\n\n# Horrible hack: workaround to allow creation of bdist_wheel on pip\n# installation. Why, for God's sake, is pip forcing the generation of wheels\n# when installing a package?\ndef get_cmdclass():\n ''' A ``cmdclass`` that works around a setuptools deficiency.\n\n There is no need to build wheels when installing a package, however some\n versions of setuptools seem to mandate this. 
This is a hacky workaround\n that modifies the ``cmdclass`` returned by versioneer so that not having\n wheel installed is not a fatal error.\n\n '''\n cmdclass = versioneer.get_cmdclass()\n\n try:\n from wheel.bdist_wheel import bdist_wheel\n except ImportError:\n # pip is not claiming for bdist_wheel when wheel is not installed\n bdist_wheel = None\n\n if bdist_wheel is not None:\n cmdclass[\"bdist_wheel\"] = bdist_wheel\n\n return cmdclass\n\ndef get_package_data():\n ''' All of all of the \"extra\" package data files collected by the\n ``package_files`` and ``package_path`` functions in ``setup.py``.\n\n '''\n return { 'bokeh': _PACKAGE_DATA }\n\ndef get_version():\n ''' The version of Bokeh currently checked out\n\n Returns:\n str : the version string\n\n '''\n return versioneer.get_version()\n\n# -----------------------------------------------------------------------------\n# Helpers for operation in the bokehjs dir\n# -----------------------------------------------------------------------------\n\ndef jsbuild_prompt():\n ''' Prompt users whether to build a new BokehJS or install an existing one.\n\n Returns:\n bool : True, if a new build is requested, False otherwise\n\n '''\n print(BOKEHJS_BUILD_PROMPT)\n mapping = {\"1\": True, \"2\": False}\n value = input(\"Choice? \")\n while value not in mapping:\n print(\"Input '%s' not understood. Valid choices: 1, 2\\n\" % value)\n value = input(\"Choice? \")\n return mapping[value]\n\n# -----------------------------------------------------------------------------\n# Helpers for operations in the bokehjs dir\n# -----------------------------------------------------------------------------\n\ndef build_js():\n ''' Build BokehJS files under the ``bokehjs`` source subdirectory.\n\n Also prints a table of statistics about the generated assets (file sizes,\n etc.) or any error messages if the build fails.\n\n Note this function only builds BokehJS assets, it does not install them\n into the python source tree.\n\n '''\n print(\"Building BokehJS... 
\", end=\"\")\n sys.stdout.flush()\n os.chdir('bokehjs')\n\n cmd = [\"node\", \"make\", 'build', '--emit-error']\n\n t0 = time.time()\n try:\n proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n except OSError as e:\n print(BUILD_EXEC_FAIL_MSG % (cmd, e))\n sys.exit(1)\n finally:\n os.chdir('..')\n\n result = proc.wait()\n t1 = time.time()\n\n if result != 0:\n indented_msg = \"\"\n outmsg = proc.stdout.read().decode('ascii', errors='ignore')\n outmsg = \"\\n\".join(\" \" + x for x in outmsg.split(\"\\n\"))\n errmsg = proc.stderr.read().decode('ascii', errors='ignore')\n errmsg = \"\\n\".join(\" \" + x for x in errmsg.split(\"\\n\"))\n print(BUILD_FAIL_MSG % (red(outmsg), red(errmsg)))\n sys.exit(1)\n\n indented_msg = \"\"\n msg = proc.stdout.read().decode('ascii', errors='ignore')\n pat = re.compile(r\"(\\[.*\\]) (.*)\", re.DOTALL)\n for line in msg.strip().split(\"\\n\"):\n m = pat.match(line)\n if not m: continue # skip generate.py output lines\n stamp, txt = m.groups()\n indented_msg += \" \" + dim(green(stamp)) + \" \" + dim(txt) + \"\\n\"\n msg = \"\\n\".join(\" \" + x for x in msg.split(\"\\n\"))\n print(BUILD_SUCCESS_MSG % indented_msg)\n print(\"Build time: %s\" % bright(yellow(\"%0.1f seconds\" % (t1-t0))))\n print()\n print(\"Build artifact sizes:\")\n try:\n def size(*path):\n return os.stat(join(\"bokehjs\", \"build\", *path)).st_size / 2**10\n\n print(\" - bokeh.js : %6.1f KB\" % size(\"js\", \"bokeh.js\"))\n print(\" - bokeh.min.js : %6.1f KB\" % size(\"js\", \"bokeh.min.js\"))\n\n print(\" - bokeh-widgets.js : %6.1f KB\" % size(\"js\", \"bokeh-widgets.js\"))\n print(\" - bokeh-widgets.min.js : %6.1f KB\" % size(\"js\", \"bokeh-widgets.min.js\"))\n\n print(\" - bokeh-tables.js : %6.1f KB\" % size(\"js\", \"bokeh-tables.js\"))\n print(\" - bokeh-tables.min.js : %6.1f KB\" % size(\"js\", \"bokeh-tables.min.js\"))\n\n print(\" - bokeh-api.js : %6.1f KB\" % size(\"js\", \"bokeh-api.js\"))\n print(\" - bokeh-api.min.js : %6.1f KB\" % size(\"js\", \"bokeh-api.min.js\"))\n except Exception as e:\n print(BUILD_SIZE_FAIL_MSG % e)\n sys.exit(1)\n\ndef install_js():\n ''' Copy built BokehJS files into the Python source tree.\n\n Returns:\n None\n\n '''\n target_jsdir = join(SERVER, 'static', 'js')\n target_tslibdir = join(SERVER, 'static', 'lib')\n\n STATIC_ASSETS = [\n join(JS, 'bokeh.js'),\n join(JS, 'bokeh.min.js'),\n ]\n if not all(exists(a) for a in STATIC_ASSETS):\n print(BOKEHJS_INSTALL_FAIL)\n sys.exit(1)\n\n if exists(target_jsdir):\n shutil.rmtree(target_jsdir)\n shutil.copytree(JS, target_jsdir)\n\n if exists(target_tslibdir):\n shutil.rmtree(target_tslibdir)\n if exists(TSLIB):\n os.mkdir(target_tslibdir)\n for lib_file in glob(join(TSLIB, \"lib.*.d.ts\")):\n shutil.copy(lib_file, target_tslibdir)\n\n# -----------------------------------------------------------------------------\n# Helpers for collecting package data\n# -----------------------------------------------------------------------------\n\n_PACKAGE_DATA = []\n\ndef package_files(*paths):\n '''\n\n '''\n _PACKAGE_DATA.extend(paths)\n\ndef package_path(path, filters=()):\n '''\n\n '''\n if not os.path.exists(path):\n raise RuntimeError(\"packaging non-existent path: %s\" % path)\n elif os.path.isfile(path):\n _PACKAGE_DATA.append(relpath(path, 'bokeh'))\n else:\n for path, dirs, files in os.walk(path):\n path = relpath(path, 'bokeh')\n for f in files:\n if not filters or f.endswith(filters):\n _PACKAGE_DATA.append(join(path, f))\n\n# 
-----------------------------------------------------------------------------\n# Status and error message strings\n# -----------------------------------------------------------------------------\n\nBOKEHJS_BUILD_PROMPT = \"\"\"\nBokeh includes a JavaScript library (BokehJS) that has its own\nbuild process. How would you like to handle BokehJS:\n\n1) build and install fresh BokehJS\n2) install last built BokehJS from bokeh/bokehjs/build\n\"\"\"\n\nBOKEHJS_INSTALL_FAIL = \"\"\"\nERROR: Cannot install BokehJS: files missing in `./bokehjs/build`.\n\n\nPlease build BokehJS by running setup.py with the `--build-js` option.\n Dev Guide: https://docs.bokeh.org/en/latest/docs/dev_guide/setup.html.\n\"\"\"\n\nBUILD_EXEC_FAIL_MSG = bright(red(\"Failed.\")) + \"\"\"\n\nERROR: subprocess.Popen(%r) failed to execute:\n\n %s\n\nHave you run `npm install --no-save` from the bokehjs subdirectory?\nFor more information, see the Dev Guide:\n\n https://docs.bokeh.org/en/latest/docs/dev_guide.html\n\"\"\"\n\nBUILD_FAIL_MSG = bright(red(\"Failed.\")) + \"\"\"\n\nERROR: 'node make build' returned the following\n\n---- on stdout:\n%s\n\n---- on stderr:\n%s\n\"\"\"\n\nBUILD_SIZE_FAIL_MSG = \"\"\"\nERROR: could not determine sizes:\n\n %s\n\"\"\"\n\nBUILD_SUCCESS_MSG = bright(green(\"Success!\")) + \"\"\"\n\nBuild output:\n\n%s\"\"\"\n\nSDIST_BUILD_WARNING = \"\"\"\nSource distribution (sdist) packages come with PRE-BUILT BokehJS files.\n\nBuilding/installing from the bokehjs source directory of sdist packages is\ndisabled, and the options --build-js and --install-js will be IGNORED.\n\nTo build or develop BokehJS yourself, you must clone the full Bokeh GitHub\nrepository from https://github.com/bokeh/bokeh\n\"\"\"\n",
"path": "_setup_support.py"
}
] | [
{
"content": "'''\n\n'''\nimport shutil\nfrom glob import glob\nfrom os.path import dirname, exists, join, realpath, relpath\nimport os, re, subprocess, sys, time\n\nimport versioneer\n\n# provide fallbacks for highlights in case colorama is not installed\ntry:\n import colorama\n from colorama import Fore, Style\n\n def bright(text): return \"%s%s%s\" % (Style.BRIGHT, text, Style.RESET_ALL)\n def dim(text): return \"%s%s%s\" % (Style.DIM, text, Style.RESET_ALL)\n def red(text): return \"%s%s%s\" % (Fore.RED, text, Style.RESET_ALL)\n def green(text): return \"%s%s%s\" % (Fore.GREEN, text, Style.RESET_ALL)\n def yellow(text): return \"%s%s%s\" % (Fore.YELLOW, text, Style.RESET_ALL)\n sys.platform == \"win32\" and colorama.init()\nexcept ImportError:\n def bright(text): return text\n def dim(text): return text\n def red(text) : return text\n def green(text) : return text\n def yellow(text) : return text\n\n# -----------------------------------------------------------------------------\n# Module global variables\n# -----------------------------------------------------------------------------\n\nROOT = dirname(realpath(__file__))\nBOKEHJSROOT = join(ROOT, 'bokehjs')\nBOKEHJSBUILD = join(BOKEHJSROOT, 'build')\nJS = join(BOKEHJSBUILD, 'js')\nSERVER = join(ROOT, 'bokeh/server')\nTSLIB = join(BOKEHJSROOT , 'node_modules/typescript/lib')\n\n# -----------------------------------------------------------------------------\n# Helpers for command line operations\n# -----------------------------------------------------------------------------\n\ndef show_bokehjs(bokehjs_action, develop=False):\n ''' Print a useful report after setuptools output describing where and how\n BokehJS is installed.\n\n Args:\n bokehjs_action (str) : one of 'built', 'installed', or 'packaged'\n how (or if) BokehJS was installed into the python source tree\n\n develop (bool, optional) :\n whether the command was for \"develop\" mode (default: False)\n\n Returns:\n None\n\n '''\n print()\n if develop:\n print(\"Installed Bokeh for DEVELOPMENT:\")\n else:\n print(\"Installed Bokeh:\")\n if bokehjs_action in ['built', 'installed']:\n print(\" - using %s built BokehJS from bokehjs/build\\n\" % (bright(yellow(\"NEWLY\")) if bokehjs_action=='built' else bright(yellow(\"PREVIOUSLY\"))))\n else:\n print(\" - using %s BokehJS, located in 'bokeh.server.static'\\n\" % bright(yellow(\"PACKAGED\")))\n print()\n\ndef show_help(bokehjs_action):\n ''' Print information about extra Bokeh-specific command line options.\n\n Args:\n bokehjs_action (str) : one of 'built', 'installed', or 'packaged'\n how (or if) BokehJS was installed into the python source tree\n\n Returns:\n None\n\n '''\n print()\n if bokehjs_action in ['built', 'installed']:\n print(\"Bokeh-specific options available with 'install' or 'develop':\")\n print()\n print(\" --build-js build and install a fresh BokehJS\")\n print(\" --install-js install only last previously built BokehJS\")\n else:\n print(\"Bokeh is using PACKAGED BokehJS, located in 'bokeh.server.static'\")\n print()\n print(\"No extra Bokeh-specific options are available.\")\n print()\n\n# -----------------------------------------------------------------------------\n# Other functions used directly by setup.py\n# -----------------------------------------------------------------------------\n\ndef build_or_install_bokehjs():\n ''' Build a new BokehJS (and install it) or install a previously build\n BokehJS.\n\n If no options ``--build-js`` or ``--install-js`` are detected, the\n user is prompted for what to do.\n\n If 
``--existing-js`` is detected, then this setup.py is being run from a\n packaged sdist, no action is taken.\n\n Note that ``-build-js`` is only compatible with the following ``setup.py``\n commands: install, develop, sdist, egg_info, build\n\n Returns:\n str : one of 'built', 'installed', 'packaged'\n How (or if) BokehJS was installed into the python source tree\n\n '''\n\n # This happens when building from inside a published, pre-packaged sdist\n # The --existing-js option is not otherwise documented\n if '--existing-js' in sys.argv:\n sys.argv.remove('--existing-js')\n return \"packaged\"\n\n if '--build-js' not in sys.argv and '--install-js' not in sys.argv:\n jsbuild = jsbuild_prompt()\n\n elif '--build-js' in sys.argv:\n jsbuild = True\n sys.argv.remove('--build-js')\n\n # must be \"--install-js\"\n else:\n jsbuild = False\n sys.argv.remove('--install-js')\n\n jsbuild_ok = ('install', 'develop', 'sdist', 'egg_info', 'build')\n if jsbuild and not any(arg in sys.argv for arg in jsbuild_ok):\n print(\"Error: Option '--build-js' only valid with 'install', 'develop', 'sdist', or 'build', exiting.\")\n sys.exit(1)\n\n if jsbuild:\n build_js()\n install_js()\n return \"built\"\n else:\n install_js()\n return \"installed\"\n\ndef conda_rendering():\n return os.getenv(\"CONDA_BUILD_STATE\" ,\"junk\") == \"RENDER\"\n\n\ndef fixup_building_sdist():\n ''' Check for 'sdist' and ensure we always build BokehJS when packaging\n\n Source distributions do not ship with BokehJS source code, but must ship\n with a pre-built BokehJS library. This function modifies ``sys.argv`` as\n necessary so that ``--build-js`` IS present, and ``--install-js` is NOT.\n\n Returns:\n None\n\n '''\n if \"sdist\" in sys.argv:\n if \"--install-js\" in sys.argv:\n print(\"Removing '--install-js' incompatible with 'sdist'\")\n sys.argv.remove('--install-js')\n if \"--build-js\" not in sys.argv:\n print(\"Adding '--build-js' required for 'sdist'\")\n sys.argv.append('--build-js')\n\ndef fixup_for_packaged():\n ''' If we are installing FROM an sdist, then a pre-built BokehJS is\n already installed in the python source tree.\n\n The command line options ``--build-js`` or ``--install-js`` are\n removed from ``sys.argv``, with a warning.\n\n Also adds ``--existing-js`` to ``sys.argv`` to signal that BokehJS is\n already packaged.\n\n Returns:\n None\n\n '''\n if exists(join(ROOT, 'PKG-INFO')):\n if \"--build-js\" in sys.argv or \"--install-js\" in sys.argv:\n print(SDIST_BUILD_WARNING)\n if \"--build-js\" in sys.argv:\n sys.argv.remove('--build-js')\n if \"--install-js\" in sys.argv:\n sys.argv.remove('--install-js')\n if \"--existing-js\" not in sys.argv:\n sys.argv.append('--existing-js')\n\n# Horrible hack: workaround to allow creation of bdist_wheel on pip\n# installation. Why, for God's sake, is pip forcing the generation of wheels\n# when installing a package?\ndef get_cmdclass():\n ''' A ``cmdclass`` that works around a setuptools deficiency.\n\n There is no need to build wheels when installing a package, however some\n versions of setuptools seem to mandate this. 
This is a hacky workaround\n that modifies the ``cmdclass`` returned by versioneer so that not having\n wheel installed is not a fatal error.\n\n '''\n cmdclass = versioneer.get_cmdclass()\n\n try:\n from wheel.bdist_wheel import bdist_wheel\n except ImportError:\n # pip is not claiming for bdist_wheel when wheel is not installed\n bdist_wheel = None\n\n if bdist_wheel is not None:\n cmdclass[\"bdist_wheel\"] = bdist_wheel\n\n return cmdclass\n\ndef get_package_data():\n ''' All of all of the \"extra\" package data files collected by the\n ``package_files`` and ``package_path`` functions in ``setup.py``.\n\n '''\n return { 'bokeh': _PACKAGE_DATA }\n\ndef get_version():\n ''' The version of Bokeh currently checked out\n\n Returns:\n str : the version string\n\n '''\n return versioneer.get_version()\n\n# -----------------------------------------------------------------------------\n# Helpers for operation in the bokehjs dir\n# -----------------------------------------------------------------------------\n\ndef jsbuild_prompt():\n ''' Prompt users whether to build a new BokehJS or install an existing one.\n\n Returns:\n bool : True, if a new build is requested, False otherwise\n\n '''\n print(BOKEHJS_BUILD_PROMPT)\n mapping = {\"1\": True, \"2\": False}\n value = input(\"Choice? \")\n while value not in mapping:\n print(\"Input '%s' not understood. Valid choices: 1, 2\\n\" % value)\n value = input(\"Choice? \")\n return mapping[value]\n\n# -----------------------------------------------------------------------------\n# Helpers for operations in the bokehjs dir\n# -----------------------------------------------------------------------------\n\ndef build_js():\n ''' Build BokehJS files under the ``bokehjs`` source subdirectory.\n\n Also prints a table of statistics about the generated assets (file sizes,\n etc.) or any error messages if the build fails.\n\n Note this function only builds BokehJS assets, it does not install them\n into the python source tree.\n\n '''\n print(\"Building BokehJS... 
\", end=\"\")\n sys.stdout.flush()\n os.chdir('bokehjs')\n\n cmd = [\"node\", \"make\", 'build', '--emit-error']\n\n t0 = time.time()\n try:\n proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n except OSError as e:\n print(BUILD_EXEC_FAIL_MSG % (cmd, e))\n sys.exit(1)\n finally:\n os.chdir('..')\n\n result = proc.wait()\n t1 = time.time()\n\n if result != 0:\n indented_msg = \"\"\n outmsg = proc.stdout.read().decode('ascii', errors='ignore')\n outmsg = \"\\n\".join(\" \" + x for x in outmsg.split(\"\\n\"))\n errmsg = proc.stderr.read().decode('ascii', errors='ignore')\n errmsg = \"\\n\".join(\" \" + x for x in errmsg.split(\"\\n\"))\n print(BUILD_FAIL_MSG % (red(outmsg), red(errmsg)))\n sys.exit(1)\n\n indented_msg = \"\"\n msg = proc.stdout.read().decode('ascii', errors='ignore')\n pat = re.compile(r\"(\\[.*\\]) (.*)\", re.DOTALL)\n for line in msg.strip().split(\"\\n\"):\n m = pat.match(line)\n if not m: continue # skip generate.py output lines\n stamp, txt = m.groups()\n indented_msg += \" \" + dim(green(stamp)) + \" \" + dim(txt) + \"\\n\"\n msg = \"\\n\".join(\" \" + x for x in msg.split(\"\\n\"))\n print(BUILD_SUCCESS_MSG % indented_msg)\n print(\"Build time: %s\" % bright(yellow(\"%0.1f seconds\" % (t1-t0))))\n print()\n print(\"Build artifact sizes:\")\n try:\n def size(*path):\n return os.stat(join(\"bokehjs\", \"build\", *path)).st_size / 2**10\n\n print(\" - bokeh.js : %6.1f KB\" % size(\"js\", \"bokeh.js\"))\n print(\" - bokeh.min.js : %6.1f KB\" % size(\"js\", \"bokeh.min.js\"))\n\n print(\" - bokeh-widgets.js : %6.1f KB\" % size(\"js\", \"bokeh-widgets.js\"))\n print(\" - bokeh-widgets.min.js : %6.1f KB\" % size(\"js\", \"bokeh-widgets.min.js\"))\n\n print(\" - bokeh-tables.js : %6.1f KB\" % size(\"js\", \"bokeh-tables.js\"))\n print(\" - bokeh-tables.min.js : %6.1f KB\" % size(\"js\", \"bokeh-tables.min.js\"))\n\n print(\" - bokeh-api.js : %6.1f KB\" % size(\"js\", \"bokeh-api.js\"))\n print(\" - bokeh-api.min.js : %6.1f KB\" % size(\"js\", \"bokeh-api.min.js\"))\n except Exception as e:\n print(BUILD_SIZE_FAIL_MSG % e)\n sys.exit(1)\n\ndef install_js():\n ''' Copy built BokehJS files into the Python source tree.\n\n Returns:\n None\n\n '''\n target_jsdir = join(SERVER, 'static', 'js')\n target_tslibdir = join(SERVER, 'static', 'lib')\n\n STATIC_ASSETS = [\n join(JS, 'bokeh.js'),\n join(JS, 'bokeh.min.js'),\n ]\n if not all(exists(a) for a in STATIC_ASSETS):\n print(BOKEHJS_INSTALL_FAIL)\n sys.exit(1)\n\n if exists(target_jsdir):\n shutil.rmtree(target_jsdir)\n shutil.copytree(JS, target_jsdir)\n\n if exists(target_tslibdir):\n shutil.rmtree(target_tslibdir)\n if exists(TSLIB):\n os.mkdir(target_tslibdir)\n for lib_file in glob(join(TSLIB, \"lib.*.d.ts\")):\n shutil.copy(lib_file, target_tslibdir)\n\n# -----------------------------------------------------------------------------\n# Helpers for collecting package data\n# -----------------------------------------------------------------------------\n\n_PACKAGE_DATA = []\n\ndef package_files(*paths):\n '''\n\n '''\n _PACKAGE_DATA.extend(paths)\n\ndef package_path(path, filters=()):\n '''\n\n '''\n if not os.path.exists(path):\n raise RuntimeError(\"packaging non-existent path: %s\" % path)\n elif os.path.isfile(path):\n _PACKAGE_DATA.append(relpath(path, 'bokeh'))\n else:\n for path, dirs, files in os.walk(path):\n path = relpath(path, 'bokeh')\n for f in files:\n if not filters or f.endswith(filters):\n _PACKAGE_DATA.append(join(path, f))\n\n# 
-----------------------------------------------------------------------------\n# Status and error message strings\n# -----------------------------------------------------------------------------\n\nBOKEHJS_BUILD_PROMPT = \"\"\"\nBokeh includes a JavaScript library (BokehJS) that has its own\nbuild process. How would you like to handle BokehJS:\n\n1) build and install fresh BokehJS\n2) install last built BokehJS from bokeh/bokehjs/build\n\"\"\"\n\nBOKEHJS_INSTALL_FAIL = \"\"\"\nERROR: Cannot install BokehJS: files missing in `./bokehjs/build`.\n\n\nPlease build BokehJS by running setup.py with the `--build-js` option.\n Dev Guide: https://docs.bokeh.org/en/latest/docs/dev_guide/setup.html.\n\"\"\"\n\nBUILD_EXEC_FAIL_MSG = bright(red(\"Failed.\")) + \"\"\"\n\nERROR: subprocess.Popen(%r) failed to execute:\n\n %s\n\nHave you run `npm ci` from the bokehjs subdirectory?\nFor more information, see the Dev Guide:\n\n https://docs.bokeh.org/en/latest/docs/dev_guide.html\n\"\"\"\n\nBUILD_FAIL_MSG = bright(red(\"Failed.\")) + \"\"\"\n\nERROR: 'node make build' returned the following\n\n---- on stdout:\n%s\n\n---- on stderr:\n%s\n\"\"\"\n\nBUILD_SIZE_FAIL_MSG = \"\"\"\nERROR: could not determine sizes:\n\n %s\n\"\"\"\n\nBUILD_SUCCESS_MSG = bright(green(\"Success!\")) + \"\"\"\n\nBuild output:\n\n%s\"\"\"\n\nSDIST_BUILD_WARNING = \"\"\"\nSource distribution (sdist) packages come with PRE-BUILT BokehJS files.\n\nBuilding/installing from the bokehjs source directory of sdist packages is\ndisabled, and the options --build-js and --install-js will be IGNORED.\n\nTo build or develop BokehJS yourself, you must clone the full Bokeh GitHub\nrepository from https://github.com/bokeh/bokeh\n\"\"\"\n",
"path": "_setup_support.py"
}
] | diff --git a/_setup_support.py b/_setup_support.py
index 80bb17de726..e9affaf8933 100644
--- a/_setup_support.py
+++ b/_setup_support.py
@@ -406,7 +406,7 @@ def package_path(path, filters=()):
%s
-Have you run `npm install --no-save` from the bokehjs subdirectory?
+Have you run `npm ci` from the bokehjs subdirectory?
For more information, see the Dev Guide:
https://docs.bokeh.org/en/latest/docs/dev_guide.html
diff --git a/bokehjs/LICENSE b/bokehjs/LICENSE
index b31dc04a84c..b0ce3e6ea57 100644
--- a/bokehjs/LICENSE
+++ b/bokehjs/LICENSE
@@ -45,6 +45,6 @@ complete set of unique licenses found in BokehJS packages is listed here:
A detailed list of specific package licenses may be obtained by executing the
following commands in the "bokehjs" subdirectory:
- $ npm install
+ $ npm ci
$ npx license-checker --production --csv
diff --git a/docker-tools/Dockerfile-from-source b/docker-tools/Dockerfile-from-source
index 244388d1b23..faf509b6e65 100644
--- a/docker-tools/Dockerfile-from-source
+++ b/docker-tools/Dockerfile-from-source
@@ -38,7 +38,7 @@ RUN conda install --yes --quiet `python scripts/deps.py build`
RUN npm install -g npm
WORKDIR $BOKEH_SOURCE_DIR/bokehjs
# build BokehJS
-RUN npm install --no-save --no-progress
+RUN npm ci --no-progress
RUN sh -c 'if [[ -d make ]]; then node make build; else node_modules/.bin/gulp build; fi'
WORKDIR $BOKEH_SOURCE_DIR
# build a noarch conda package for Bokeh using the just-built BokehJS
diff --git a/docker-tools/npm-install.sh b/docker-tools/npm-install.sh
index 278c96a3c48..f0b26c6c996 100755
--- a/docker-tools/npm-install.sh
+++ b/docker-tools/npm-install.sh
@@ -1,5 +1,5 @@
#!/bin/bash
-COMMAND="cd /bokeh/bokehjs && npm install --no-save --no-progress"
+COMMAND="cd /bokeh/bokehjs && npm ci --no-progress"
source "$(dirname $0)/base.sh"
diff --git a/scripts/ci/appveyor/build.ps1 b/scripts/ci/appveyor/build.ps1
index 4cd08c654b3..a2e07d900ef 100644
--- a/scripts/ci/appveyor/build.ps1
+++ b/scripts/ci/appveyor/build.ps1
@@ -3,7 +3,7 @@ set-psdebug -trace 2
function build() {
npm install -g npm
Push-Location -Path ".\\bokehjs"
- npm install --no-save --no-progress
+ npm ci --no-progress
Pop-Location
python setup.py -q install --build-js
}
diff --git a/scripts/ci/install.build b/scripts/ci/install.build
index 40e7e5f8e45..574c800429c 100755
--- a/scripts/ci/install.build
+++ b/scripts/ci/install.build
@@ -22,5 +22,5 @@ npm install -g npm
# install NPM dependencies
pushd bokehjs
-npm install --no-save --no-progress
+npm ci --no-progress
popd
diff --git a/scripts/ci/install.codebase b/scripts/ci/install.codebase
index 827a95aefba..1600d495c2f 100755
--- a/scripts/ci/install.codebase
+++ b/scripts/ci/install.codebase
@@ -8,5 +8,5 @@ set -x # echo commands
# install NPM dependencies
pushd bokehjs
-npm install --no-save --no-progress
+npm ci --no-progress
popd
diff --git a/scripts/ci/install.deploy b/scripts/ci/install.deploy
index 785f7f20e45..711df98a8a3 100755
--- a/scripts/ci/install.deploy
+++ b/scripts/ci/install.deploy
@@ -23,7 +23,7 @@ npm install -g npm
# install NPM dependencies
pushd bokehjs
-npm install --no-save --no-progress
+npm ci --no-progress
popd
# install sampledata
diff --git a/scripts/ci/install.examples b/scripts/ci/install.examples
index 4daaa36a272..3237fb49b9a 100755
--- a/scripts/ci/install.examples
+++ b/scripts/ci/install.examples
@@ -5,6 +5,6 @@ set -x # echo commands
# install NPM dependencies and build JS examples
pushd bokehjs
-npm install --no-save --no-progress
+npm ci --no-progress
node make examples --no-build
popd
diff --git a/scripts/ci/install.js b/scripts/ci/install.js
index b339e359ac4..91b2318720d 100755
--- a/scripts/ci/install.js
+++ b/scripts/ci/install.js
@@ -5,5 +5,5 @@ set -x # echo commands
# install NPM dependencies
pushd bokehjs
-npm install --no-save --no-progress
+npm ci --no-progress
popd
diff --git a/scripts/ci/install.unit b/scripts/ci/install.unit
index 186fa00dc2b..fd579437b4c 100755
--- a/scripts/ci/install.unit
+++ b/scripts/ci/install.unit
@@ -5,7 +5,7 @@ set -x # echo commands
# install NPM dependencies
pushd bokehjs
-npm install --no-save --no-progress
+npm ci --no-progress
popd
if [[ ! -z "${MINIMAL}" ]]; then
diff --git a/sphinx/source/docs/dev_guide/setup.rst b/sphinx/source/docs/dev_guide/setup.rst
index ffa119e4b3a..bbb11b09287 100644
--- a/sphinx/source/docs/dev_guide/setup.rst
+++ b/sphinx/source/docs/dev_guide/setup.rst
@@ -182,7 +182,7 @@ to install all of BokehJS JavaScript dependencies:
.. code-block:: sh
- npm install --no-save
+ npm ci
This command will install the necessary packages into the ``node_modules``
subdirectory.
|
keras-team__keras-nlp-1166 | Add compute_output_shape method to WordPieceTokenizer
When we run the Pretraining Transformer from Scratch guide with the PyTorch and JAX backends, it raises
```
RuntimeError: Exception encountered when calling WordPieceTokenizer.call().
Could not automatically infer the output shape / dtype of 'word_piece_tokenizer_1' (of type WordPieceTokenizer). Either the `WordPieceTokenizer.call()` method is incorrect, or you need to implement the `WordPieceTokenizer.compute_output_spec() / compute_output_shape()` method. Error encountered:
'string'
Arguments received by WordPieceTokenizer.call():
• args=('<KerasTensor shape=(None,), dtype=string, name=keras_tensor_59>',)
• kwargs=<class 'inspect._empty'>
```
cc: @mattdangerw
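For reference, a minimal sketch of the shape-inference hook Keras looks for in this situation; it mirrors the one-line method added in the patch below and assumes the tokenizer exposes a `sequence_length` attribute (as `WordPieceTokenizer` does):
```python
# Sketch only: lets Keras infer the output spec without tracing call()
# on symbolic string tensors. Mirrors the fix applied in this record's diff.
def compute_output_shape(self, inputs_shape):
    # Tokenizing a batch of strings yields one fixed-length row of token ids
    # per input string, so append sequence_length to the input shape.
    return tuple(inputs_shape) + (self.sequence_length,)
```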
| [
{
"content": "# Copyright 2023 The KerasNLP Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import List\n\nfrom keras_nlp.api_export import keras_nlp_export\nfrom keras_nlp.layers.preprocessing.preprocessing_layer import (\n PreprocessingLayer,\n)\n\n\n@keras_nlp_export(\"keras_nlp.tokenizers.Tokenizer\")\nclass Tokenizer(PreprocessingLayer):\n \"\"\"A base class for tokenizer layers.\n\n Tokenizers in the KerasNLP library should all subclass this layer.\n The class provides two core methods `tokenize()` and `detokenize()` for\n going from plain text to sequences and back. A tokenizer is a subclass of\n `keras.layers.Layer` and can be combined into a `keras.Model`.\n\n Subclassers should always implement the `tokenize()` method, which will also\n be the default when calling the layer directly on inputs.\n\n Subclassers can optionally implement the `detokenize()` method if the\n tokenization is reversible. Otherwise, this can be skipped.\n\n Subclassers should implement `get_vocabulary()`, `vocabulary_size()`,\n `token_to_id()` and `id_to_token()` if applicable. For some simple\n \"vocab free\" tokenizers, such as a whitespace splitter show below, these\n methods do not apply and can be skipped.\n\n Examples:\n\n ```python\n class WhitespaceSplitterTokenizer(keras_nlp.tokenizers.Tokenizer):\n def tokenize(self, inputs):\n return tf.strings.split(inputs)\n\n def detokenize(self, inputs):\n return tf.strings.reduce_join(inputs, separator=\" \", axis=-1)\n\n tokenizer = WhitespaceSplitterTokenizer()\n\n # Tokenize some inputs.\n tokenizer.tokenize(\"This is a test\")\n\n # Shorthard for `tokenize()`.\n tokenizer(\"This is a test\")\n\n # Detokenize some outputs.\n tokenizer.detokenize([\"This\", \"is\", \"a\", \"test\"])\n ```\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def tokenize(self, inputs, *args, **kwargs):\n \"\"\"Transform input tensors of strings into output tokens.\n\n Args:\n inputs: Input tensor, or dict/list/tuple of input tensors.\n *args: Additional positional arguments.\n **kwargs: Additional keyword arguments.\n \"\"\"\n raise NotImplementedError(\n \"No implementation of `tokenize()` was found for \"\n f\"{self.__class__.__name__}. 
All tokenizers should implement \"\n \"`tokenize()`.\"\n )\n\n def detokenize(self, inputs, *args, **kwargs):\n \"\"\"Transform tokens back into strings.\n\n Args:\n inputs: Input tensor, or dict/list/tuple of input tensors.\n *args: Additional positional arguments.\n **kwargs: Additional keyword arguments.\n \"\"\"\n raise NotImplementedError(\n \"No implementation of `detokenize()` was found for \"\n f\"{self.__class__.__name__}.\"\n )\n\n def get_vocabulary(self) -> List[str]:\n \"\"\"Get the tokenizer vocabulary as a list of strings terms.\"\"\"\n raise NotImplementedError(\n \"No implementation of `get_vocabulary()` was found for \"\n f\"{self.__class__.__name__}.\"\n )\n\n def vocabulary_size(self) -> int:\n \"\"\"Returns the total size of the token id space.\"\"\"\n raise NotImplementedError(\n \"No implementation of `vocabulary_size()` was found for \"\n f\"{self.__class__.__name__}.\"\n )\n\n def id_to_token(self, id: int) -> str:\n \"\"\"Convert an integer id to a string token.\"\"\"\n raise NotImplementedError(\n \"No implementation of `id_to_token()` was found for \"\n f\"{self.__class__.__name__}.\"\n )\n\n def token_to_id(self, token: str) -> int:\n \"\"\"Convert an integer id to a string token.\"\"\"\n raise NotImplementedError(\n \"No implementation of `id_to_token()` was found for \"\n f\"{self.__class__.__name__}.\"\n )\n\n def call(self, inputs, *args, training=None, **kwargs):\n return self.tokenize(inputs, *args, **kwargs)\n",
"path": "keras_nlp/tokenizers/tokenizer.py"
}
] | [
{
"content": "# Copyright 2023 The KerasNLP Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import List\n\nfrom keras_nlp.api_export import keras_nlp_export\nfrom keras_nlp.layers.preprocessing.preprocessing_layer import (\n PreprocessingLayer,\n)\n\n\n@keras_nlp_export(\"keras_nlp.tokenizers.Tokenizer\")\nclass Tokenizer(PreprocessingLayer):\n \"\"\"A base class for tokenizer layers.\n\n Tokenizers in the KerasNLP library should all subclass this layer.\n The class provides two core methods `tokenize()` and `detokenize()` for\n going from plain text to sequences and back. A tokenizer is a subclass of\n `keras.layers.Layer` and can be combined into a `keras.Model`.\n\n Subclassers should always implement the `tokenize()` method, which will also\n be the default when calling the layer directly on inputs.\n\n Subclassers can optionally implement the `detokenize()` method if the\n tokenization is reversible. Otherwise, this can be skipped.\n\n Subclassers should implement `get_vocabulary()`, `vocabulary_size()`,\n `token_to_id()` and `id_to_token()` if applicable. For some simple\n \"vocab free\" tokenizers, such as a whitespace splitter show below, these\n methods do not apply and can be skipped.\n\n Examples:\n\n ```python\n class WhitespaceSplitterTokenizer(keras_nlp.tokenizers.Tokenizer):\n def tokenize(self, inputs):\n return tf.strings.split(inputs)\n\n def detokenize(self, inputs):\n return tf.strings.reduce_join(inputs, separator=\" \", axis=-1)\n\n tokenizer = WhitespaceSplitterTokenizer()\n\n # Tokenize some inputs.\n tokenizer.tokenize(\"This is a test\")\n\n # Shorthard for `tokenize()`.\n tokenizer(\"This is a test\")\n\n # Detokenize some outputs.\n tokenizer.detokenize([\"This\", \"is\", \"a\", \"test\"])\n ```\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def tokenize(self, inputs, *args, **kwargs):\n \"\"\"Transform input tensors of strings into output tokens.\n\n Args:\n inputs: Input tensor, or dict/list/tuple of input tensors.\n *args: Additional positional arguments.\n **kwargs: Additional keyword arguments.\n \"\"\"\n raise NotImplementedError(\n \"No implementation of `tokenize()` was found for \"\n f\"{self.__class__.__name__}. 
All tokenizers should implement \"\n \"`tokenize()`.\"\n )\n\n def detokenize(self, inputs, *args, **kwargs):\n \"\"\"Transform tokens back into strings.\n\n Args:\n inputs: Input tensor, or dict/list/tuple of input tensors.\n *args: Additional positional arguments.\n **kwargs: Additional keyword arguments.\n \"\"\"\n raise NotImplementedError(\n \"No implementation of `detokenize()` was found for \"\n f\"{self.__class__.__name__}.\"\n )\n\n def get_vocabulary(self) -> List[str]:\n \"\"\"Get the tokenizer vocabulary as a list of strings terms.\"\"\"\n raise NotImplementedError(\n \"No implementation of `get_vocabulary()` was found for \"\n f\"{self.__class__.__name__}.\"\n )\n\n def vocabulary_size(self) -> int:\n \"\"\"Returns the total size of the token id space.\"\"\"\n raise NotImplementedError(\n \"No implementation of `vocabulary_size()` was found for \"\n f\"{self.__class__.__name__}.\"\n )\n\n def id_to_token(self, id: int) -> str:\n \"\"\"Convert an integer id to a string token.\"\"\"\n raise NotImplementedError(\n \"No implementation of `id_to_token()` was found for \"\n f\"{self.__class__.__name__}.\"\n )\n\n def token_to_id(self, token: str) -> int:\n \"\"\"Convert an integer id to a string token.\"\"\"\n raise NotImplementedError(\n \"No implementation of `id_to_token()` was found for \"\n f\"{self.__class__.__name__}.\"\n )\n\n def call(self, inputs, *args, training=None, **kwargs):\n return self.tokenize(inputs, *args, **kwargs)\n\n def compute_output_shape(self, inputs_shape):\n return tuple(inputs_shape) + (self.sequence_length,)\n",
"path": "keras_nlp/tokenizers/tokenizer.py"
}
] | diff --git a/keras_nlp/tokenizers/tokenizer.py b/keras_nlp/tokenizers/tokenizer.py
index dbc7c4bbdb..e03123b0c5 100644
--- a/keras_nlp/tokenizers/tokenizer.py
+++ b/keras_nlp/tokenizers/tokenizer.py
@@ -123,3 +123,6 @@ def token_to_id(self, token: str) -> int:
def call(self, inputs, *args, training=None, **kwargs):
return self.tokenize(inputs, *args, **kwargs)
+
+ def compute_output_shape(self, inputs_shape):
+ return tuple(inputs_shape) + (self.sequence_length,)
|
djangopackages__djangopackages-267 | Add errormator.com
| [
{
"content": "# -*- coding: utf-8 -*-\n\"\"\"Heroku specific settings. These are used to deploy opencomparison to\nHeroku's platform.\n\"\"\"\n\n\nfrom os import environ\n\nfrom memcacheify import memcacheify\nfrom postgresify import postgresify\nfrom S3 import CallingFormat\n\nfrom settings.base import *\n\n\n########## CACHE\nCACHE_TIMEOUT = 60 * 60 * 24 * 30\nCACHES = memcacheify()\n\n\n########## WSGI SERVER\nINSTALLED_APPS += ['gunicorn']\n\n\n########## EMAIL\nDEFAULT_FROM_EMAIL = environ.get('DEFAULT_FROM_EMAIL',\n 'Django Packages <[email protected]>')\nEMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'\nEMAIL_HOST = environ.get('EMAIL_HOST', 'smtp.sendgrid.com')\nEMAIL_HOST_PASSWORD = os.environ.get('SENDGRID_PASSWORD', '')\nEMAIL_HOST_USER = os.environ.get('SENDGRID_USERNAME', '')\nEMAIL_PORT = environ.get('EMAIL_PORT', 587)\nEMAIL_SUBJECT_PREFIX = environ.get('EMAIL_SUBJECT_PREFIX', '[Django Packages] ')\nEMAIL_USE_TLS = True\nSERVER_EMAIL = EMAIL_HOST_USER\n\n\n########## SECRET\nSECRET_KEY = environ.get('SECRET_KEY', '')\n\n\n########## GITHUB\nGITHUB_API_SECRET = environ.get('GITHUB_API_SECRET')\nGITHUB_APP_ID = environ.get('GITHUB_APP_ID')\n\n\n########## SITE\nSITE_TITLE = environ.get('SITE_TITLE')\nFRAMEWORK_TITLE = environ.get('FRAMEWORK_TITLE')\n\n\n########## STORAGE\nINSTALLED_APPS += ['storages']\nDEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'\nSTATICFILES_STORAGE = 'storages.backends.s3boto.S3BotoStorage'\n\nAWS_ACCESS_KEY_ID = environ.get('AWS_ACCESS_KEY_ID')\nAWS_SECRET_ACCESS_KEY = environ.get('AWS_SECRET_ACCESS_KEY')\nAWS_STORAGE_BUCKET_NAME = environ.get('AWS_STORAGE_BUCKET_NAME')\n\nAWS_CALLING_FORMAT = CallingFormat.SUBDOMAIN\nAWS_HEADERS = {\n 'Expires': 'Thu, 15 Apr 2020 20:00:00 GMT',\n 'Cache-Control': 'max-age=86400',\n}\nAWS_QUERYSTRING_AUTH = False\n\nSTATIC_URL = 'https://s3.amazonaws.com/%s/' % AWS_STORAGE_BUCKET_NAME\nMEDIA_URL = STATIC_URL\n\n\n########### Permissions\nRESTRICT_PACKAGE_EDITORS = False\nRESTRICT_GRID_EDITORS = False\n\n########### Errors\n# A sample logging configuration. The only tangible logging\n# performed by this configuration is to send an email to\n# the site admins on every HTTP 500 error when DEBUG=False.\n# See http://docs.djangoproject.com/en/dev/topics/logging for\n# more details on how to customize your logging configuration.\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': True,\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'handlers': {\n 'mail_admins': {\n 'level': 'ERROR',\n 'filters': ['require_debug_false'],\n 'class': 'django.utils.log.AdminEmailHandler'\n }\n },\n 'loggers': {\n 'django.request': {\n 'handlers': ['mail_admins'],\n 'level': 'ERROR',\n 'propagate': True,\n },\n }\n}\n\n\n########## DATABASE CONFIGURATION\n# Setting PGSQL_POOLING to True means:\n# We use django_postgrespool to handle the database connection.\n# What this means is we use SqlAlchemy to handle the pool to PGSQL on Heroku, meaning we don't have\n# to reestablish connection to the database as often. Which means a faster app. The downside is there\n# is some risk as it's still a new project.\n#\n# Setting PGSQL_POOLING to False means:\n# We use the standard Django pgsql connection. 
The pooling isn't as good but we have more stability.\nPGSQL_POOLING = False\n\n\nif PGSQL_POOLING:\n import dj_database_url\n\n DATABASES = {'default': dj_database_url.config()}\n DATABASES['default']['ENGINE'] = 'django_postgrespool'\n\n SOUTH_DATABASE_ADAPTERS = {\n 'default': 'south.db.postgresql_psycopg2'\n }\n\n DATABASE_POOL_ARGS = {\n 'max_overflow': 10,\n 'pool_size': 5,\n 'recycle': 300\n }\nelse:\n from postgresify import postgresify\n\n DATABASES = postgresify()\n########## END DATABASE CONFIGURATION\n\n\n########## sslify\nMIDDLEWARE_CLASSES = ('sslify.middleware.SSLifyMiddleware',) + MIDDLEWARE_CLASSES\n########## end sslify\n\n########## django-secure\n\nINSTALLED_APPS += [\"djangosecure\", ]\n\n# set this to 60 seconds and then to 518400 when you can prove it works\nSECURE_HSTS_SECONDS = 60\nSECURE_HSTS_INCLUDE_SUBDOMAINS = True\nSECURE_FRAME_DENY = True\nSECURE_CONTENT_TYPE_NOSNIFF = True\nSECURE_BROWSER_XSS_FILTER = True\nSESSION_COOKIE_SECURE = True\nSESSION_COOKIE_HTTPONLY = True\nSECURE_SSL_REDIRECT = True\n\n########## end django-secure\n\n\n########## templates\nTEMPLATE_LOADERS = (\n ('django.template.loaders.cached.Loader', (\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n )),\n)\n\n########## end templates\n",
"path": "settings/heroku.py"
}
] | [
{
"content": "# -*- coding: utf-8 -*-\n\"\"\"Heroku specific settings. These are used to deploy opencomparison to\nHeroku's platform.\n\"\"\"\n\n\nfrom os import environ\n\nfrom memcacheify import memcacheify\nfrom postgresify import postgresify\nfrom S3 import CallingFormat\n\nfrom settings.base import *\n\n\n########## CACHE\nCACHE_TIMEOUT = 60 * 60 * 24 * 30\nCACHES = memcacheify()\n\n\n########## WSGI SERVER\nINSTALLED_APPS += ['gunicorn']\n\n\n########## EMAIL\nDEFAULT_FROM_EMAIL = environ.get('DEFAULT_FROM_EMAIL',\n 'Django Packages <[email protected]>')\nEMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'\nEMAIL_HOST = environ.get('EMAIL_HOST', 'smtp.sendgrid.com')\nEMAIL_HOST_PASSWORD = os.environ.get('SENDGRID_PASSWORD', '')\nEMAIL_HOST_USER = os.environ.get('SENDGRID_USERNAME', '')\nEMAIL_PORT = environ.get('EMAIL_PORT', 587)\nEMAIL_SUBJECT_PREFIX = environ.get('EMAIL_SUBJECT_PREFIX', '[Django Packages] ')\nEMAIL_USE_TLS = True\nSERVER_EMAIL = EMAIL_HOST_USER\n\n\n########## SECRET\nSECRET_KEY = environ.get('SECRET_KEY', '')\n\n\n########## GITHUB\nGITHUB_API_SECRET = environ.get('GITHUB_API_SECRET')\nGITHUB_APP_ID = environ.get('GITHUB_APP_ID')\n\n\n########## SITE\nSITE_TITLE = environ.get('SITE_TITLE')\nFRAMEWORK_TITLE = environ.get('FRAMEWORK_TITLE')\n\n\n########## STORAGE\nINSTALLED_APPS += ['storages']\nDEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'\nSTATICFILES_STORAGE = 'storages.backends.s3boto.S3BotoStorage'\n\nAWS_ACCESS_KEY_ID = environ.get('AWS_ACCESS_KEY_ID')\nAWS_SECRET_ACCESS_KEY = environ.get('AWS_SECRET_ACCESS_KEY')\nAWS_STORAGE_BUCKET_NAME = environ.get('AWS_STORAGE_BUCKET_NAME')\n\nAWS_CALLING_FORMAT = CallingFormat.SUBDOMAIN\nAWS_HEADERS = {\n 'Expires': 'Thu, 15 Apr 2020 20:00:00 GMT',\n 'Cache-Control': 'max-age=86400',\n}\nAWS_QUERYSTRING_AUTH = False\n\nSTATIC_URL = 'https://s3.amazonaws.com/%s/' % AWS_STORAGE_BUCKET_NAME\nMEDIA_URL = STATIC_URL\n\n\n########### Permissions\nRESTRICT_PACKAGE_EDITORS = False\nRESTRICT_GRID_EDITORS = False\n\n########### Errors\n# A sample logging configuration. The only tangible logging\n# performed by this configuration is to send an email to\n# the site admins on every HTTP 500 error when DEBUG=False.\n# See http://docs.djangoproject.com/en/dev/topics/logging for\n# more details on how to customize your logging configuration.\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': True,\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'handlers': {\n 'mail_admins': {\n 'level': 'ERROR',\n 'filters': ['require_debug_false'],\n 'class': 'django.utils.log.AdminEmailHandler'\n }\n },\n 'loggers': {\n 'django.request': {\n 'handlers': ['mail_admins'],\n 'level': 'ERROR',\n 'propagate': True,\n },\n }\n}\n\n\n########## DATABASE CONFIGURATION\n# Setting PGSQL_POOLING to True means:\n# We use django_postgrespool to handle the database connection.\n# What this means is we use SqlAlchemy to handle the pool to PGSQL on Heroku, meaning we don't have\n# to reestablish connection to the database as often. Which means a faster app. The downside is there\n# is some risk as it's still a new project.\n#\n# Setting PGSQL_POOLING to False means:\n# We use the standard Django pgsql connection. 
The pooling isn't as good but we have more stability.\nPGSQL_POOLING = False\n\n\nif PGSQL_POOLING:\n import dj_database_url\n\n DATABASES = {'default': dj_database_url.config()}\n DATABASES['default']['ENGINE'] = 'django_postgrespool'\n\n SOUTH_DATABASE_ADAPTERS = {\n 'default': 'south.db.postgresql_psycopg2'\n }\n\n DATABASE_POOL_ARGS = {\n 'max_overflow': 10,\n 'pool_size': 5,\n 'recycle': 300\n }\nelse:\n from postgresify import postgresify\n\n DATABASES = postgresify()\n########## END DATABASE CONFIGURATION\n\n\n########## sslify\nMIDDLEWARE_CLASSES = ('sslify.middleware.SSLifyMiddleware',) + MIDDLEWARE_CLASSES\n########## end sslify\n\n########## django-secure\n\nINSTALLED_APPS += [\"djangosecure\", ]\n\n# set this to 60 seconds and then to 518400 when you can prove it works\nSECURE_HSTS_SECONDS = 60\nSECURE_HSTS_INCLUDE_SUBDOMAINS = True\nSECURE_FRAME_DENY = True\nSECURE_CONTENT_TYPE_NOSNIFF = True\nSECURE_BROWSER_XSS_FILTER = True\nSESSION_COOKIE_SECURE = True\nSESSION_COOKIE_HTTPONLY = True\nSECURE_SSL_REDIRECT = True\n\n########## end django-secure\n\n\n########## templates\nTEMPLATE_LOADERS = (\n ('django.template.loaders.cached.Loader', (\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n )),\n)\n\n########## end templates\n\n#-------------------\n# appenlight-client\n#------------------\n\nimport appenlight_client.client as e_client\nAPPENLIGHT = e_client.get_config({'appenlight.api_key': os.environ.get('APPENLIGHT_KEY', '')})\n\nMIDDLEWARE_CLASSES += (\n 'appenlight_client.django_middleware.AppenlightMiddleware',\n)",
"path": "settings/heroku.py"
}
] | diff --git a/requirements.txt b/requirements.txt
index a6f0c0f28..194b08b16 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -40,3 +40,4 @@ raven==1.4.6
rq==0.3.8
requests==1.2.3
six==1.1.0
+appenlight-client==0.6.4
\ No newline at end of file
diff --git a/settings/heroku.py b/settings/heroku.py
index f555a4450..d257723d9 100644
--- a/settings/heroku.py
+++ b/settings/heroku.py
@@ -168,3 +168,14 @@
)
########## end templates
+
+#-------------------
+# appenlight-client
+#------------------
+
+import appenlight_client.client as e_client
+APPENLIGHT = e_client.get_config({'appenlight.api_key': os.environ.get('APPENLIGHT_KEY', '')})
+
+MIDDLEWARE_CLASSES += (
+ 'appenlight_client.django_middleware.AppenlightMiddleware',
+)
\ No newline at end of file
|
kivy__kivy-611 | Label Text Clipped Horizontally (Moved)
**Originally reported as a continuation of #576 by esbullington**
I think I'm having trouble with this same issue. I'm trying to use markup with a Label, and am finding that my Label text is cut off along the horizontal axis if I have markup set to True. This is probably only occurring with the latest development version, even after the above patch was pulled. The problem does not occur with Kivy 1.3.0. If needed, I can re-install the development version and make a screenshot, but for now I'm working with Kivy 1.3.0.
I've only started working with Kivy in the past few days, so I'm not yet in a place where I feel comfortable sending in a patch. (awesome framework, by the way, congrats on the great work!).
Oh, and it doesn't look like I can re-open the issue, so someone else may wish to do so, or else tell me to open another issue for the problem.
UPDATE: I copied markup.py from Kivy 1.3 to Kivy 1.4-dev and it resolved this issue for me. I may now have problems with rst, but at least my markup labels aren't cut in half.
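For what it's worth, a minimal reproduction along the lines of the regression test added in the accompanying patch (widget names are illustrative); before the fix, the markup label rendered narrower than the plain one:
```python
# Sketch of a reproduction: the same text with and without markup=True
# should render at the same width inside the layout.
from kivy.uix.label import Label
from kivy.uix.gridlayout import GridLayout

plain = Label(text="TextToTest")
plain.bind(text_size=plain.setter('size'))

marked = Label(text="TextToTest", markup=True)
marked.bind(text_size=marked.setter('size'))

grid = GridLayout(rows=1, size_hint=(1, 1))
grid.add_widget(plain)
grid.add_widget(marked)
```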
| [
{
"content": "'''\nText Markup\n===========\n\n.. versionadded:: 1.1.0\n\nWe provide a simple text-markup for inline text styling. The syntax look the\nsame as the `BBCode <http://en.wikipedia.org/wiki/BBCode>`_.\n\nA tag is defined as ``[tag]``, and might have a closed tag associated:\n``[/tag]``. Example of a markup text::\n\n [b]Hello [color=ff0000]world[/b][/color]\n\nThe following tags are availables:\n\n``[b][/b]``\n Activate bold text\n``[i][/i]``\n Activate italic text\n``[font=<str>][/font]``\n Change the font\n``[size=<integer>][/size]``\n Change the font size\n``[color=#<color>][/color]``\n Change the text color\n``[ref=<str>][/ref]``\n Add an interactive zone. The reference + all the word box inside the\n reference will be available in :data:`MarkupLabel.refs`\n``[anchor=<str>]``\n Put an anchor in the text. You can get the position of your anchor within\n the text with :data:`MarkupLabel.anchors`\n\nIf you need to escape the markup from the current text, use\n:func:`kivy.utils.escape_markup`.\n'''\n\n__all__ = ('MarkupLabel', )\n\nfrom kivy.graphics.texture import Texture\nfrom kivy.utils import platform\nfrom kivy.parser import parse_color\nfrom kivy.logger import Logger\nimport re\nfrom kivy.core.text import Label, LabelBase\nfrom copy import copy\n\n# We need to do this trick when documentation is generated\nMarkupLabelBase = Label\nif Label is None:\n MarkupLabelBase = LabelBase\n\n\nclass MarkupLabel(MarkupLabelBase):\n '''Markup text label.\n\n See module documentation for more informations.\n '''\n\n def __init__(self, *largs, **kwargs):\n self._style_stack = {}\n self._refs = {}\n super(MarkupLabel, self).__init__(*largs, **kwargs)\n\n @property\n def refs(self):\n '''Get the bounding box of all the ``[ref=...]``::\n\n { 'refA': ((x1, y1, x2, y2), (x1, y1, x2, y2)), ... }\n '''\n return self._refs\n\n @property\n def anchors(self):\n '''Get the position of all the ``[anchor=...]``::\n\n { 'anchorA': (x, y), 'anchorB': (x, y), ... 
}\n '''\n return self._anchors\n\n @property\n def markup(self):\n '''Return the text with all the markup splitted::\n\n >>> MarkupLabel('[b]Hello world[/b]').markup\n >>> ('[b]', 'Hello world', '[/b]')\n\n '''\n s = re.split('(\\[.*?\\])', self.label)\n s = [x for x in s if x != '']\n return s\n\n def _push_style(self, k):\n if not k in self._style_stack:\n self._style_stack[k] = []\n self._style_stack[k].append(self.options[k])\n\n def _pop_style(self, k):\n if k not in self._style_stack or len(self._style_stack[k]) == 0:\n Logger.warning('Label: pop style stack without push')\n return\n v = self._style_stack[k].pop()\n self.options[k] = v\n\n def render(self, real=False):\n options = copy(self.options)\n if not real:\n ret = self._pre_render()\n else:\n ret = self._real_render()\n self.options = options\n return ret\n\n def _pre_render(self):\n # split markup, words, and lines\n # result: list of word with position and width/height\n # during the first pass, we don't care about h/valign\n self._lines = lines = []\n self._refs = {}\n self._anchors = {}\n spush = self._push_style\n spop = self._pop_style\n options = self.options\n options['_ref'] = None\n for item in self.markup:\n if item == '[b]':\n spush('bold')\n options['bold'] = True\n self.resolve_font_name()\n elif item == '[/b]':\n spop('bold')\n self.resolve_font_name()\n elif item == '[i]':\n spush('italic')\n options['italic'] = True\n self.resolve_font_name()\n elif item == '[/i]':\n spop('italic')\n self.resolve_font_name()\n elif item[:6] == '[size=':\n try:\n size = int(item[6:-1])\n except ValueError:\n size = options['font_size']\n spush('font_size')\n options['font_size'] = size\n elif item == '[/size]':\n spop('font_size')\n elif item[:7] == '[color=':\n color = parse_color(item[7:-1])\n spush('color')\n options['color'] = color\n elif item == '[/color]':\n spop('color')\n elif item[:6] == '[font=':\n fontname = item[6:-1]\n spush('font_name')\n options['font_name'] = fontname\n self.resolve_font_name()\n elif item == '[/font]':\n spop('font_name')\n self.resolve_font_name()\n elif item[:5] == '[ref=':\n ref = item[5:-1]\n spush('_ref')\n options['_ref'] = ref\n elif item == '[/ref]':\n spop('_ref')\n elif item[:8] == '[anchor=':\n ref = item[8:-1]\n if len(lines):\n x, y = lines[-1][0:2]\n else:\n x = y = 0\n self._anchors[ref] = x, y\n else:\n item = item.replace('&bl;', '[').replace(\n '&br;', ']').replace('&', '&')\n self._pre_render_label(item, options, lines)\n\n # calculate the texture size\n w, h = self.text_size\n if h < 0:\n h = None\n if w < 0:\n w = None\n if w is None:\n w = max([line[0] for line in lines])\n if h is None:\n h = sum([line[1] for line in lines])\n return w, h\n\n def _pre_render_label(self, word, options, lines):\n # precalculate id/name\n if not self.fontid in self._cache_glyphs:\n self._cache_glyphs[self.fontid] = {}\n cache = self._cache_glyphs[self.fontid]\n\n # verify that each glyph have size\n glyphs = list(set(word))\n get_extents = self.get_extents\n for glyph in glyphs:\n if not glyph in cache:\n cache[glyph] = get_extents(glyph)\n\n # get last line information\n if len(lines):\n line = lines[-1]\n else:\n # line-> line width, line height, words\n # words -> (w, h, word)...\n line = [0, 0, []]\n lines.append(line)\n\n # extract user limitation\n uw, uh = self.text_size\n\n # split the word\n for part in re.split(r'( |\\n)', word):\n\n if part == '':\n continue\n\n if part == '\\n':\n # put a new line!\n line = [0, 0, []]\n lines.append(line)\n continue\n\n # get current line 
information\n lw, lh = line[:2]\n\n # calculate the size of the part\n # (extract all extents of the part,\n # calculate width through extents due to kerning\n # and get the maximum height)\n pg = [cache[g] for g in part]\n pw = get_extents(part)[0]\n ph = max([g[1] for g in pg])\n\n options = copy(options)\n\n # check if the part can be put in the line\n if uw is None or lw + pw < uw:\n # no limitation or part can be contained in the line\n # then append the part to the line\n line[2].append((pw, ph, part, options))\n # and update the line size\n line[0] += pw\n line[1] = max(line[1], ph)\n else:\n # part can't be put in the line, do a new one...\n line = [pw, ph, [(pw, ph, part, options)]]\n lines.append(line)\n\n def _real_render(self):\n # use the lines to do the rendering !\n self._render_begin()\n\n r = self._render_text\n\n # convert halign/valign to int, faster comparaison\n av = {'top': 0, 'middle': 1, 'bottom': 2}[self.options['valign']]\n ah = {'left': 0, 'center': 1, 'right': 2}[self.options['halign']]\n\n y = 0\n w, h = self._size\n refs = self._refs\n no_of_lines = len(self._lines)-1\n\n for line in self._lines:\n lh = line[1]\n lw = line[0]\n\n # horizontal alignement\n if ah == 0:\n x = 0\n elif ah == 1:\n x = int((w - lw) / 2)\n else:\n x = w - lw\n\n # vertical alignement\n if y == 0:\n if av == 1:\n y = int((h - (lh*no_of_lines))/2)\n elif av == 2:\n y = h - (lh*(no_of_lines))\n\n\n for pw, ph, part, options in line[2]:\n self.options = options\n r(part, x, y + (lh - ph) / 1.25)\n\n # should we record refs ?\n ref = options['_ref']\n if ref is not None:\n if not ref in refs:\n refs[ref] = []\n refs[ref].append((x, y, x + pw, y + ph))\n\n #print 'render', repr(part), x, y, (lh, ph), options\n x += pw\n y += line[1]\n\n # get data from provider\n data = self._render_end()\n assert(data)\n\n # create texture is necessary\n texture = self.texture\n mipmap = self.options['mipmap']\n if texture is None:\n if data is None:\n if platform() in ('android', 'ios'):\n colorfmt = 'rgba'\n else:\n colorfmt = 'luminance_alpha'\n texture = Texture.create(\n size=self.size, colorfmt=colorfmt,\n mipmap=mipmap)\n else:\n texture = Texture.create_from_data(data, mipmap=mipmap)\n texture.flip_vertical()\n elif self.width != texture.width or self.height != texture.height:\n if data is None:\n texture = Texture.create(size=self.size, mipmap=mipmap)\n else:\n texture = Texture.create_from_data(data, mipmap=mipmap)\n texture.flip_vertical()\n\n # update texture\n self.texture = texture\n self.texture.blit_data(data)\n\n",
"path": "kivy/core/text/markup.py"
}
] | [
{
"content": "'''\nText Markup\n===========\n\n.. versionadded:: 1.1.0\n\nWe provide a simple text-markup for inline text styling. The syntax look the\nsame as the `BBCode <http://en.wikipedia.org/wiki/BBCode>`_.\n\nA tag is defined as ``[tag]``, and might have a closed tag associated:\n``[/tag]``. Example of a markup text::\n\n [b]Hello [color=ff0000]world[/b][/color]\n\nThe following tags are availables:\n\n``[b][/b]``\n Activate bold text\n``[i][/i]``\n Activate italic text\n``[font=<str>][/font]``\n Change the font\n``[size=<integer>][/size]``\n Change the font size\n``[color=#<color>][/color]``\n Change the text color\n``[ref=<str>][/ref]``\n Add an interactive zone. The reference + all the word box inside the\n reference will be available in :data:`MarkupLabel.refs`\n``[anchor=<str>]``\n Put an anchor in the text. You can get the position of your anchor within\n the text with :data:`MarkupLabel.anchors`\n\nIf you need to escape the markup from the current text, use\n:func:`kivy.utils.escape_markup`.\n'''\n\n__all__ = ('MarkupLabel', )\n\nfrom kivy.graphics.texture import Texture\nfrom kivy.utils import platform\nfrom kivy.parser import parse_color\nfrom kivy.logger import Logger\nimport re\nfrom kivy.core.text import Label, LabelBase\nfrom copy import copy\n\n# We need to do this trick when documentation is generated\nMarkupLabelBase = Label\nif Label is None:\n MarkupLabelBase = LabelBase\n\n\nclass MarkupLabel(MarkupLabelBase):\n '''Markup text label.\n\n See module documentation for more informations.\n '''\n\n def __init__(self, *largs, **kwargs):\n self._style_stack = {}\n self._refs = {}\n super(MarkupLabel, self).__init__(*largs, **kwargs)\n\n @property\n def refs(self):\n '''Get the bounding box of all the ``[ref=...]``::\n\n { 'refA': ((x1, y1, x2, y2), (x1, y1, x2, y2)), ... }\n '''\n return self._refs\n\n @property\n def anchors(self):\n '''Get the position of all the ``[anchor=...]``::\n\n { 'anchorA': (x, y), 'anchorB': (x, y), ... 
}\n '''\n return self._anchors\n\n @property\n def markup(self):\n '''Return the text with all the markup splitted::\n\n >>> MarkupLabel('[b]Hello world[/b]').markup\n >>> ('[b]', 'Hello world', '[/b]')\n\n '''\n s = re.split('(\\[.*?\\])', self.label)\n s = [x for x in s if x != '']\n return s\n\n def _push_style(self, k):\n if not k in self._style_stack:\n self._style_stack[k] = []\n self._style_stack[k].append(self.options[k])\n\n def _pop_style(self, k):\n if k not in self._style_stack or len(self._style_stack[k]) == 0:\n Logger.warning('Label: pop style stack without push')\n return\n v = self._style_stack[k].pop()\n self.options[k] = v\n\n def render(self, real=False):\n options = copy(self.options)\n if not real:\n ret = self._pre_render()\n else:\n ret = self._real_render()\n self.options = options\n return ret\n\n def _pre_render(self):\n # split markup, words, and lines\n # result: list of word with position and width/height\n # during the first pass, we don't care about h/valign\n self._lines = lines = []\n self._refs = {}\n self._anchors = {}\n spush = self._push_style\n spop = self._pop_style\n options = self.options\n options['_ref'] = None\n for item in self.markup:\n if item == '[b]':\n spush('bold')\n options['bold'] = True\n self.resolve_font_name()\n elif item == '[/b]':\n spop('bold')\n self.resolve_font_name()\n elif item == '[i]':\n spush('italic')\n options['italic'] = True\n self.resolve_font_name()\n elif item == '[/i]':\n spop('italic')\n self.resolve_font_name()\n elif item[:6] == '[size=':\n try:\n size = int(item[6:-1])\n except ValueError:\n size = options['font_size']\n spush('font_size')\n options['font_size'] = size\n elif item == '[/size]':\n spop('font_size')\n elif item[:7] == '[color=':\n color = parse_color(item[7:-1])\n spush('color')\n options['color'] = color\n elif item == '[/color]':\n spop('color')\n elif item[:6] == '[font=':\n fontname = item[6:-1]\n spush('font_name')\n options['font_name'] = fontname\n self.resolve_font_name()\n elif item == '[/font]':\n spop('font_name')\n self.resolve_font_name()\n elif item[:5] == '[ref=':\n ref = item[5:-1]\n spush('_ref')\n options['_ref'] = ref\n elif item == '[/ref]':\n spop('_ref')\n elif item[:8] == '[anchor=':\n ref = item[8:-1]\n if len(lines):\n x, y = lines[-1][0:2]\n else:\n x = y = 0\n self._anchors[ref] = x, y\n else:\n item = item.replace('&bl;', '[').replace(\n '&br;', ']').replace('&', '&')\n self._pre_render_label(item, options, lines)\n\n # calculate the texture size\n w, h = self.text_size\n if h < 0:\n h = None\n if w < 0:\n w = None\n if w is None:\n w = max([line[0] for line in lines])\n if h is None:\n h = sum([line[1] for line in lines])\n return w, h\n\n def _pre_render_label(self, word, options, lines):\n # precalculate id/name\n if not self.fontid in self._cache_glyphs:\n self._cache_glyphs[self.fontid] = {}\n cache = self._cache_glyphs[self.fontid]\n\n # verify that each glyph have size\n glyphs = list(set(word))\n get_extents = self.get_extents\n for glyph in glyphs:\n if not glyph in cache:\n cache[glyph] = get_extents(glyph)\n\n # get last line information\n if len(lines):\n line = lines[-1]\n else:\n # line-> line width, line height, words\n # words -> (w, h, word)...\n line = [0, 0, []]\n lines.append(line)\n\n # extract user limitation\n uw, uh = self.text_size\n\n # split the word\n for part in re.split(r'( |\\n)', word):\n\n if part == '':\n continue\n\n if part == '\\n':\n # put a new line!\n line = [0, 0, []]\n lines.append(line)\n continue\n\n # get current line 
information\n lw, lh = line[:2]\n\n # calculate the size of the part\n # (extract all extents of the part,\n # calculate width through extents due to kerning\n # and get the maximum height)\n pg = [cache[g] for g in part]\n pw = get_extents(part)[0]\n ph = max([g[1] for g in pg])\n\n options = copy(options)\n\n # check if the part can be put in the line\n if uw is None or lw + pw < uw:\n # no limitation or part can be contained in the line\n # then append the part to the line\n line[2].append((pw, ph, part, options))\n # and update the line size\n line[0] += pw\n line[1] = max(line[1], ph)\n else:\n # part can't be put in the line, do a new one...\n line = [pw, ph, [(pw, ph, part, options)]]\n lines.append(line)\n\n def _real_render(self):\n # use the lines to do the rendering !\n self._render_begin()\n\n r = self._render_text\n\n # convert halign/valign to int, faster comparaison\n av = {'top': 0, 'middle': 1, 'bottom': 2}[self.options['valign']]\n ah = {'left': 0, 'center': 1, 'right': 2}[self.options['halign']]\n\n y = 0\n w, h = self._size\n refs = self._refs\n no_of_lines = len(self._lines)\n\n for line in self._lines:\n lh = line[1]\n lw = line[0]\n\n # horizontal alignement\n if ah == 0:\n x = 0\n elif ah == 1:\n x = int((w - lw) / 2)\n else:\n x = w - lw\n\n # vertical alignement\n if y == 0:\n if av == 1:\n y = int((h - (lh*no_of_lines))/2)\n elif av == 2:\n y = h - (lh*(no_of_lines))\n\n\n for pw, ph, part, options in line[2]:\n self.options = options\n r(part, x, y + (lh - ph) / 1.25)\n\n # should we record refs ?\n ref = options['_ref']\n if ref is not None:\n if not ref in refs:\n refs[ref] = []\n refs[ref].append((x, y, x + pw, y + ph))\n\n #print 'render', repr(part), x, y, (lh, ph), options\n x += pw\n y += line[1]\n\n # get data from provider\n data = self._render_end()\n assert(data)\n\n # create texture is necessary\n texture = self.texture\n mipmap = self.options['mipmap']\n if texture is None:\n if data is None:\n if platform() in ('android', 'ios'):\n colorfmt = 'rgba'\n else:\n colorfmt = 'luminance_alpha'\n texture = Texture.create(\n size=self.size, colorfmt=colorfmt,\n mipmap=mipmap)\n else:\n texture = Texture.create_from_data(data, mipmap=mipmap)\n texture.flip_vertical()\n elif self.width != texture.width or self.height != texture.height:\n if data is None:\n texture = Texture.create(size=self.size, mipmap=mipmap)\n else:\n texture = Texture.create_from_data(data, mipmap=mipmap)\n texture.flip_vertical()\n\n # update texture\n self.texture = texture\n self.texture.blit_data(data)\n\n",
"path": "kivy/core/text/markup.py"
}
] | diff --git a/kivy/core/text/markup.py b/kivy/core/text/markup.py
index 1d05d14dbc..46e41ea351 100644
--- a/kivy/core/text/markup.py
+++ b/kivy/core/text/markup.py
@@ -266,7 +266,7 @@ def _real_render(self):
y = 0
w, h = self._size
refs = self._refs
- no_of_lines = len(self._lines)-1
+ no_of_lines = len(self._lines)
for line in self._lines:
lh = line[1]
diff --git a/kivy/tests/test_issue_609.py b/kivy/tests/test_issue_609.py
new file mode 100644
index 0000000000..e1d2320474
--- /dev/null
+++ b/kivy/tests/test_issue_609.py
@@ -0,0 +1,19 @@
+from common import GraphicUnitTest
+
+
+class Issue609(GraphicUnitTest):
+
+ def test_markup_pos(self):
+ from kivy.uix.label import Label
+ from kivy.uix.gridlayout import GridLayout
+
+ lbl = Label(text="TextToTest")
+ lbl.bind(text_size=lbl.setter('size'))
+ mrkp = Label(text="TextToTest", markup = True)
+ mrkp.bind(text_size=mrkp.setter('size'))
+
+ grid = GridLayout(rows=1, size_hint=(1, 1))
+ grid.add_widget(lbl)
+ grid.add_widget(mrkp)
+
+ self.render(grid, 2)
|
OpenNMT__OpenNMT-tf-6 | Poor translation results with the Transformer
The Transformer model produces very bad translation results. Its implementation should be revised and fixed.
See also the reference implementation at https://github.com/tensorflow/tensor2tensor.
| [
{
"content": "\"\"\"Define functions related to the Google's Transformer model.\"\"\"\n\nimport tensorflow as tf\n\n\ndef scaled_dot_attention(queries,\n keys,\n values,\n mode,\n values_length=None,\n mask_future=False,\n dropout=0.0):\n \"\"\"Computes the scaled dot-product attention as described\n in https://arxiv.org/abs/1706.03762.\n\n Args:\n queries: The sequence of queries. A tensor of shape :math:`[B, T_1, ...]`.\n keys: The sequence use to calculate attention scores. A tensor of shape\n :math:`[B, T_2, ...]`.\n values: The sequence to attend. A tensor of shape :math:`[B, T_2, ...]`.\n mode: A ``tf.estimator.ModeKeys`` mode.\n values_length: The length of the values to attend.\n mask_future: Mask attention to future positions.\n dropout: The probability to drop units from the inputs.\n\n Returns:\n A tuple ``(context vector, attention vector)``.\n \"\"\"\n # Scaled dot-product between queries and keys.\n dot = tf.matmul(queries, keys, transpose_b=True)\n dot = tf.div(dot, tf.sqrt(tf.cast(tf.shape(keys)[-1], tf.float32)))\n\n if values_length is not None:\n # Give no weight to illegal connections.\n if mask_future:\n # When masking the future, a position can only attend to previous timesteps.\n mask = tf.map_fn(\n lambda x: tf.sequence_mask(\n tf.minimum(tf.range(tf.shape(values)[1]) + 1, x),\n maxlen=tf.shape(values)[1],\n dtype=tf.float32),\n values_length,\n dtype=tf.float32)\n else:\n # Otherwise, simply prevent attention on out-of-range positions.\n mask = tf.sequence_mask(\n values_length,\n maxlen=tf.shape(values)[1],\n dtype=tf.float32)\n mask = tf.expand_dims(mask, axis=1)\n\n dot = dot * mask + ((1.0 - mask) * tf.float32.min)\n\n # Compute attention weights.\n attn = tf.nn.softmax(dot)\n attn = tf.layers.dropout(\n attn,\n rate=dropout,\n training=mode == tf.estimator.ModeKeys.TRAIN)\n\n # Compute attention context.\n context = tf.matmul(attn, values)\n\n return context, attn\n\n\ndef multi_head_attention(num_heads,\n queries,\n keys,\n values,\n mode,\n values_length=None,\n mask_future=False,\n dropout=0.0):\n \"\"\"Computes the multi-head attention as described in\n https://arxiv.org/abs/1706.03762.\n\n Args:\n num_heads: The number of attention heads.\n queries: The sequence of queries. A tensor of shape :math:`[B, T_1, ...]`.\n keys: The sequence use to calculate attention scores. A tensor of shape\n :math:`[B, T_2, ...]`.\n values: The sequence to attend. 
A tensor of shape :math:`[B, T_2, ...]`.\n mode: A ``tf.estimator.ModeKeys`` mode.\n values_length: The length of the values to attend.\n mask_future: Mask attention to future positions.\n dropout: The probability to drop units from the inputs.\n\n Returns:\n The concatenated attention context of each head.\n \"\"\"\n input_dim = keys.get_shape().as_list()[-1]\n\n if input_dim % num_heads != 0:\n raise ValueError(\"Multi head attention requires the input dimension to be a\"\n \" multiple of {}\".format(num_heads))\n\n head_dim = input_dim / num_heads\n heads = []\n\n for i in range(num_heads):\n with tf.variable_scope(\"head_{}\".format(i)):\n # Project queries, keys and values to different and smaller subspaces.\n queries_proj = tf.layers.conv1d(queries, head_dim, 1)\n keys_proj = tf.layers.conv1d(keys, head_dim, 1)\n values_proj = tf.layers.conv1d(values, head_dim, 1)\n\n head_i, _ = scaled_dot_attention(\n queries_proj,\n keys_proj,\n values_proj,\n mode,\n values_length=values_length,\n mask_future=mask_future,\n dropout=dropout)\n\n heads.append(head_i)\n\n # Concatenate all heads output.\n combined = tf.concat(heads, axis=2)\n outputs = tf.layers.conv1d(combined, input_dim, 1)\n\n return outputs\n\ndef feed_forward(x, inner_dim):\n \"\"\"Implements the Transformer's \"Feed Forward\" layer.\n\n .. math::\n\n ffn(x) = max(0, x*W_1 + b_1)*W_2 + b_2\n\n Args:\n x: The input.\n inner_dim: The number of units of the inner linear transformation.\n\n Returns:\n The transformed input.\n \"\"\"\n input_dim = x.get_shape().as_list()[-1]\n\n inner = tf.layers.conv1d(x, inner_dim, 1, activation=tf.nn.relu)\n outer = tf.layers.conv1d(inner, input_dim, 1)\n\n return outer\n\ndef add_and_norm(inputs,\n outputs,\n mode,\n dropout=0.1):\n \"\"\"Implements the Transformer's \"Add & Norm\" layer.\n\n Args:\n inputs: The input of the previous layer.\n outputs: The output of the previous layer.\n mode: A ``tf.estimator.ModeKeys`` mode.\n dropout: The probability to drop units in :obj:`outputs`.\n\n Returns:\n The residual and normalized output.\n \"\"\"\n outputs = tf.layers.dropout(\n outputs,\n rate=dropout,\n training=mode == tf.estimator.ModeKeys.TRAIN)\n outputs += inputs\n outputs = tf.contrib.layers.layer_norm(outputs)\n return outputs\n",
"path": "opennmt/utils/transformer.py"
}
] | [
{
"content": "\"\"\"Define functions related to the Google's Transformer model.\"\"\"\n\nimport tensorflow as tf\n\n\ndef scaled_dot_attention(queries,\n keys,\n values,\n mode,\n values_length=None,\n mask_future=False,\n dropout=0.0):\n \"\"\"Computes the scaled dot-product attention as described\n in https://arxiv.org/abs/1706.03762.\n\n Args:\n queries: The sequence of queries. A tensor of shape :math:`[B, T_1, ...]`.\n keys: The sequence use to calculate attention scores. A tensor of shape\n :math:`[B, T_2, ...]`.\n values: The sequence to attend. A tensor of shape :math:`[B, T_2, ...]`.\n mode: A ``tf.estimator.ModeKeys`` mode.\n values_length: The length of the values to attend.\n mask_future: Mask attention to future positions.\n dropout: The probability to drop units from the inputs.\n\n Returns:\n A tuple ``(context vector, attention vector)``.\n \"\"\"\n # Scaled dot-product between queries and keys.\n dot = tf.matmul(queries, keys, transpose_b=True)\n dot = tf.div(dot, tf.sqrt(tf.cast(tf.shape(keys)[-1], tf.float32)))\n\n if values_length is not None:\n # Give no weight to illegal connections.\n if mask_future:\n # When masking the future, a position can only attend to previous timesteps.\n mask = tf.map_fn(\n lambda x: tf.sequence_mask(\n tf.minimum(tf.range(tf.shape(values)[1]) + 1, x),\n maxlen=tf.shape(values)[1],\n dtype=tf.float32),\n values_length,\n dtype=tf.float32)\n else:\n # Otherwise, simply prevent attention on out-of-range positions.\n mask = tf.sequence_mask(\n values_length,\n maxlen=tf.shape(values)[1],\n dtype=tf.float32)\n mask = tf.expand_dims(mask, axis=1)\n\n dot = dot * mask + ((1.0 - mask) * tf.float32.min)\n\n # Compute attention weights.\n attn = tf.nn.softmax(dot)\n attn = tf.layers.dropout(\n attn,\n rate=dropout,\n training=mode == tf.estimator.ModeKeys.TRAIN)\n\n # Compute attention context.\n context = tf.matmul(attn, values)\n\n return context, attn\n\n\ndef multi_head_attention(num_heads,\n queries,\n keys,\n values,\n mode,\n values_length=None,\n mask_future=False,\n dropout=0.0):\n \"\"\"Computes the multi-head attention as described in\n https://arxiv.org/abs/1706.03762.\n\n Args:\n num_heads: The number of attention heads.\n queries: The sequence of queries. A tensor of shape :math:`[B, T_1, ...]`.\n keys: The sequence use to calculate attention scores. A tensor of shape\n :math:`[B, T_2, ...]`.\n values: The sequence to attend. 
A tensor of shape :math:`[B, T_2, ...]`.\n mode: A ``tf.estimator.ModeKeys`` mode.\n values_length: The length of the values to attend.\n mask_future: Mask attention to future positions.\n dropout: The probability to drop units from the inputs.\n\n Returns:\n The concatenated attention context of each head.\n \"\"\"\n input_dim = keys.get_shape().as_list()[-1]\n\n if input_dim % num_heads != 0:\n raise ValueError(\"Multi head attention requires the input dimension to be a\"\n \" multiple of {}\".format(num_heads))\n\n head_dim = input_dim / num_heads\n heads = []\n\n for i in range(num_heads):\n with tf.variable_scope(\"head_{}\".format(i)):\n # Project queries, keys and values to different and smaller subspaces.\n queries_proj = tf.layers.conv1d(queries, head_dim, 1)\n keys_proj = tf.layers.conv1d(keys, head_dim, 1)\n values_proj = tf.layers.conv1d(values, head_dim, 1)\n\n head_i, _ = scaled_dot_attention(\n queries_proj,\n keys_proj,\n values_proj,\n mode,\n values_length=values_length,\n mask_future=mask_future,\n dropout=dropout)\n\n heads.append(head_i)\n\n # Concatenate all heads output.\n combined = tf.concat(heads, axis=2)\n outputs = tf.layers.conv1d(combined, input_dim, 1)\n\n return outputs\n\ndef feed_forward(x, inner_dim):\n \"\"\"Implements the Transformer's \"Feed Forward\" layer.\n\n .. math::\n\n ffn(x) = max(0, x*W_1 + b_1)*W_2 + b_2\n\n Args:\n x: The input.\n inner_dim: The number of units of the inner linear transformation.\n\n Returns:\n The transformed input.\n \"\"\"\n input_dim = x.get_shape().as_list()[-1]\n\n inner = tf.layers.conv1d(x, inner_dim, 1, activation=tf.nn.relu)\n outer = tf.layers.conv1d(inner, input_dim, 1)\n\n return outer\n\ndef add_and_norm(inputs,\n outputs,\n mode,\n dropout=0.1):\n \"\"\"Implements the Transformer's \"Add & Norm\" layer.\n\n Args:\n inputs: The input of the previous layer.\n outputs: The output of the previous layer.\n mode: A ``tf.estimator.ModeKeys`` mode.\n dropout: The probability to drop units in :obj:`outputs`.\n\n Returns:\n The residual and normalized output.\n \"\"\"\n outputs = tf.layers.dropout(\n outputs,\n rate=dropout,\n training=mode == tf.estimator.ModeKeys.TRAIN)\n outputs += inputs\n outputs = tf.contrib.layers.layer_norm(outputs, begin_norm_axis=-1)\n return outputs\n",
"path": "opennmt/utils/transformer.py"
}
] | diff --git a/opennmt/utils/transformer.py b/opennmt/utils/transformer.py
index 68d1a89c1..70d0fc9e6 100644
--- a/opennmt/utils/transformer.py
+++ b/opennmt/utils/transformer.py
@@ -163,5 +163,5 @@ def add_and_norm(inputs,
rate=dropout,
training=mode == tf.estimator.ModeKeys.TRAIN)
outputs += inputs
- outputs = tf.contrib.layers.layer_norm(outputs)
+ outputs = tf.contrib.layers.layer_norm(outputs, begin_norm_axis=-1)
return outputs
|
qutip__qutip-834 | Incorrect docstring of spin coherent state
In qutip.states, the docstring of `spin_coherent` is the same as that of `spin_state`. The correct description should be: "Generate the coherent spin state |theta, phi>."
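As a quick illustration of the distinction the docstrings should make (a usage sketch, not part of the patch): `spin_state` selects an eigenstate |j, m>, while `spin_coherent` builds the coherent spin state |theta, phi> for a spin-j system:
```python
# Usage sketch contrasting the two constructors whose docstrings are at issue.
import numpy as np
from qutip import spin_state, spin_coherent

eigenstate = spin_state(1, 0)                      # |j=1, m=0>
coherent = spin_coherent(1, np.pi / 2, np.pi / 4)  # |theta=pi/2, phi=pi/4> for j=1
```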
| [
{
"content": "# This file is part of QuTiP: Quantum Toolbox in Python.\n#\n# Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n#\n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names\n# of its contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A\n# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n###############################################################################\n\n__all__ = ['basis', 'qutrit_basis', 'coherent', 'coherent_dm', 'fock_dm',\n 'fock', 'thermal_dm', 'maximally_mixed_dm', 'ket2dm', 'projection',\n 'qstate', 'ket', 'bra', 'state_number_enumerate',\n 'state_number_index', 'state_index_number', 'state_number_qobj',\n 'phase_basis', 'zero_ket', 'spin_state', 'spin_coherent',\n 'bell_state', 'singlet_state', 'triplet_states', 'w_state',\n 'ghz_state', 'enr_state_dictionaries', 'enr_fock',\n 'enr_thermal_dm']\n\nimport numpy as np\nfrom scipy import arange, conj, prod\nimport scipy.sparse as sp\n\nfrom qutip.qobj import Qobj\nfrom qutip.operators import destroy, jmat\nfrom qutip.tensor import tensor\n\nfrom qutip.fastsparse import fast_csr_matrix\n\n\ndef basis(N, n=0, offset=0):\n \"\"\"Generates the vector representation of a Fock state.\n\n Parameters\n ----------\n N : int\n Number of Fock states in Hilbert space.\n\n n : int\n Integer corresponding to desired number state, defaults\n to 0 if omitted.\n\n offset : int (default 0)\n The lowest number state that is included in the finite number state\n representation of the state.\n\n Returns\n -------\n state : qobj\n Qobj representing the requested number state ``|n>``.\n\n Examples\n --------\n >>> basis(5,2)\n Quantum object: dims = [[5], [1]], shape = [5, 1], type = ket\n Qobj data =\n [[ 0.+0.j]\n [ 0.+0.j]\n [ 1.+0.j]\n [ 0.+0.j]\n [ 0.+0.j]]\n\n Notes\n -----\n\n A subtle incompatibility with the quantum optics toolbox: In QuTiP::\n\n basis(N, 0) = ground state\n\n but in the qotoolbox::\n\n basis(N, 1) = ground state\n\n \"\"\"\n if (not isinstance(N, (int, np.integer))) or N < 0:\n raise ValueError(\"N must be integer N >= 0\")\n\n if (not isinstance(n, (int, np.integer))) or n < offset:\n raise ValueError(\"n must be integer n >= 0\")\n\n if 
n - offset > (N - 1): # check if n is within bounds\n raise ValueError(\"basis vector index need to be in n <= N-1\")\n\n data = np.array([1], dtype=complex)\n ind = np.array([0], dtype=np.int32)\n ptr = np.array([0]*((n - offset)+1)+[1]*(N-(n-offset)),dtype=np.int32)\n\n return Qobj(fast_csr_matrix((data,ind,ptr), shape=(N,1)), isherm=False)\n\n\ndef qutrit_basis():\n \"\"\"Basis states for a three level system (qutrit)\n\n Returns\n -------\n qstates : array\n Array of qutrit basis vectors\n\n \"\"\"\n return np.array([basis(3, 0), basis(3, 1), basis(3, 2)], dtype=object)\n\n\ndef coherent(N, alpha, offset=0, method='operator'):\n \"\"\"Generates a coherent state with eigenvalue alpha.\n\n Constructed using displacement operator on vacuum state.\n\n Parameters\n ----------\n N : int\n Number of Fock states in Hilbert space.\n\n alpha : float/complex\n Eigenvalue of coherent state.\n\n offset : int (default 0)\n The lowest number state that is included in the finite number state\n representation of the state. Using a non-zero offset will make the\n default method 'analytic'.\n\n method : string {'operator', 'analytic'}\n Method for generating coherent state.\n\n Returns\n -------\n state : qobj\n Qobj quantum object for coherent state\n\n Examples\n --------\n >>> coherent(5,0.25j)\n Quantum object: dims = [[5], [1]], shape = [5, 1], type = ket\n Qobj data =\n [[ 9.69233235e-01+0.j ]\n [ 0.00000000e+00+0.24230831j]\n [ -4.28344935e-02+0.j ]\n [ 0.00000000e+00-0.00618204j]\n [ 7.80904967e-04+0.j ]]\n\n Notes\n -----\n Select method 'operator' (default) or 'analytic'. With the\n 'operator' method, the coherent state is generated by displacing\n the vacuum state using the displacement operator defined in the\n truncated Hilbert space of size 'N'. This method guarantees that the\n resulting state is normalized. With 'analytic' method the coherent state\n is generated using the analytical formula for the coherent state\n coefficients in the Fock basis. 
This method does not guarantee that the\n state is normalized if truncated to a small number of Fock states,\n but would in that case give more accurate coefficients.\n\n \"\"\"\n if method == \"operator\" and offset == 0:\n\n x = basis(N, 0)\n a = destroy(N)\n D = (alpha * a.dag() - conj(alpha) * a).expm()\n return D * x\n\n elif method == \"analytic\" or offset > 0:\n\n sqrtn = np.sqrt(np.arange(offset, offset+N, dtype=complex))\n sqrtn[0] = 1 # Get rid of divide by zero warning\n data = alpha/sqrtn\n if offset == 0:\n data[0] = np.exp(-abs(alpha)**2 / 2.0)\n else:\n s = np.prod(np.sqrt(np.arange(1, offset + 1))) # sqrt factorial\n data[0] = np.exp(-abs(alpha)**2 / 2.0) * alpha**(offset) / s\n np.cumprod(data, out=sqrtn) # Reuse sqrtn array\n return Qobj(sqrtn)\n\n else:\n raise TypeError(\n \"The method option can only take values 'operator' or 'analytic'\")\n\n\ndef coherent_dm(N, alpha, offset=0, method='operator'):\n \"\"\"Density matrix representation of a coherent state.\n\n Constructed via outer product of :func:`qutip.states.coherent`\n\n Parameters\n ----------\n N : int\n Number of Fock states in Hilbert space.\n\n alpha : float/complex\n Eigenvalue for coherent state.\n\n offset : int (default 0)\n The lowest number state that is included in the finite number state\n representation of the state.\n\n method : string {'operator', 'analytic'}\n Method for generating coherent density matrix.\n\n Returns\n -------\n dm : qobj\n Density matrix representation of coherent state.\n\n Examples\n --------\n >>> coherent_dm(3,0.25j)\n Quantum object: dims = [[3], [3]], \\\nshape = [3, 3], type = oper, isHerm = True\n Qobj data =\n [[ 0.93941695+0.j 0.00000000-0.23480733j -0.04216943+0.j ]\n [ 0.00000000+0.23480733j 0.05869011+0.j 0.00000000-0.01054025j]\n [-0.04216943+0.j 0.00000000+0.01054025j 0.00189294+0.j\\\n ]]\n\n Notes\n -----\n Select method 'operator' (default) or 'analytic'. With the\n 'operator' method, the coherent density matrix is generated by displacing\n the vacuum state using the displacement operator defined in the\n truncated Hilbert space of size 'N'. This method guarantees that the\n resulting density matrix is normalized. With 'analytic' method the coherent\n density matrix is generated using the analytical formula for the coherent\n state coefficients in the Fock basis. 
This method does not guarantee that\n the state is normalized if truncated to a small number of Fock states,\n but would in that case give more accurate coefficients.\n\n \"\"\"\n if method == \"operator\":\n psi = coherent(N, alpha, offset=offset)\n return psi * psi.dag()\n\n elif method == \"analytic\":\n psi = coherent(N, alpha, offset=offset, method='analytic')\n return psi * psi.dag()\n\n else:\n raise TypeError(\n \"The method option can only take values 'operator' or 'analytic'\")\n\n\ndef fock_dm(N, n=0, offset=0):\n \"\"\"Density matrix representation of a Fock state\n\n Constructed via outer product of :func:`qutip.states.fock`.\n\n Parameters\n ----------\n N : int\n Number of Fock states in Hilbert space.\n\n n : int\n ``int`` for desired number state, defaults to 0 if omitted.\n\n Returns\n -------\n dm : qobj\n Density matrix representation of Fock state.\n\n Examples\n --------\n >>> fock_dm(3,1)\n Quantum object: dims = [[3], [3]], \\\nshape = [3, 3], type = oper, isHerm = True\n Qobj data =\n [[ 0.+0.j 0.+0.j 0.+0.j]\n [ 0.+0.j 1.+0.j 0.+0.j]\n [ 0.+0.j 0.+0.j 0.+0.j]]\n\n \"\"\"\n psi = basis(N, n, offset=offset)\n\n return psi * psi.dag()\n\n\ndef fock(N, n=0, offset=0):\n \"\"\"Bosonic Fock (number) state.\n\n Same as :func:`qutip.states.basis`.\n\n Parameters\n ----------\n N : int\n Number of states in the Hilbert space.\n\n n : int\n ``int`` for desired number state, defaults to 0 if omitted.\n\n Returns\n -------\n Requested number state :math:`\\\\left|n\\\\right>`.\n\n Examples\n --------\n >>> fock(4,3)\n Quantum object: dims = [[4], [1]], shape = [4, 1], type = ket\n Qobj data =\n [[ 0.+0.j]\n [ 0.+0.j]\n [ 0.+0.j]\n [ 1.+0.j]]\n\n \"\"\"\n return basis(N, n, offset=offset)\n\n\ndef thermal_dm(N, n, method='operator'):\n \"\"\"Density matrix for a thermal state of n particles\n\n Parameters\n ----------\n N : int\n Number of basis states in Hilbert space.\n\n n : float\n Expectation value for number of particles in thermal state.\n\n method : string {'operator', 'analytic'}\n ``string`` that sets the method used to generate the\n thermal state probabilities\n\n Returns\n -------\n dm : qobj\n Thermal state density matrix.\n\n Examples\n --------\n >>> thermal_dm(5, 1)\n Quantum object: dims = [[5], [5]], \\\nshape = [5, 5], type = oper, isHerm = True\n Qobj data =\n [[ 0.51612903 0. 0. 0. 0. ]\n [ 0. 0.25806452 0. 0. 0. ]\n [ 0. 0. 0.12903226 0. 0. ]\n [ 0. 0. 0. 0.06451613 0. ]\n [ 0. 0. 0. 0. 0.03225806]]\n\n\n >>> thermal_dm(5, 1, 'analytic')\n Quantum object: dims = [[5], [5]], \\\nshape = [5, 5], type = oper, isHerm = True\n Qobj data =\n [[ 0.5 0. 0. 0. 0. ]\n [ 0. 0.25 0. 0. 0. ]\n [ 0. 0. 0.125 0. 0. ]\n [ 0. 0. 0. 0.0625 0. ]\n [ 0. 0. 0. 0. 0.03125]]\n\n Notes\n -----\n The 'operator' method (default) generates\n the thermal state using the truncated number operator ``num(N)``. This\n is the method that should be used in computations. The\n 'analytic' method uses the analytic coefficients derived in\n an infinite Hilbert space. 
The analytic form is not necessarily normalized,\n if truncated too aggressively.\n\n \"\"\"\n if n == 0:\n return fock_dm(N, 0)\n else:\n i = arange(N)\n if method == 'operator':\n beta = np.log(1.0 / n + 1.0)\n diags = np.exp(-beta * i)\n diags = diags / np.sum(diags)\n # populates diagonal terms using truncated operator expression\n rm = sp.spdiags(diags, 0, N, N, format='csr')\n elif method == 'analytic':\n # populates diagonal terms using analytic values\n rm = sp.spdiags((1.0 + n) ** (-1.0) * (n / (1.0 + n)) ** (i),\n 0, N, N, format='csr')\n else:\n raise ValueError(\n \"'method' keyword argument must be 'operator' or 'analytic'\")\n return Qobj(rm)\n\n\ndef maximally_mixed_dm(N):\n \"\"\"\n Returns the maximally mixed density matrix for a Hilbert space of\n dimension N.\n\n Parameters\n ----------\n N : int\n Number of basis states in Hilbert space.\n\n Returns\n -------\n dm : qobj\n Thermal state density matrix.\n \"\"\"\n if (not isinstance(N, (int, np.int64))) or N <= 0:\n raise ValueError(\"N must be integer N > 0\")\n\n dm = sp.spdiags(np.ones(N, dtype=complex)/float(N), 0, N, N, format='csr')\n\n return Qobj(dm, isherm=True)\n\n\ndef ket2dm(Q):\n \"\"\"Takes input ket or bra vector and returns density matrix\n formed by outer product.\n\n Parameters\n ----------\n Q : qobj\n Ket or bra type quantum object.\n\n Returns\n -------\n dm : qobj\n Density matrix formed by outer product of `Q`.\n\n Examples\n --------\n >>> x=basis(3,2)\n >>> ket2dm(x)\n Quantum object: dims = [[3], [3]], \\\nshape = [3, 3], type = oper, isHerm = True\n Qobj data =\n [[ 0.+0.j 0.+0.j 0.+0.j]\n [ 0.+0.j 0.+0.j 0.+0.j]\n [ 0.+0.j 0.+0.j 1.+0.j]]\n\n \"\"\"\n if Q.type == 'ket':\n out = Q * Q.dag()\n elif Q.type == 'bra':\n out = Q.dag() * Q\n else:\n raise TypeError(\"Input is not a ket or bra vector.\")\n return Qobj(out)\n\n\n#\n# projection operator\n#\ndef projection(N, n, m, offset=0):\n \"\"\"The projection operator that projects state :math:`|m>` on state :math:`|n>`.\n\n Parameters\n ----------\n N : int\n Number of basis states in Hilbert space.\n\n n, m : float\n The number states in the projection.\n\n offset : int (default 0)\n The lowest number state that is included in the finite number state\n representation of the projector.\n\n Returns\n -------\n oper : qobj\n Requested projection operator.\n\n \"\"\"\n ket1 = basis(N, n, offset=offset)\n ket2 = basis(N, m, offset=offset)\n\n return ket1 * ket2.dag()\n\n\n#\n# composite qubit states\n#\ndef qstate(string):\n \"\"\"Creates a tensor product for a set of qubits in either\n the 'up' :math:`|0>` or 'down' :math:`|1>` state.\n\n Parameters\n ----------\n string : str\n String containing 'u' or 'd' for each qubit (ex. 
'ududd')\n\n Returns\n -------\n qstate : qobj\n Qobj for tensor product corresponding to input string.\n\n Notes\n -----\n Look at ket and bra for more general functions\n creating multiparticle states.\n\n Examples\n --------\n >>> qstate('udu')\n Quantum object: dims = [[2, 2, 2], [1, 1, 1]], shape = [8, 1], type = ket\n Qobj data =\n [[ 0.]\n [ 0.]\n [ 0.]\n [ 0.]\n [ 0.]\n [ 1.]\n [ 0.]\n [ 0.]]\n\n \"\"\"\n n = len(string)\n if n != (string.count('u') + string.count('d')):\n raise TypeError('String input to QSTATE must consist ' +\n 'of \"u\" and \"d\" elements only')\n else:\n up = basis(2, 1)\n dn = basis(2, 0)\n lst = []\n for k in range(n):\n if string[k] == 'u':\n lst.append(up)\n else:\n lst.append(dn)\n return tensor(lst)\n\n\n#\n# different qubit notation dictionary\n#\n_qubit_dict = {'g': 0, # ground state\n 'e': 1, # excited state\n 'u': 0, # spin up\n 'd': 1, # spin down\n 'H': 0, # horizontal polarization\n 'V': 1} # vertical polarization\n\n\ndef _character_to_qudit(x):\n \"\"\"\n Converts a character representing a one-particle state into int.\n \"\"\"\n if x in _qubit_dict:\n return _qubit_dict[x]\n else:\n return int(x)\n\n\ndef ket(seq, dim=2):\n \"\"\"\n Produces a multiparticle ket state for a list or string,\n where each element stands for state of the respective particle.\n\n Parameters\n ----------\n seq : str / list of ints or characters\n Each element defines state of the respective particle.\n (e.g. [1,1,0,1] or a string \"1101\").\n For qubits it is also possible to use the following conventions:\n - 'g'/'e' (ground and excited state)\n - 'u'/'d' (spin up and down)\n - 'H'/'V' (horizontal and vertical polarization)\n Note: for dimension > 9 you need to use a list.\n\n\n dim : int (default: 2) / list of ints\n Space dimension for each particle:\n int if there are the same, list if they are different.\n\n Returns\n -------\n ket : qobj\n\n Examples\n --------\n >>> ket(\"10\")\n Quantum object: dims = [[2, 2], [1, 1]], shape = [4, 1], type = ket\n Qobj data =\n [[ 0.]\n [ 0.]\n [ 1.]\n [ 0.]]\n\n >>> ket(\"Hue\")\n Quantum object: dims = [[2, 2, 2], [1, 1, 1]], shape = [8, 1], type = ket\n Qobj data =\n [[ 0.]\n [ 1.]\n [ 0.]\n [ 0.]\n [ 0.]\n [ 0.]\n [ 0.]\n [ 0.]]\n\n >>> ket(\"12\", 3)\n Quantum object: dims = [[3, 3], [1, 1]], shape = [9, 1], type = ket\n Qobj data =\n [[ 0.]\n [ 0.]\n [ 0.]\n [ 0.]\n [ 0.]\n [ 1.]\n [ 0.]\n [ 0.]\n [ 0.]]\n\n >>> ket(\"31\", [5, 2])\n Quantum object: dims = [[5, 2], [1, 1]], shape = [10, 1], type = ket\n Qobj data =\n [[ 0.]\n [ 0.]\n [ 0.]\n [ 0.]\n [ 0.]\n [ 0.]\n [ 0.]\n [ 1.]\n [ 0.]\n [ 0.]]\n \"\"\"\n if isinstance(dim, int):\n dim = [dim] * len(seq)\n return tensor([basis(dim[i], _character_to_qudit(x))\n for i, x in enumerate(seq)])\n\n\ndef bra(seq, dim=2):\n \"\"\"\n Produces a multiparticle bra state for a list or string,\n where each element stands for state of the respective particle.\n\n Parameters\n ----------\n seq : str / list of ints or characters\n Each element defines state of the respective particle.\n (e.g. 
[1,1,0,1] or a string \"1101\").\n For qubits it is also possible to use the following conventions:\n - 'g'/'e' (ground and excited state)\n - 'u'/'d' (spin up and down)\n - 'H'/'V' (horizontal and vertical polarization)\n Note: for dimension > 9 you need to use a list.\n\n\n dim : int (default: 2) / list of ints\n Space dimension for each particle:\n int if there are the same, list if they are different.\n\n Returns\n -------\n bra : qobj\n\n Examples\n --------\n >>> bra(\"10\")\n Quantum object: dims = [[1, 1], [2, 2]], shape = [1, 4], type = bra\n Qobj data =\n [[ 0. 0. 1. 0.]]\n\n >>> bra(\"Hue\")\n Quantum object: dims = [[1, 1, 1], [2, 2, 2]], shape = [1, 8], type = bra\n Qobj data =\n [[ 0. 1. 0. 0. 0. 0. 0. 0.]]\n\n >>> bra(\"12\", 3)\n Quantum object: dims = [[1, 1], [3, 3]], shape = [1, 9], type = bra\n Qobj data =\n [[ 0. 0. 0. 0. 0. 1. 0. 0. 0.]]\n\n\n >>> bra(\"31\", [5, 2])\n Quantum object: dims = [[1, 1], [5, 2]], shape = [1, 10], type = bra\n Qobj data =\n [[ 0. 0. 0. 0. 0. 0. 0. 1. 0. 0.]]\n \"\"\"\n return ket(seq, dim=dim).dag()\n\n\n#\n# quantum state number helper functions\n#\ndef state_number_enumerate(dims, excitations=None, state=None, idx=0):\n \"\"\"\n An iterator that enumerate all the state number arrays (quantum numbers on\n the form [n1, n2, n3, ...]) for a system with dimensions given by dims.\n\n Example:\n\n >>> for state in state_number_enumerate([2,2]):\n >>> print(state)\n [ 0 0 ]\n [ 0 1 ]\n [ 1 0 ]\n [ 1 1 ]\n\n Parameters\n ----------\n dims : list or array\n The quantum state dimensions array, as it would appear in a Qobj.\n\n state : list\n Current state in the iteration. Used internally.\n\n excitations : integer (None)\n Restrict state space to states with excitation numbers below or\n equal to this value.\n\n idx : integer\n Current index in the iteration. 
Used internally.\n\n Returns\n -------\n state_number : list\n Successive state number arrays that can be used in loops and other\n iterations, using standard state enumeration *by definition*.\n\n \"\"\"\n\n if state is None:\n state = np.zeros(len(dims), dtype=int)\n\n if excitations and sum(state[0:idx]) > excitations:\n pass\n elif idx == len(dims):\n if excitations is None:\n yield np.array(state)\n else:\n yield tuple(state)\n else:\n for n in range(dims[idx]):\n state[idx] = n\n for s in state_number_enumerate(dims, excitations, state, idx + 1):\n yield s\n\n\ndef state_number_index(dims, state):\n \"\"\"\n Return the index of a quantum state corresponding to state,\n given a system with dimensions given by dims.\n\n Example:\n\n >>> state_number_index([2, 2, 2], [1, 1, 0])\n 6\n\n Parameters\n ----------\n dims : list or array\n The quantum state dimensions array, as it would appear in a Qobj.\n\n state : list\n State number array.\n\n Returns\n -------\n idx : int\n The index of the state given by `state` in standard enumeration\n ordering.\n\n \"\"\"\n return int(\n sum([state[i] * prod(dims[i + 1:]) for i, d in enumerate(dims)]))\n\n\ndef state_index_number(dims, index):\n \"\"\"\n Return a quantum number representation given a state index, for a system\n of composite structure defined by dims.\n\n Example:\n\n >>> state_index_number([2, 2, 2], 6)\n [1, 1, 0]\n\n Parameters\n ----------\n dims : list or array\n The quantum state dimensions array, as it would appear in a Qobj.\n\n index : integer\n The index of the state in standard enumeration ordering.\n\n Returns\n -------\n state : list\n The state number array corresponding to index `index` in standard\n enumeration ordering.\n\n \"\"\"\n state = np.empty_like(dims)\n\n D = np.concatenate([np.flipud(np.cumprod(np.flipud(dims[1:]))), [1]])\n\n for n in range(len(dims)):\n state[n] = index / D[n]\n index -= state[n] * D[n]\n\n return list(state)\n\n\ndef state_number_qobj(dims, state):\n \"\"\"\n Return a Qobj representation of a quantum state specified by the state\n array `state`.\n\n Example:\n\n >>> state_number_qobj([2, 2, 2], [1, 0, 1])\n Quantum object: dims = [[2, 2, 2], [1, 1, 1]], \\\nshape = [8, 1], type = ket\n Qobj data =\n [[ 0.]\n [ 0.]\n [ 0.]\n [ 0.]\n [ 0.]\n [ 1.]\n [ 0.]\n [ 0.]]\n\n Parameters\n ----------\n dims : list or array\n The quantum state dimensions array, as it would appear in a Qobj.\n\n state : list\n State number array.\n\n Returns\n -------\n state : :class:`qutip.Qobj.qobj`\n The state as a :class:`qutip.Qobj.qobj` instance.\n\n\n \"\"\"\n return tensor([fock(dims[i], s) for i, s in enumerate(state)])\n\n\n#\n# Excitation-number restricted (enr) states\n#\ndef enr_state_dictionaries(dims, excitations):\n \"\"\"\n Return the number of states, and lookup-dictionaries for translating\n a state tuple to a state index, and vice versa, for a system with a given\n number of components and maximum number of excitations.\n\n Parameters\n ----------\n dims: list\n A list with the number of states in each sub-system.\n\n excitations : integer\n The maximum numbers of dimension\n\n Returns\n -------\n nstates, state2idx, idx2state: integer, dict, dict\n The number of states `nstates`, a dictionary for looking up state\n indices from a state tuple, and a dictionary for looking up state\n state tuples from state indices.\n \"\"\"\n nstates = 0\n state2idx = {}\n idx2state = {}\n\n for state in state_number_enumerate(dims, excitations):\n state2idx[state] = nstates\n idx2state[nstates] = state\n nstates 
+= 1\n\n return nstates, state2idx, idx2state\n\n\ndef enr_fock(dims, excitations, state):\n \"\"\"\n Generate the Fock state representation in a excitation-number restricted\n state space. The `dims` argument is a list of integers that define the\n number of quantums states of each component of a composite quantum system,\n and the `excitations` specifies the maximum number of excitations for\n the basis states that are to be included in the state space. The `state`\n argument is a tuple of integers that specifies the state (in the number\n basis representation) for which to generate the Fock state representation.\n\n Parameters\n ----------\n dims : list\n A list of the dimensions of each subsystem of a composite quantum\n system.\n\n excitations : integer\n The maximum number of excitations that are to be included in the\n state space.\n\n state : list of integers\n The state in the number basis representation.\n\n Returns\n -------\n ket : Qobj\n A Qobj instance that represent a Fock state in the exication-number-\n restricted state space defined by `dims` and `exciations`.\n\n \"\"\"\n nstates, state2idx, idx2state = enr_state_dictionaries(dims, excitations)\n\n data = sp.lil_matrix((nstates, 1), dtype=np.complex)\n\n try:\n data[state2idx[tuple(state)], 0] = 1\n except:\n raise ValueError(\"The state tuple %s is not in the restricted \"\n \"state space\" % str(tuple(state)))\n\n return Qobj(data, dims=[dims, 1])\n\n\ndef enr_thermal_dm(dims, excitations, n):\n \"\"\"\n Generate the density operator for a thermal state in the excitation-number-\n restricted state space defined by the `dims` and `exciations` arguments.\n See the documentation for enr_fock for a more detailed description of\n these arguments. The temperature of each mode in dims is specified by\n the average number of excitatons `n`.\n\n Parameters\n ----------\n dims : list\n A list of the dimensions of each subsystem of a composite quantum\n system.\n\n excitations : integer\n The maximum number of excitations that are to be included in the\n state space.\n\n n : integer\n The average number of exciations in the thermal state. 
`n` can be\n a float (which then applies to each mode), or a list/array of the same\n length as dims, in which each element corresponds specifies the\n temperature of the corresponding mode.\n\n Returns\n -------\n dm : Qobj\n Thermal state density matrix.\n \"\"\"\n nstates, state2idx, idx2state = enr_state_dictionaries(dims, excitations)\n\n if not isinstance(n, (list, np.ndarray)):\n n = np.ones(len(dims)) * n\n else:\n n = np.asarray(n)\n\n diags = [np.prod((n / (n + 1)) ** np.array(state))\n for idx, state in idx2state.items()]\n diags /= np.sum(diags)\n data = sp.spdiags(diags, 0, nstates, nstates, format='csr')\n\n return Qobj(data, dims=[dims, dims])\n\n\ndef phase_basis(N, m, phi0=0):\n \"\"\"\n Basis vector for the mth phase of the Pegg-Barnett phase operator.\n\n Parameters\n ----------\n N : int\n Number of basis vectors in Hilbert space.\n m : int\n Integer corresponding to the mth discrete phase phi_m=phi0+2*pi*m/N\n phi0 : float (default=0)\n Reference phase angle.\n\n Returns\n -------\n state : qobj\n Ket vector for mth Pegg-Barnett phase operator basis state.\n\n Notes\n -----\n The Pegg-Barnett basis states form a complete set over the truncated\n Hilbert space.\n\n \"\"\"\n phim = phi0 + (2.0 * np.pi * m) / N\n n = np.arange(N).reshape((N, 1))\n data = 1.0 / np.sqrt(N) * np.exp(1.0j * n * phim)\n return Qobj(data)\n\n\ndef zero_ket(N, dims=None):\n \"\"\"\n Creates the zero ket vector with shape Nx1 and\n dimensions `dims`.\n\n Parameters\n ----------\n N : int\n Hilbert space dimensionality\n dims : list\n Optional dimensions if ket corresponds to\n a composite Hilbert space.\n\n Returns\n -------\n zero_ket : qobj\n Zero ket on given Hilbert space.\n\n \"\"\"\n return Qobj(sp.csr_matrix((N, 1), dtype=complex), dims=dims)\n\n\ndef spin_state(j, m, type='ket'):\n \"\"\"Generates the spin state |j, m>, i.e. the eigenstate\n of the spin-j Sz operator with eigenvalue m.\n\n Parameters\n ----------\n j : float\n The spin of the state ().\n\n m : int\n Eigenvalue of the spin-j Sz operator.\n\n type : string {'ket', 'bra', 'dm'}\n Type of state to generate.\n\n Returns\n -------\n state : qobj\n Qobj quantum object for spin state\n\n \"\"\"\n J = 2 * j + 1\n\n if type == 'ket':\n return basis(int(J), int(j - m))\n elif type == 'bra':\n return basis(int(J), int(j - m)).dag()\n elif type == 'dm':\n return fock_dm(int(J), int(j - m))\n else:\n raise ValueError(\"invalid value keyword argument 'type'\")\n\n\ndef spin_coherent(j, theta, phi, type='ket'):\n \"\"\"Generates the spin state |j, m>, i.e. 
the eigenstate\n of the spin-j Sz operator with eigenvalue m.\n\n Parameters\n ----------\n j : float\n The spin of the state.\n\n theta : float\n Angle from z axis.\n\n phi : float\n Angle from x axis.\n\n type : string {'ket', 'bra', 'dm'}\n Type of state to generate.\n\n Returns\n -------\n state : qobj\n Qobj quantum object for spin coherent state\n\n \"\"\"\n Sp = jmat(j, '+')\n Sm = jmat(j, '-')\n psi = (0.5 * theta * np.exp(1j * phi) * Sm -\n 0.5 * theta * np.exp(-1j * phi) * Sp).expm() * spin_state(j, j)\n\n if type == 'ket':\n return psi\n elif type == 'bra':\n return psi.dag()\n elif type == 'dm':\n return ket2dm(psi)\n else:\n raise ValueError(\"invalid value keyword argument 'type'\")\n\n\ndef bell_state(state='00'):\n \"\"\"\n Returns the Bell state:\n\n |B00> = 1 / sqrt(2)*[|0>|0>+|1>|1>]\n |B01> = 1 / sqrt(2)*[|0>|0>-|1>|1>]\n |B10> = 1 / sqrt(2)*[|0>|1>+|1>|0>]\n |B11> = 1 / sqrt(2)*[|0>|1>-|1>|0>]\n\n Returns\n -------\n Bell_state : qobj\n Bell state\n\n \"\"\"\n if state == '00':\n Bell_state = tensor(\n basis(2), basis(2))+tensor(basis(2, 1), basis(2, 1))\n elif state == '01':\n Bell_state = tensor(\n basis(2), basis(2))-tensor(basis(2, 1), basis(2, 1))\n elif state == '10':\n Bell_state = tensor(\n basis(2), basis(2, 1))+tensor(basis(2, 1), basis(2))\n elif state == '11':\n Bell_state = tensor(\n basis(2), basis(2, 1))-tensor(basis(2, 1), basis(2))\n\n return Bell_state.unit()\n\n\ndef singlet_state():\n \"\"\"\n Returns the two particle singlet-state:\n\n |S>=1/sqrt(2)*[|0>|1>-|1>|0>]\n\n that is identical to the fourth bell state.\n\n Returns\n -------\n Bell_state : qobj\n |B11> Bell state\n\n \"\"\"\n return bell_state('11')\n\n\ndef triplet_states():\n \"\"\"\n Returns the two particle triplet-states:\n\n |T>= |1>|1>\n = 1 / sqrt(2)*[|0>|1>-|1>|0>]\n = |0>|0>\n that is identical to the fourth bell state.\n\n Returns\n -------\n trip_states : list\n 2 particle triplet states\n\n \"\"\"\n trip_states = []\n trip_states.append(tensor(basis(2, 1), basis(2, 1)))\n trip_states.append(\n (tensor(basis(2), basis(2, 1)) + tensor(basis(2, 1), basis(2))).unit()\n )\n trip_states.append(tensor(basis(2), basis(2)))\n return trip_states\n\n\ndef w_state(N=3):\n \"\"\"\n Returns the N-qubit W-state.\n\n Parameters\n ----------\n N : int (default=3)\n Number of qubits in state\n\n Returns\n -------\n W : qobj\n N-qubit W-state\n\n \"\"\"\n inds = np.zeros(N, dtype=int)\n inds[0] = 1\n state = tensor([basis(2, x) for x in inds])\n for kk in range(1, N):\n perm_inds = np.roll(inds, kk)\n state += tensor([basis(2, x) for x in perm_inds])\n return state.unit()\n\n\ndef ghz_state(N=3):\n \"\"\"\n Returns the N-qubit GHZ-state.\n\n Parameters\n ----------\n N : int (default=3)\n Number of qubits in state\n\n Returns\n -------\n G : qobj\n N-qubit GHZ-state\n\n \"\"\"\n state = (tensor([basis(2) for k in range(N)]) +\n tensor([basis(2, 1) for k in range(N)]))\n return state/np.sqrt(2)\n",
"path": "qutip/states.py"
}
] | [
{
"content": "# This file is part of QuTiP: Quantum Toolbox in Python.\n#\n# Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n#\n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names\n# of its contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A\n# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n###############################################################################\n\n__all__ = ['basis', 'qutrit_basis', 'coherent', 'coherent_dm', 'fock_dm',\n 'fock', 'thermal_dm', 'maximally_mixed_dm', 'ket2dm', 'projection',\n 'qstate', 'ket', 'bra', 'state_number_enumerate',\n 'state_number_index', 'state_index_number', 'state_number_qobj',\n 'phase_basis', 'zero_ket', 'spin_state', 'spin_coherent',\n 'bell_state', 'singlet_state', 'triplet_states', 'w_state',\n 'ghz_state', 'enr_state_dictionaries', 'enr_fock',\n 'enr_thermal_dm']\n\nimport numpy as np\nfrom scipy import arange, conj, prod\nimport scipy.sparse as sp\n\nfrom qutip.qobj import Qobj\nfrom qutip.operators import destroy, jmat\nfrom qutip.tensor import tensor\n\nfrom qutip.fastsparse import fast_csr_matrix\n\n\ndef basis(N, n=0, offset=0):\n \"\"\"Generates the vector representation of a Fock state.\n\n Parameters\n ----------\n N : int\n Number of Fock states in Hilbert space.\n\n n : int\n Integer corresponding to desired number state, defaults\n to 0 if omitted.\n\n offset : int (default 0)\n The lowest number state that is included in the finite number state\n representation of the state.\n\n Returns\n -------\n state : qobj\n Qobj representing the requested number state ``|n>``.\n\n Examples\n --------\n >>> basis(5,2)\n Quantum object: dims = [[5], [1]], shape = [5, 1], type = ket\n Qobj data =\n [[ 0.+0.j]\n [ 0.+0.j]\n [ 1.+0.j]\n [ 0.+0.j]\n [ 0.+0.j]]\n\n Notes\n -----\n\n A subtle incompatibility with the quantum optics toolbox: In QuTiP::\n\n basis(N, 0) = ground state\n\n but in the qotoolbox::\n\n basis(N, 1) = ground state\n\n \"\"\"\n if (not isinstance(N, (int, np.integer))) or N < 0:\n raise ValueError(\"N must be integer N >= 0\")\n\n if (not isinstance(n, (int, np.integer))) or n < offset:\n raise ValueError(\"n must be integer n >= 0\")\n\n if 
n - offset > (N - 1): # check if n is within bounds\n raise ValueError(\"basis vector index need to be in n <= N-1\")\n\n data = np.array([1], dtype=complex)\n ind = np.array([0], dtype=np.int32)\n ptr = np.array([0]*((n - offset)+1)+[1]*(N-(n-offset)),dtype=np.int32)\n\n return Qobj(fast_csr_matrix((data,ind,ptr), shape=(N,1)), isherm=False)\n\n\ndef qutrit_basis():\n \"\"\"Basis states for a three level system (qutrit)\n\n Returns\n -------\n qstates : array\n Array of qutrit basis vectors\n\n \"\"\"\n return np.array([basis(3, 0), basis(3, 1), basis(3, 2)], dtype=object)\n\n\ndef coherent(N, alpha, offset=0, method='operator'):\n \"\"\"Generates a coherent state with eigenvalue alpha.\n\n Constructed using displacement operator on vacuum state.\n\n Parameters\n ----------\n N : int\n Number of Fock states in Hilbert space.\n\n alpha : float/complex\n Eigenvalue of coherent state.\n\n offset : int (default 0)\n The lowest number state that is included in the finite number state\n representation of the state. Using a non-zero offset will make the\n default method 'analytic'.\n\n method : string {'operator', 'analytic'}\n Method for generating coherent state.\n\n Returns\n -------\n state : qobj\n Qobj quantum object for coherent state\n\n Examples\n --------\n >>> coherent(5,0.25j)\n Quantum object: dims = [[5], [1]], shape = [5, 1], type = ket\n Qobj data =\n [[ 9.69233235e-01+0.j ]\n [ 0.00000000e+00+0.24230831j]\n [ -4.28344935e-02+0.j ]\n [ 0.00000000e+00-0.00618204j]\n [ 7.80904967e-04+0.j ]]\n\n Notes\n -----\n Select method 'operator' (default) or 'analytic'. With the\n 'operator' method, the coherent state is generated by displacing\n the vacuum state using the displacement operator defined in the\n truncated Hilbert space of size 'N'. This method guarantees that the\n resulting state is normalized. With 'analytic' method the coherent state\n is generated using the analytical formula for the coherent state\n coefficients in the Fock basis. 
This method does not guarantee that the\n state is normalized if truncated to a small number of Fock states,\n but would in that case give more accurate coefficients.\n\n \"\"\"\n if method == \"operator\" and offset == 0:\n\n x = basis(N, 0)\n a = destroy(N)\n D = (alpha * a.dag() - conj(alpha) * a).expm()\n return D * x\n\n elif method == \"analytic\" or offset > 0:\n\n sqrtn = np.sqrt(np.arange(offset, offset+N, dtype=complex))\n sqrtn[0] = 1 # Get rid of divide by zero warning\n data = alpha/sqrtn\n if offset == 0:\n data[0] = np.exp(-abs(alpha)**2 / 2.0)\n else:\n s = np.prod(np.sqrt(np.arange(1, offset + 1))) # sqrt factorial\n data[0] = np.exp(-abs(alpha)**2 / 2.0) * alpha**(offset) / s\n np.cumprod(data, out=sqrtn) # Reuse sqrtn array\n return Qobj(sqrtn)\n\n else:\n raise TypeError(\n \"The method option can only take values 'operator' or 'analytic'\")\n\n\ndef coherent_dm(N, alpha, offset=0, method='operator'):\n \"\"\"Density matrix representation of a coherent state.\n\n Constructed via outer product of :func:`qutip.states.coherent`\n\n Parameters\n ----------\n N : int\n Number of Fock states in Hilbert space.\n\n alpha : float/complex\n Eigenvalue for coherent state.\n\n offset : int (default 0)\n The lowest number state that is included in the finite number state\n representation of the state.\n\n method : string {'operator', 'analytic'}\n Method for generating coherent density matrix.\n\n Returns\n -------\n dm : qobj\n Density matrix representation of coherent state.\n\n Examples\n --------\n >>> coherent_dm(3,0.25j)\n Quantum object: dims = [[3], [3]], \\\nshape = [3, 3], type = oper, isHerm = True\n Qobj data =\n [[ 0.93941695+0.j 0.00000000-0.23480733j -0.04216943+0.j ]\n [ 0.00000000+0.23480733j 0.05869011+0.j 0.00000000-0.01054025j]\n [-0.04216943+0.j 0.00000000+0.01054025j 0.00189294+0.j\\\n ]]\n\n Notes\n -----\n Select method 'operator' (default) or 'analytic'. With the\n 'operator' method, the coherent density matrix is generated by displacing\n the vacuum state using the displacement operator defined in the\n truncated Hilbert space of size 'N'. This method guarantees that the\n resulting density matrix is normalized. With 'analytic' method the coherent\n density matrix is generated using the analytical formula for the coherent\n state coefficients in the Fock basis. 
This method does not guarantee that\n the state is normalized if truncated to a small number of Fock states,\n but would in that case give more accurate coefficients.\n\n \"\"\"\n if method == \"operator\":\n psi = coherent(N, alpha, offset=offset)\n return psi * psi.dag()\n\n elif method == \"analytic\":\n psi = coherent(N, alpha, offset=offset, method='analytic')\n return psi * psi.dag()\n\n else:\n raise TypeError(\n \"The method option can only take values 'operator' or 'analytic'\")\n\n\ndef fock_dm(N, n=0, offset=0):\n \"\"\"Density matrix representation of a Fock state\n\n Constructed via outer product of :func:`qutip.states.fock`.\n\n Parameters\n ----------\n N : int\n Number of Fock states in Hilbert space.\n\n n : int\n ``int`` for desired number state, defaults to 0 if omitted.\n\n Returns\n -------\n dm : qobj\n Density matrix representation of Fock state.\n\n Examples\n --------\n >>> fock_dm(3,1)\n Quantum object: dims = [[3], [3]], \\\nshape = [3, 3], type = oper, isHerm = True\n Qobj data =\n [[ 0.+0.j 0.+0.j 0.+0.j]\n [ 0.+0.j 1.+0.j 0.+0.j]\n [ 0.+0.j 0.+0.j 0.+0.j]]\n\n \"\"\"\n psi = basis(N, n, offset=offset)\n\n return psi * psi.dag()\n\n\ndef fock(N, n=0, offset=0):\n \"\"\"Bosonic Fock (number) state.\n\n Same as :func:`qutip.states.basis`.\n\n Parameters\n ----------\n N : int\n Number of states in the Hilbert space.\n\n n : int\n ``int`` for desired number state, defaults to 0 if omitted.\n\n Returns\n -------\n Requested number state :math:`\\\\left|n\\\\right>`.\n\n Examples\n --------\n >>> fock(4,3)\n Quantum object: dims = [[4], [1]], shape = [4, 1], type = ket\n Qobj data =\n [[ 0.+0.j]\n [ 0.+0.j]\n [ 0.+0.j]\n [ 1.+0.j]]\n\n \"\"\"\n return basis(N, n, offset=offset)\n\n\ndef thermal_dm(N, n, method='operator'):\n \"\"\"Density matrix for a thermal state of n particles\n\n Parameters\n ----------\n N : int\n Number of basis states in Hilbert space.\n\n n : float\n Expectation value for number of particles in thermal state.\n\n method : string {'operator', 'analytic'}\n ``string`` that sets the method used to generate the\n thermal state probabilities\n\n Returns\n -------\n dm : qobj\n Thermal state density matrix.\n\n Examples\n --------\n >>> thermal_dm(5, 1)\n Quantum object: dims = [[5], [5]], \\\nshape = [5, 5], type = oper, isHerm = True\n Qobj data =\n [[ 0.51612903 0. 0. 0. 0. ]\n [ 0. 0.25806452 0. 0. 0. ]\n [ 0. 0. 0.12903226 0. 0. ]\n [ 0. 0. 0. 0.06451613 0. ]\n [ 0. 0. 0. 0. 0.03225806]]\n\n\n >>> thermal_dm(5, 1, 'analytic')\n Quantum object: dims = [[5], [5]], \\\nshape = [5, 5], type = oper, isHerm = True\n Qobj data =\n [[ 0.5 0. 0. 0. 0. ]\n [ 0. 0.25 0. 0. 0. ]\n [ 0. 0. 0.125 0. 0. ]\n [ 0. 0. 0. 0.0625 0. ]\n [ 0. 0. 0. 0. 0.03125]]\n\n Notes\n -----\n The 'operator' method (default) generates\n the thermal state using the truncated number operator ``num(N)``. This\n is the method that should be used in computations. The\n 'analytic' method uses the analytic coefficients derived in\n an infinite Hilbert space. 
The analytic form is not necessarily normalized,\n if truncated too aggressively.\n\n \"\"\"\n if n == 0:\n return fock_dm(N, 0)\n else:\n i = arange(N)\n if method == 'operator':\n beta = np.log(1.0 / n + 1.0)\n diags = np.exp(-beta * i)\n diags = diags / np.sum(diags)\n # populates diagonal terms using truncated operator expression\n rm = sp.spdiags(diags, 0, N, N, format='csr')\n elif method == 'analytic':\n # populates diagonal terms using analytic values\n rm = sp.spdiags((1.0 + n) ** (-1.0) * (n / (1.0 + n)) ** (i),\n 0, N, N, format='csr')\n else:\n raise ValueError(\n \"'method' keyword argument must be 'operator' or 'analytic'\")\n return Qobj(rm)\n\n\ndef maximally_mixed_dm(N):\n \"\"\"\n Returns the maximally mixed density matrix for a Hilbert space of\n dimension N.\n\n Parameters\n ----------\n N : int\n Number of basis states in Hilbert space.\n\n Returns\n -------\n dm : qobj\n Thermal state density matrix.\n \"\"\"\n if (not isinstance(N, (int, np.int64))) or N <= 0:\n raise ValueError(\"N must be integer N > 0\")\n\n dm = sp.spdiags(np.ones(N, dtype=complex)/float(N), 0, N, N, format='csr')\n\n return Qobj(dm, isherm=True)\n\n\ndef ket2dm(Q):\n \"\"\"Takes input ket or bra vector and returns density matrix\n formed by outer product.\n\n Parameters\n ----------\n Q : qobj\n Ket or bra type quantum object.\n\n Returns\n -------\n dm : qobj\n Density matrix formed by outer product of `Q`.\n\n Examples\n --------\n >>> x=basis(3,2)\n >>> ket2dm(x)\n Quantum object: dims = [[3], [3]], \\\nshape = [3, 3], type = oper, isHerm = True\n Qobj data =\n [[ 0.+0.j 0.+0.j 0.+0.j]\n [ 0.+0.j 0.+0.j 0.+0.j]\n [ 0.+0.j 0.+0.j 1.+0.j]]\n\n \"\"\"\n if Q.type == 'ket':\n out = Q * Q.dag()\n elif Q.type == 'bra':\n out = Q.dag() * Q\n else:\n raise TypeError(\"Input is not a ket or bra vector.\")\n return Qobj(out)\n\n\n#\n# projection operator\n#\ndef projection(N, n, m, offset=0):\n \"\"\"The projection operator that projects state :math:`|m>` on state :math:`|n>`.\n\n Parameters\n ----------\n N : int\n Number of basis states in Hilbert space.\n\n n, m : float\n The number states in the projection.\n\n offset : int (default 0)\n The lowest number state that is included in the finite number state\n representation of the projector.\n\n Returns\n -------\n oper : qobj\n Requested projection operator.\n\n \"\"\"\n ket1 = basis(N, n, offset=offset)\n ket2 = basis(N, m, offset=offset)\n\n return ket1 * ket2.dag()\n\n\n#\n# composite qubit states\n#\ndef qstate(string):\n \"\"\"Creates a tensor product for a set of qubits in either\n the 'up' :math:`|0>` or 'down' :math:`|1>` state.\n\n Parameters\n ----------\n string : str\n String containing 'u' or 'd' for each qubit (ex. 
'ududd')\n\n Returns\n -------\n qstate : qobj\n Qobj for tensor product corresponding to input string.\n\n Notes\n -----\n Look at ket and bra for more general functions\n creating multiparticle states.\n\n Examples\n --------\n >>> qstate('udu')\n Quantum object: dims = [[2, 2, 2], [1, 1, 1]], shape = [8, 1], type = ket\n Qobj data =\n [[ 0.]\n [ 0.]\n [ 0.]\n [ 0.]\n [ 0.]\n [ 1.]\n [ 0.]\n [ 0.]]\n\n \"\"\"\n n = len(string)\n if n != (string.count('u') + string.count('d')):\n raise TypeError('String input to QSTATE must consist ' +\n 'of \"u\" and \"d\" elements only')\n else:\n up = basis(2, 1)\n dn = basis(2, 0)\n lst = []\n for k in range(n):\n if string[k] == 'u':\n lst.append(up)\n else:\n lst.append(dn)\n return tensor(lst)\n\n\n#\n# different qubit notation dictionary\n#\n_qubit_dict = {'g': 0, # ground state\n 'e': 1, # excited state\n 'u': 0, # spin up\n 'd': 1, # spin down\n 'H': 0, # horizontal polarization\n 'V': 1} # vertical polarization\n\n\ndef _character_to_qudit(x):\n \"\"\"\n Converts a character representing a one-particle state into int.\n \"\"\"\n if x in _qubit_dict:\n return _qubit_dict[x]\n else:\n return int(x)\n\n\ndef ket(seq, dim=2):\n \"\"\"\n Produces a multiparticle ket state for a list or string,\n where each element stands for state of the respective particle.\n\n Parameters\n ----------\n seq : str / list of ints or characters\n Each element defines state of the respective particle.\n (e.g. [1,1,0,1] or a string \"1101\").\n For qubits it is also possible to use the following conventions:\n - 'g'/'e' (ground and excited state)\n - 'u'/'d' (spin up and down)\n - 'H'/'V' (horizontal and vertical polarization)\n Note: for dimension > 9 you need to use a list.\n\n\n dim : int (default: 2) / list of ints\n Space dimension for each particle:\n int if there are the same, list if they are different.\n\n Returns\n -------\n ket : qobj\n\n Examples\n --------\n >>> ket(\"10\")\n Quantum object: dims = [[2, 2], [1, 1]], shape = [4, 1], type = ket\n Qobj data =\n [[ 0.]\n [ 0.]\n [ 1.]\n [ 0.]]\n\n >>> ket(\"Hue\")\n Quantum object: dims = [[2, 2, 2], [1, 1, 1]], shape = [8, 1], type = ket\n Qobj data =\n [[ 0.]\n [ 1.]\n [ 0.]\n [ 0.]\n [ 0.]\n [ 0.]\n [ 0.]\n [ 0.]]\n\n >>> ket(\"12\", 3)\n Quantum object: dims = [[3, 3], [1, 1]], shape = [9, 1], type = ket\n Qobj data =\n [[ 0.]\n [ 0.]\n [ 0.]\n [ 0.]\n [ 0.]\n [ 1.]\n [ 0.]\n [ 0.]\n [ 0.]]\n\n >>> ket(\"31\", [5, 2])\n Quantum object: dims = [[5, 2], [1, 1]], shape = [10, 1], type = ket\n Qobj data =\n [[ 0.]\n [ 0.]\n [ 0.]\n [ 0.]\n [ 0.]\n [ 0.]\n [ 0.]\n [ 1.]\n [ 0.]\n [ 0.]]\n \"\"\"\n if isinstance(dim, int):\n dim = [dim] * len(seq)\n return tensor([basis(dim[i], _character_to_qudit(x))\n for i, x in enumerate(seq)])\n\n\ndef bra(seq, dim=2):\n \"\"\"\n Produces a multiparticle bra state for a list or string,\n where each element stands for state of the respective particle.\n\n Parameters\n ----------\n seq : str / list of ints or characters\n Each element defines state of the respective particle.\n (e.g. 
[1,1,0,1] or a string \"1101\").\n For qubits it is also possible to use the following conventions:\n - 'g'/'e' (ground and excited state)\n - 'u'/'d' (spin up and down)\n - 'H'/'V' (horizontal and vertical polarization)\n Note: for dimension > 9 you need to use a list.\n\n\n dim : int (default: 2) / list of ints\n Space dimension for each particle:\n int if there are the same, list if they are different.\n\n Returns\n -------\n bra : qobj\n\n Examples\n --------\n >>> bra(\"10\")\n Quantum object: dims = [[1, 1], [2, 2]], shape = [1, 4], type = bra\n Qobj data =\n [[ 0. 0. 1. 0.]]\n\n >>> bra(\"Hue\")\n Quantum object: dims = [[1, 1, 1], [2, 2, 2]], shape = [1, 8], type = bra\n Qobj data =\n [[ 0. 1. 0. 0. 0. 0. 0. 0.]]\n\n >>> bra(\"12\", 3)\n Quantum object: dims = [[1, 1], [3, 3]], shape = [1, 9], type = bra\n Qobj data =\n [[ 0. 0. 0. 0. 0. 1. 0. 0. 0.]]\n\n\n >>> bra(\"31\", [5, 2])\n Quantum object: dims = [[1, 1], [5, 2]], shape = [1, 10], type = bra\n Qobj data =\n [[ 0. 0. 0. 0. 0. 0. 0. 1. 0. 0.]]\n \"\"\"\n return ket(seq, dim=dim).dag()\n\n\n#\n# quantum state number helper functions\n#\ndef state_number_enumerate(dims, excitations=None, state=None, idx=0):\n \"\"\"\n An iterator that enumerate all the state number arrays (quantum numbers on\n the form [n1, n2, n3, ...]) for a system with dimensions given by dims.\n\n Example:\n\n >>> for state in state_number_enumerate([2,2]):\n >>> print(state)\n [ 0 0 ]\n [ 0 1 ]\n [ 1 0 ]\n [ 1 1 ]\n\n Parameters\n ----------\n dims : list or array\n The quantum state dimensions array, as it would appear in a Qobj.\n\n state : list\n Current state in the iteration. Used internally.\n\n excitations : integer (None)\n Restrict state space to states with excitation numbers below or\n equal to this value.\n\n idx : integer\n Current index in the iteration. 
Used internally.\n\n Returns\n -------\n state_number : list\n Successive state number arrays that can be used in loops and other\n iterations, using standard state enumeration *by definition*.\n\n \"\"\"\n\n if state is None:\n state = np.zeros(len(dims), dtype=int)\n\n if excitations and sum(state[0:idx]) > excitations:\n pass\n elif idx == len(dims):\n if excitations is None:\n yield np.array(state)\n else:\n yield tuple(state)\n else:\n for n in range(dims[idx]):\n state[idx] = n\n for s in state_number_enumerate(dims, excitations, state, idx + 1):\n yield s\n\n\ndef state_number_index(dims, state):\n \"\"\"\n Return the index of a quantum state corresponding to state,\n given a system with dimensions given by dims.\n\n Example:\n\n >>> state_number_index([2, 2, 2], [1, 1, 0])\n 6\n\n Parameters\n ----------\n dims : list or array\n The quantum state dimensions array, as it would appear in a Qobj.\n\n state : list\n State number array.\n\n Returns\n -------\n idx : int\n The index of the state given by `state` in standard enumeration\n ordering.\n\n \"\"\"\n return int(\n sum([state[i] * prod(dims[i + 1:]) for i, d in enumerate(dims)]))\n\n\ndef state_index_number(dims, index):\n \"\"\"\n Return a quantum number representation given a state index, for a system\n of composite structure defined by dims.\n\n Example:\n\n >>> state_index_number([2, 2, 2], 6)\n [1, 1, 0]\n\n Parameters\n ----------\n dims : list or array\n The quantum state dimensions array, as it would appear in a Qobj.\n\n index : integer\n The index of the state in standard enumeration ordering.\n\n Returns\n -------\n state : list\n The state number array corresponding to index `index` in standard\n enumeration ordering.\n\n \"\"\"\n state = np.empty_like(dims)\n\n D = np.concatenate([np.flipud(np.cumprod(np.flipud(dims[1:]))), [1]])\n\n for n in range(len(dims)):\n state[n] = index / D[n]\n index -= state[n] * D[n]\n\n return list(state)\n\n\ndef state_number_qobj(dims, state):\n \"\"\"\n Return a Qobj representation of a quantum state specified by the state\n array `state`.\n\n Example:\n\n >>> state_number_qobj([2, 2, 2], [1, 0, 1])\n Quantum object: dims = [[2, 2, 2], [1, 1, 1]], \\\nshape = [8, 1], type = ket\n Qobj data =\n [[ 0.]\n [ 0.]\n [ 0.]\n [ 0.]\n [ 0.]\n [ 1.]\n [ 0.]\n [ 0.]]\n\n Parameters\n ----------\n dims : list or array\n The quantum state dimensions array, as it would appear in a Qobj.\n\n state : list\n State number array.\n\n Returns\n -------\n state : :class:`qutip.Qobj.qobj`\n The state as a :class:`qutip.Qobj.qobj` instance.\n\n\n \"\"\"\n return tensor([fock(dims[i], s) for i, s in enumerate(state)])\n\n\n#\n# Excitation-number restricted (enr) states\n#\ndef enr_state_dictionaries(dims, excitations):\n \"\"\"\n Return the number of states, and lookup-dictionaries for translating\n a state tuple to a state index, and vice versa, for a system with a given\n number of components and maximum number of excitations.\n\n Parameters\n ----------\n dims: list\n A list with the number of states in each sub-system.\n\n excitations : integer\n The maximum numbers of dimension\n\n Returns\n -------\n nstates, state2idx, idx2state: integer, dict, dict\n The number of states `nstates`, a dictionary for looking up state\n indices from a state tuple, and a dictionary for looking up state\n state tuples from state indices.\n \"\"\"\n nstates = 0\n state2idx = {}\n idx2state = {}\n\n for state in state_number_enumerate(dims, excitations):\n state2idx[state] = nstates\n idx2state[nstates] = state\n nstates 
+= 1\n\n return nstates, state2idx, idx2state\n\n\ndef enr_fock(dims, excitations, state):\n \"\"\"\n Generate the Fock state representation in a excitation-number restricted\n state space. The `dims` argument is a list of integers that define the\n number of quantums states of each component of a composite quantum system,\n and the `excitations` specifies the maximum number of excitations for\n the basis states that are to be included in the state space. The `state`\n argument is a tuple of integers that specifies the state (in the number\n basis representation) for which to generate the Fock state representation.\n\n Parameters\n ----------\n dims : list\n A list of the dimensions of each subsystem of a composite quantum\n system.\n\n excitations : integer\n The maximum number of excitations that are to be included in the\n state space.\n\n state : list of integers\n The state in the number basis representation.\n\n Returns\n -------\n ket : Qobj\n A Qobj instance that represent a Fock state in the exication-number-\n restricted state space defined by `dims` and `exciations`.\n\n \"\"\"\n nstates, state2idx, idx2state = enr_state_dictionaries(dims, excitations)\n\n data = sp.lil_matrix((nstates, 1), dtype=np.complex)\n\n try:\n data[state2idx[tuple(state)], 0] = 1\n except:\n raise ValueError(\"The state tuple %s is not in the restricted \"\n \"state space\" % str(tuple(state)))\n\n return Qobj(data, dims=[dims, 1])\n\n\ndef enr_thermal_dm(dims, excitations, n):\n \"\"\"\n Generate the density operator for a thermal state in the excitation-number-\n restricted state space defined by the `dims` and `exciations` arguments.\n See the documentation for enr_fock for a more detailed description of\n these arguments. The temperature of each mode in dims is specified by\n the average number of excitatons `n`.\n\n Parameters\n ----------\n dims : list\n A list of the dimensions of each subsystem of a composite quantum\n system.\n\n excitations : integer\n The maximum number of excitations that are to be included in the\n state space.\n\n n : integer\n The average number of exciations in the thermal state. 
`n` can be\n a float (which then applies to each mode), or a list/array of the same\n length as dims, in which each element corresponds specifies the\n temperature of the corresponding mode.\n\n Returns\n -------\n dm : Qobj\n Thermal state density matrix.\n \"\"\"\n nstates, state2idx, idx2state = enr_state_dictionaries(dims, excitations)\n\n if not isinstance(n, (list, np.ndarray)):\n n = np.ones(len(dims)) * n\n else:\n n = np.asarray(n)\n\n diags = [np.prod((n / (n + 1)) ** np.array(state))\n for idx, state in idx2state.items()]\n diags /= np.sum(diags)\n data = sp.spdiags(diags, 0, nstates, nstates, format='csr')\n\n return Qobj(data, dims=[dims, dims])\n\n\ndef phase_basis(N, m, phi0=0):\n \"\"\"\n Basis vector for the mth phase of the Pegg-Barnett phase operator.\n\n Parameters\n ----------\n N : int\n Number of basis vectors in Hilbert space.\n m : int\n Integer corresponding to the mth discrete phase phi_m=phi0+2*pi*m/N\n phi0 : float (default=0)\n Reference phase angle.\n\n Returns\n -------\n state : qobj\n Ket vector for mth Pegg-Barnett phase operator basis state.\n\n Notes\n -----\n The Pegg-Barnett basis states form a complete set over the truncated\n Hilbert space.\n\n \"\"\"\n phim = phi0 + (2.0 * np.pi * m) / N\n n = np.arange(N).reshape((N, 1))\n data = 1.0 / np.sqrt(N) * np.exp(1.0j * n * phim)\n return Qobj(data)\n\n\ndef zero_ket(N, dims=None):\n \"\"\"\n Creates the zero ket vector with shape Nx1 and\n dimensions `dims`.\n\n Parameters\n ----------\n N : int\n Hilbert space dimensionality\n dims : list\n Optional dimensions if ket corresponds to\n a composite Hilbert space.\n\n Returns\n -------\n zero_ket : qobj\n Zero ket on given Hilbert space.\n\n \"\"\"\n return Qobj(sp.csr_matrix((N, 1), dtype=complex), dims=dims)\n\n\ndef spin_state(j, m, type='ket'):\n \"\"\"Generates the spin state |j, m>, i.e. 
the eigenstate\n of the spin-j Sz operator with eigenvalue m.\n\n Parameters\n ----------\n j : float\n The spin of the state ().\n\n m : int\n Eigenvalue of the spin-j Sz operator.\n\n type : string {'ket', 'bra', 'dm'}\n Type of state to generate.\n\n Returns\n -------\n state : qobj\n Qobj quantum object for spin state\n\n \"\"\"\n J = 2 * j + 1\n\n if type == 'ket':\n return basis(int(J), int(j - m))\n elif type == 'bra':\n return basis(int(J), int(j - m)).dag()\n elif type == 'dm':\n return fock_dm(int(J), int(j - m))\n else:\n raise ValueError(\"invalid value keyword argument 'type'\")\n\n\ndef spin_coherent(j, theta, phi, type='ket'):\n \"\"\"Generate the coherent spin state |theta, phi>.\n\n Parameters\n ----------\n j : float\n The spin of the state.\n\n theta : float\n Angle from z axis.\n\n phi : float\n Angle from x axis.\n\n type : string {'ket', 'bra', 'dm'}\n Type of state to generate.\n\n Returns\n -------\n state : qobj\n Qobj quantum object for spin coherent state\n\n \"\"\"\n Sp = jmat(j, '+')\n Sm = jmat(j, '-')\n psi = (0.5 * theta * np.exp(1j * phi) * Sm -\n 0.5 * theta * np.exp(-1j * phi) * Sp).expm() * spin_state(j, j)\n\n if type == 'ket':\n return psi\n elif type == 'bra':\n return psi.dag()\n elif type == 'dm':\n return ket2dm(psi)\n else:\n raise ValueError(\"invalid value keyword argument 'type'\")\n\n\ndef bell_state(state='00'):\n \"\"\"\n Returns the Bell state:\n\n |B00> = 1 / sqrt(2)*[|0>|0>+|1>|1>]\n |B01> = 1 / sqrt(2)*[|0>|0>-|1>|1>]\n |B10> = 1 / sqrt(2)*[|0>|1>+|1>|0>]\n |B11> = 1 / sqrt(2)*[|0>|1>-|1>|0>]\n\n Returns\n -------\n Bell_state : qobj\n Bell state\n\n \"\"\"\n if state == '00':\n Bell_state = tensor(\n basis(2), basis(2))+tensor(basis(2, 1), basis(2, 1))\n elif state == '01':\n Bell_state = tensor(\n basis(2), basis(2))-tensor(basis(2, 1), basis(2, 1))\n elif state == '10':\n Bell_state = tensor(\n basis(2), basis(2, 1))+tensor(basis(2, 1), basis(2))\n elif state == '11':\n Bell_state = tensor(\n basis(2), basis(2, 1))-tensor(basis(2, 1), basis(2))\n\n return Bell_state.unit()\n\n\ndef singlet_state():\n \"\"\"\n Returns the two particle singlet-state:\n\n |S>=1/sqrt(2)*[|0>|1>-|1>|0>]\n\n that is identical to the fourth bell state.\n\n Returns\n -------\n Bell_state : qobj\n |B11> Bell state\n\n \"\"\"\n return bell_state('11')\n\n\ndef triplet_states():\n \"\"\"\n Returns the two particle triplet-states:\n\n |T>= |1>|1>\n = 1 / sqrt(2)*[|0>|1>-|1>|0>]\n = |0>|0>\n that is identical to the fourth bell state.\n\n Returns\n -------\n trip_states : list\n 2 particle triplet states\n\n \"\"\"\n trip_states = []\n trip_states.append(tensor(basis(2, 1), basis(2, 1)))\n trip_states.append(\n (tensor(basis(2), basis(2, 1)) + tensor(basis(2, 1), basis(2))).unit()\n )\n trip_states.append(tensor(basis(2), basis(2)))\n return trip_states\n\n\ndef w_state(N=3):\n \"\"\"\n Returns the N-qubit W-state.\n\n Parameters\n ----------\n N : int (default=3)\n Number of qubits in state\n\n Returns\n -------\n W : qobj\n N-qubit W-state\n\n \"\"\"\n inds = np.zeros(N, dtype=int)\n inds[0] = 1\n state = tensor([basis(2, x) for x in inds])\n for kk in range(1, N):\n perm_inds = np.roll(inds, kk)\n state += tensor([basis(2, x) for x in perm_inds])\n return state.unit()\n\n\ndef ghz_state(N=3):\n \"\"\"\n Returns the N-qubit GHZ-state.\n\n Parameters\n ----------\n N : int (default=3)\n Number of qubits in state\n\n Returns\n -------\n G : qobj\n N-qubit GHZ-state\n\n \"\"\"\n state = (tensor([basis(2) for k in range(N)]) +\n tensor([basis(2, 1) for k in 
range(N)]))\n return state/np.sqrt(2)\n",
"path": "qutip/states.py"
}
] | diff --git a/qutip/states.py b/qutip/states.py
index 0345945771..d0710ad7df 100644
--- a/qutip/states.py
+++ b/qutip/states.py
@@ -1060,8 +1060,7 @@ def spin_state(j, m, type='ket'):
 
 
 def spin_coherent(j, theta, phi, type='ket'):
-    """Generates the spin state |j, m>, i.e. the eigenstate
-    of the spin-j Sz operator with eigenvalue m.
+    """Generate the coherent spin state |theta, phi>.
 
     Parameters
     ----------
|
cookiecutter__cookiecutter-588 | No way to define options that have no defaults
Currently if you set a value in `cookiecutter.json` to `null` it becomes `None` and is then turned into the _string_ `'None'`.
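A minimal sketch of the coercion described above, assuming a hypothetical `cookiecutter.json` containing `{"license": null}`; the `render_variable` helper in `prompt.py` casts non-string defaults with `str()` before templating, which is where `None` becomes the literal text `'None'`:

```python
import json
from jinja2 import Environment

# Hypothetical context loaded from a cookiecutter.json with a null value.
context = json.loads('{"cookiecutter": {"license": null}}')
raw = context["cookiecutter"]["license"]   # -> None after JSON parsing

# render_variable() turns non-string values into str(raw), so None
# becomes the literal text "None" once the template is rendered.
rendered = Environment().from_string(str(raw)).render(cookiecutter={})
print(repr(rendered))  # 'None' -- this string is offered as the prompt default
```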
| [
{
"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ncookiecutter.prompt\n---------------------\n\nFunctions for prompting the user for project info.\n\"\"\"\n\nfrom collections import OrderedDict\n\nimport click\nfrom past.builtins import basestring\n\nfrom future.utils import iteritems\nfrom jinja2.environment import Environment\n\n\ndef read_user_variable(var_name, default_value):\n \"\"\"Prompt the user for the given variable and return the entered value\n or the given default.\n\n :param str var_name: Variable of the context to query the user\n :param default_value: Value that will be returned if no input happens\n \"\"\"\n # Please see http://click.pocoo.org/4/api/#click.prompt\n return click.prompt(var_name, default=default_value)\n\n\ndef read_user_yes_no(question, default_value):\n \"\"\"Prompt the user to reply with 'yes' or 'no' (or equivalent values).\n\n Note:\n Possible choices are 'true', '1', 'yes', 'y' or 'false', '0', 'no', 'n'\n\n :param str question: Question to the user\n :param default_value: Value that will be returned if no input happens\n \"\"\"\n # Please see http://click.pocoo.org/4/api/#click.prompt\n return click.prompt(\n question,\n default=default_value,\n type=click.BOOL\n )\n\n\ndef read_user_choice(var_name, options):\n \"\"\"Prompt the user to choose from several options for the given variable.\n\n The first item will be returned if no input happens.\n\n :param str var_name: Variable as specified in the context\n :param list options: Sequence of options that are available to select from\n :return: Exactly one item of ``options`` that has been chosen by the user\n \"\"\"\n # Please see http://click.pocoo.org/4/api/#click.prompt\n if not isinstance(options, list):\n raise TypeError\n\n if not options:\n raise ValueError\n\n choice_map = OrderedDict(\n (u'{}'.format(i), value) for i, value in enumerate(options, 1)\n )\n choices = choice_map.keys()\n default = u'1'\n\n choice_lines = [u'{} - {}'.format(*c) for c in choice_map.items()]\n prompt = u'\\n'.join((\n u'Select {}:'.format(var_name),\n u'\\n'.join(choice_lines),\n u'Choose from {}'.format(u', '.join(choices))\n ))\n\n user_choice = click.prompt(\n prompt, type=click.Choice(choices), default=default\n )\n return choice_map[user_choice]\n\n\ndef render_variable(env, raw, cookiecutter_dict):\n if not isinstance(raw, basestring):\n raw = str(raw)\n template = env.from_string(raw)\n rendered_template = template.render(cookiecutter=cookiecutter_dict)\n return rendered_template\n\n\ndef prompt_choice_for_config(cookiecutter_dict, env, key, options, no_input):\n \"\"\"Prompt the user which option to choose from the given. 
Each of the\n possible choices is rendered beforehand.\n \"\"\"\n rendered_options = [\n render_variable(env, raw, cookiecutter_dict) for raw in options\n ]\n\n if no_input:\n return rendered_options[0]\n return read_user_choice(key, rendered_options)\n\n\ndef prompt_for_config(context, no_input=False):\n \"\"\"\n Prompts the user to enter new config, using context as a source for the\n field names and sample values.\n\n :param no_input: Prompt the user at command line for manual configuration?\n \"\"\"\n cookiecutter_dict = {}\n env = Environment()\n\n for key, raw in iteritems(context[u'cookiecutter']):\n if key.startswith(u'_'):\n cookiecutter_dict[key] = raw\n continue\n\n if isinstance(raw, list):\n # We are dealing with a choice variable\n val = prompt_choice_for_config(\n cookiecutter_dict, env, key, raw, no_input\n )\n else:\n # We are dealing with a regular variable\n val = render_variable(env, raw, cookiecutter_dict)\n\n if not no_input:\n val = read_user_variable(key, val)\n\n cookiecutter_dict[key] = val\n return cookiecutter_dict\n",
"path": "cookiecutter/prompt.py"
}
] | [
{
"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ncookiecutter.prompt\n---------------------\n\nFunctions for prompting the user for project info.\n\"\"\"\n\nfrom collections import OrderedDict\n\nimport click\nfrom past.builtins import basestring\n\nfrom future.utils import iteritems\nfrom jinja2.environment import Environment\n\n\ndef read_user_variable(var_name, default_value):\n \"\"\"Prompt the user for the given variable and return the entered value\n or the given default.\n\n :param str var_name: Variable of the context to query the user\n :param default_value: Value that will be returned if no input happens\n \"\"\"\n # Please see http://click.pocoo.org/4/api/#click.prompt\n return click.prompt(var_name, default=default_value)\n\n\ndef read_user_yes_no(question, default_value):\n \"\"\"Prompt the user to reply with 'yes' or 'no' (or equivalent values).\n\n Note:\n Possible choices are 'true', '1', 'yes', 'y' or 'false', '0', 'no', 'n'\n\n :param str question: Question to the user\n :param default_value: Value that will be returned if no input happens\n \"\"\"\n # Please see http://click.pocoo.org/4/api/#click.prompt\n return click.prompt(\n question,\n default=default_value,\n type=click.BOOL\n )\n\n\ndef read_user_choice(var_name, options):\n \"\"\"Prompt the user to choose from several options for the given variable.\n\n The first item will be returned if no input happens.\n\n :param str var_name: Variable as specified in the context\n :param list options: Sequence of options that are available to select from\n :return: Exactly one item of ``options`` that has been chosen by the user\n \"\"\"\n # Please see http://click.pocoo.org/4/api/#click.prompt\n if not isinstance(options, list):\n raise TypeError\n\n if not options:\n raise ValueError\n\n choice_map = OrderedDict(\n (u'{}'.format(i), value) for i, value in enumerate(options, 1)\n )\n choices = choice_map.keys()\n default = u'1'\n\n choice_lines = [u'{} - {}'.format(*c) for c in choice_map.items()]\n prompt = u'\\n'.join((\n u'Select {}:'.format(var_name),\n u'\\n'.join(choice_lines),\n u'Choose from {}'.format(u', '.join(choices))\n ))\n\n user_choice = click.prompt(\n prompt, type=click.Choice(choices), default=default\n )\n return choice_map[user_choice]\n\n\ndef render_variable(env, raw, cookiecutter_dict):\n if raw is None:\n return None\n if not isinstance(raw, basestring):\n raw = str(raw)\n template = env.from_string(raw)\n rendered_template = template.render(cookiecutter=cookiecutter_dict)\n return rendered_template\n\n\ndef prompt_choice_for_config(cookiecutter_dict, env, key, options, no_input):\n \"\"\"Prompt the user which option to choose from the given. 
Each of the\n possible choices is rendered beforehand.\n \"\"\"\n rendered_options = [\n render_variable(env, raw, cookiecutter_dict) for raw in options\n ]\n\n if no_input:\n return rendered_options[0]\n return read_user_choice(key, rendered_options)\n\n\ndef prompt_for_config(context, no_input=False):\n \"\"\"\n Prompts the user to enter new config, using context as a source for the\n field names and sample values.\n\n :param no_input: Prompt the user at command line for manual configuration?\n \"\"\"\n cookiecutter_dict = {}\n env = Environment()\n\n for key, raw in iteritems(context[u'cookiecutter']):\n if key.startswith(u'_'):\n cookiecutter_dict[key] = raw\n continue\n\n if isinstance(raw, list):\n # We are dealing with a choice variable\n val = prompt_choice_for_config(\n cookiecutter_dict, env, key, raw, no_input\n )\n else:\n # We are dealing with a regular variable\n val = render_variable(env, raw, cookiecutter_dict)\n\n if not no_input:\n val = read_user_variable(key, val)\n\n cookiecutter_dict[key] = val\n return cookiecutter_dict\n",
"path": "cookiecutter/prompt.py"
}
] | diff --git a/cookiecutter/prompt.py b/cookiecutter/prompt.py
index d06409c42..0809f9e56 100755
--- a/cookiecutter/prompt.py
+++ b/cookiecutter/prompt.py
@@ -81,6 +81,8 @@ def read_user_choice(var_name, options):
def render_variable(env, raw, cookiecutter_dict):
+ if raw is None:
+ return None
if not isinstance(raw, basestring):
raw = str(raw)
template = env.from_string(raw)
diff --git a/tests/test_prompt.py b/tests/test_prompt.py
index 3000fda1a..583da1d84 100644
--- a/tests/test_prompt.py
+++ b/tests/test_prompt.py
@@ -22,7 +22,8 @@
(1, '1'),
(True, 'True'),
('foo', 'foo'),
- ('{{cookiecutter.project}}', 'foobar')
+ ('{{cookiecutter.project}}', 'foobar'),
+ (None, None),
])
def test_convert_to_str(mocker, raw_var, rendered_var):
env = Environment()
@@ -35,10 +36,13 @@ def test_convert_to_str(mocker, raw_var, rendered_var):
result = prompt.render_variable(env, raw_var, context)
assert result == rendered_var
- # Make sure that non str variables are conerted beforehand
- if not isinstance(raw_var, basestring):
- raw_var = str(raw_var)
- from_string.assert_called_once_with(raw_var)
+ # Make sure that non None non str variables are conerted beforehand
+ if raw_var is not None:
+ if not isinstance(raw_var, basestring):
+ raw_var = str(raw_var)
+ from_string.assert_called_once_with(raw_var)
+ else:
+ assert not from_string.called
@pytest.fixture(autouse=True)
|
openvinotoolkit__datumaro-125 | infer result passed from openvino launcher to interpreter is not appropriate.
I tried running a model using openvino's mobilenet-v2-pytorch model.
(using mobilenet-v2-pytorch.xml, mobilenet-v2-pytorch.bin)
`datum model run -p proj -m model-0`
However, only the name of the output layer (e.g. the string 'prob') is passed into the interpreter's `outputs` parameter. Please check the return value of OpenvinoLauncher.infer
`results = self._net.infer(inputs)` line 178, openvino_launcher.py
Debugging shows the results are correct up to the code above, but it seems that only the name of the result layer is returned and passed to the interpreter.
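A minimal sketch of why only the layer name reaches the interpreter (plain Python; the 'prob' output name and its shape are just illustrative for a single-output classification network):

```python
import numpy as np

# OpenVINO's infer() returns a dict mapping output layer names to blobs.
results = {"prob": np.zeros((1, 1000))}

# Iterating a dict yields its keys, so this hands back the layer name:
print(next(iter(results)))                 # prob

# Taking the values view returns the actual output blob instead:
print(next(iter(results.values())).shape)  # (1, 1000)
```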
| [
{
"content": "\n# Copyright (C) 2019-2020 Intel Corporation\n#\n# SPDX-License-Identifier: MIT\n\n# pylint: disable=exec-used\n\nimport cv2\nimport logging as log\nimport numpy as np\nimport os.path as osp\nimport shutil\n\nfrom openvino.inference_engine import IECore\n\nfrom datumaro.components.cli_plugin import CliPlugin\nfrom datumaro.components.launcher import Launcher\n\n\nclass _OpenvinoImporter(CliPlugin):\n @staticmethod\n def _parse_output_layers(s):\n return [s.strip() for s in s.split(',')]\n\n @classmethod\n def build_cmdline_parser(cls, **kwargs):\n parser = super().build_cmdline_parser(**kwargs)\n parser.add_argument('-d', '--description', required=True,\n help=\"Path to the model description file (.xml)\")\n parser.add_argument('-w', '--weights', required=True,\n help=\"Path to the model weights file (.bin)\")\n parser.add_argument('-i', '--interpreter', required=True,\n help=\"Path to the network output interprter script (.py)\")\n parser.add_argument('--device', default='CPU',\n help=\"Target device (default: %(default)s)\")\n parser.add_argument('--output-layers', type=cls._parse_output_layers,\n help=\"A comma-separated list of extra output layers\")\n return parser\n\n @staticmethod\n def copy_model(model_dir, model):\n shutil.copy(model['description'],\n osp.join(model_dir, osp.basename(model['description'])))\n model['description'] = osp.basename(model['description'])\n\n shutil.copy(model['weights'],\n osp.join(model_dir, osp.basename(model['weights'])))\n model['weights'] = osp.basename(model['weights'])\n\n shutil.copy(model['interpreter'],\n osp.join(model_dir, osp.basename(model['interpreter'])))\n model['interpreter'] = osp.basename(model['interpreter'])\n\n\nclass InterpreterScript:\n def __init__(self, path):\n with open(path, 'r') as f:\n script = f.read()\n\n context = {}\n exec(script, context, context)\n\n process_outputs = context.get('process_outputs')\n if not callable(process_outputs):\n raise Exception(\"Can't find 'process_outputs' function in \"\n \"the interpreter script\")\n self.__dict__['process_outputs'] = process_outputs\n\n get_categories = context.get('get_categories')\n assert get_categories is None or callable(get_categories)\n if get_categories:\n self.__dict__['get_categories'] = get_categories\n\n @staticmethod\n def get_categories():\n return None\n\n @staticmethod\n def process_outputs(inputs, outputs):\n raise NotImplementedError(\n \"Function should be implemented in the interpreter script\")\n\n\nclass OpenvinoLauncher(Launcher):\n cli_plugin = _OpenvinoImporter\n\n def __init__(self, description, weights, interpreter,\n device=None, model_dir=None, output_layers=None):\n if not model_dir:\n model_dir = ''\n if not osp.isfile(description):\n description = osp.join(model_dir, description)\n if not osp.isfile(description):\n raise Exception('Failed to open model description file \"%s\"' % \\\n (description))\n\n if not osp.isfile(weights):\n weights = osp.join(model_dir, weights)\n if not osp.isfile(weights):\n raise Exception('Failed to open model weights file \"%s\"' % \\\n (weights))\n\n if not osp.isfile(interpreter):\n interpreter = osp.join(model_dir, interpreter)\n if not osp.isfile(interpreter):\n raise Exception('Failed to open model interpreter script file \"%s\"' % \\\n (interpreter))\n\n self._interpreter = InterpreterScript(interpreter)\n\n self._device = device or 'CPU'\n self._output_blobs = output_layers\n\n self._ie = IECore()\n self._network = self._ie.read_network(description, weights)\n 
self._check_model_support(self._network, self._device)\n self._load_executable_net()\n\n def _check_model_support(self, net, device):\n not_supported_layers = set(name\n for name, dev in self._ie.query_network(net, device).items()\n if not dev)\n if len(not_supported_layers) != 0:\n log.error(\"The following layers are not supported \" \\\n \"by the plugin for device '%s': %s.\" % \\\n (device, ', '.join(not_supported_layers)))\n raise NotImplementedError(\n \"Some layers are not supported on the device\")\n\n def _load_executable_net(self, batch_size=1):\n network = self._network\n\n if self._output_blobs:\n network.add_outputs(self._output_blobs)\n\n iter_inputs = iter(network.input_info)\n self._input_blob = next(iter_inputs)\n\n # NOTE: handling for the inclusion of `image_info` in OpenVino2019\n self._require_image_info = 'image_info' in network.input_info\n if self._input_blob == 'image_info':\n self._input_blob = next(iter_inputs)\n\n self._input_layout = network.input_info[self._input_blob].input_data.shape\n self._input_layout[0] = batch_size\n network.reshape({self._input_blob: self._input_layout})\n self._batch_size = batch_size\n\n self._net = self._ie.load_network(network=network, num_requests=1,\n device_name=self._device)\n\n def infer(self, inputs):\n assert len(inputs.shape) == 4, \\\n \"Expected an input image in (N, H, W, C) format, got %s\" % \\\n (inputs.shape, )\n\n if inputs.shape[3] == 1: # A batch of single-channel images\n inputs = np.repeat(inputs, 3, axis=3)\n\n assert inputs.shape[3] == 3, \\\n \"Expected BGR input, got %s\" % (inputs.shape, )\n\n n, c, h, w = self._input_layout\n if inputs.shape[1:3] != (h, w):\n resized_inputs = np.empty((n, h, w, c), dtype=inputs.dtype)\n for inp, resized_input in zip(inputs, resized_inputs):\n cv2.resize(inp, (w, h), resized_input)\n inputs = resized_inputs\n inputs = inputs.transpose((0, 3, 1, 2)) # NHWC to NCHW\n inputs = {self._input_blob: inputs}\n if self._require_image_info:\n info = np.zeros([1, 3])\n info[0, 0] = h\n info[0, 1] = w\n info[0, 2] = 1.0 # scale\n inputs['image_info'] = info\n\n results = self._net.infer(inputs)\n if len(results) == 1:\n return next(iter(results))\n else:\n return results\n\n def launch(self, inputs):\n batch_size = len(inputs)\n if self._batch_size < batch_size:\n self._load_executable_net(batch_size)\n\n outputs = self.infer(inputs)\n results = self.process_outputs(inputs, outputs)\n return results\n\n def categories(self):\n return self._interpreter.get_categories()\n\n def process_outputs(self, inputs, outputs):\n return self._interpreter.process_outputs(inputs, outputs)\n\n",
"path": "datumaro/plugins/openvino_launcher.py"
}
] | [
{
"content": "\n# Copyright (C) 2019-2020 Intel Corporation\n#\n# SPDX-License-Identifier: MIT\n\n# pylint: disable=exec-used\n\nimport cv2\nimport logging as log\nimport numpy as np\nimport os.path as osp\nimport shutil\n\nfrom openvino.inference_engine import IECore\n\nfrom datumaro.components.cli_plugin import CliPlugin\nfrom datumaro.components.launcher import Launcher\n\n\nclass _OpenvinoImporter(CliPlugin):\n @staticmethod\n def _parse_output_layers(s):\n return [s.strip() for s in s.split(',')]\n\n @classmethod\n def build_cmdline_parser(cls, **kwargs):\n parser = super().build_cmdline_parser(**kwargs)\n parser.add_argument('-d', '--description', required=True,\n help=\"Path to the model description file (.xml)\")\n parser.add_argument('-w', '--weights', required=True,\n help=\"Path to the model weights file (.bin)\")\n parser.add_argument('-i', '--interpreter', required=True,\n help=\"Path to the network output interprter script (.py)\")\n parser.add_argument('--device', default='CPU',\n help=\"Target device (default: %(default)s)\")\n parser.add_argument('--output-layers', type=cls._parse_output_layers,\n help=\"A comma-separated list of extra output layers\")\n return parser\n\n @staticmethod\n def copy_model(model_dir, model):\n shutil.copy(model['description'],\n osp.join(model_dir, osp.basename(model['description'])))\n model['description'] = osp.basename(model['description'])\n\n shutil.copy(model['weights'],\n osp.join(model_dir, osp.basename(model['weights'])))\n model['weights'] = osp.basename(model['weights'])\n\n shutil.copy(model['interpreter'],\n osp.join(model_dir, osp.basename(model['interpreter'])))\n model['interpreter'] = osp.basename(model['interpreter'])\n\n\nclass InterpreterScript:\n def __init__(self, path):\n with open(path, 'r') as f:\n script = f.read()\n\n context = {}\n exec(script, context, context)\n\n process_outputs = context.get('process_outputs')\n if not callable(process_outputs):\n raise Exception(\"Can't find 'process_outputs' function in \"\n \"the interpreter script\")\n self.__dict__['process_outputs'] = process_outputs\n\n get_categories = context.get('get_categories')\n assert get_categories is None or callable(get_categories)\n if get_categories:\n self.__dict__['get_categories'] = get_categories\n\n @staticmethod\n def get_categories():\n return None\n\n @staticmethod\n def process_outputs(inputs, outputs):\n raise NotImplementedError(\n \"Function should be implemented in the interpreter script\")\n\n\nclass OpenvinoLauncher(Launcher):\n cli_plugin = _OpenvinoImporter\n\n def __init__(self, description, weights, interpreter,\n device=None, model_dir=None, output_layers=None):\n if not model_dir:\n model_dir = ''\n if not osp.isfile(description):\n description = osp.join(model_dir, description)\n if not osp.isfile(description):\n raise Exception('Failed to open model description file \"%s\"' % \\\n (description))\n\n if not osp.isfile(weights):\n weights = osp.join(model_dir, weights)\n if not osp.isfile(weights):\n raise Exception('Failed to open model weights file \"%s\"' % \\\n (weights))\n\n if not osp.isfile(interpreter):\n interpreter = osp.join(model_dir, interpreter)\n if not osp.isfile(interpreter):\n raise Exception('Failed to open model interpreter script file \"%s\"' % \\\n (interpreter))\n\n self._interpreter = InterpreterScript(interpreter)\n\n self._device = device or 'CPU'\n self._output_blobs = output_layers\n\n self._ie = IECore()\n self._network = self._ie.read_network(description, weights)\n 
self._check_model_support(self._network, self._device)\n self._load_executable_net()\n\n def _check_model_support(self, net, device):\n not_supported_layers = set(name\n for name, dev in self._ie.query_network(net, device).items()\n if not dev)\n if len(not_supported_layers) != 0:\n log.error(\"The following layers are not supported \" \\\n \"by the plugin for device '%s': %s.\" % \\\n (device, ', '.join(not_supported_layers)))\n raise NotImplementedError(\n \"Some layers are not supported on the device\")\n\n def _load_executable_net(self, batch_size=1):\n network = self._network\n\n if self._output_blobs:\n network.add_outputs(self._output_blobs)\n\n iter_inputs = iter(network.input_info)\n self._input_blob = next(iter_inputs)\n\n # NOTE: handling for the inclusion of `image_info` in OpenVino2019\n self._require_image_info = 'image_info' in network.input_info\n if self._input_blob == 'image_info':\n self._input_blob = next(iter_inputs)\n\n self._input_layout = network.input_info[self._input_blob].input_data.shape\n self._input_layout[0] = batch_size\n network.reshape({self._input_blob: self._input_layout})\n self._batch_size = batch_size\n\n self._net = self._ie.load_network(network=network, num_requests=1,\n device_name=self._device)\n\n def infer(self, inputs):\n assert len(inputs.shape) == 4, \\\n \"Expected an input image in (N, H, W, C) format, got %s\" % \\\n (inputs.shape, )\n\n if inputs.shape[3] == 1: # A batch of single-channel images\n inputs = np.repeat(inputs, 3, axis=3)\n\n assert inputs.shape[3] == 3, \\\n \"Expected BGR input, got %s\" % (inputs.shape, )\n\n n, c, h, w = self._input_layout\n if inputs.shape[1:3] != (h, w):\n resized_inputs = np.empty((n, h, w, c), dtype=inputs.dtype)\n for inp, resized_input in zip(inputs, resized_inputs):\n cv2.resize(inp, (w, h), resized_input)\n inputs = resized_inputs\n inputs = inputs.transpose((0, 3, 1, 2)) # NHWC to NCHW\n inputs = {self._input_blob: inputs}\n if self._require_image_info:\n info = np.zeros([1, 3])\n info[0, 0] = h\n info[0, 1] = w\n info[0, 2] = 1.0 # scale\n inputs['image_info'] = info\n\n results = self._net.infer(inputs)\n if len(results) == 1:\n return next(iter(results.values()))\n else:\n return results\n\n def launch(self, inputs):\n batch_size = len(inputs)\n if self._batch_size < batch_size:\n self._load_executable_net(batch_size)\n\n outputs = self.infer(inputs)\n results = self.process_outputs(inputs, outputs)\n return results\n\n def categories(self):\n return self._interpreter.get_categories()\n\n def process_outputs(self, inputs, outputs):\n return self._interpreter.process_outputs(inputs, outputs)\n\n",
"path": "datumaro/plugins/openvino_launcher.py"
}
] | diff --git a/CHANGELOG.md b/CHANGELOG.md
index 12e1062671..f16e5fce62 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -20,7 +20,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
-
### Fixed
--
+- Inference result for only one output layer in OpenVINO launcher (<https://github.com/openvinotoolkit/datumaro/pull/125>)
### Security
-
diff --git a/datumaro/plugins/openvino_launcher.py b/datumaro/plugins/openvino_launcher.py
index 6a21750356..7c64d6fa44 100644
--- a/datumaro/plugins/openvino_launcher.py
+++ b/datumaro/plugins/openvino_launcher.py
@@ -177,7 +177,7 @@ def infer(self, inputs):
results = self._net.infer(inputs)
if len(results) == 1:
- return next(iter(results))
+ return next(iter(results.values()))
else:
return results
|
ietf-tools__datatracker-6907 | the ietf meeting parts of upcoming.ics end a day early
Our custom ics should take into account (from RFC5545):
> The "DTEND" property
> for a "VEVENT" calendar component specifies the non-inclusive end
> of the event.
See
https://github.com/ietf-tools/datatracker/blob/287cf0fe46c0b1b7548389b4327854567e6b29f8/ietf/templates/meeting/upcoming.ics#L28
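A minimal sketch of the date arithmetic this implies for an all-day event (plain Python, not the actual template fix; the dates are made up):

```python
import datetime

start = datetime.date(2024, 3, 16)  # first meeting day
end = datetime.date(2024, 3, 22)    # last meeting day (inclusive)

# Per RFC 5545, DTEND is non-inclusive, so the calendar entry must end
# one day *after* the last meeting day to cover the whole meeting.
dtstart = start.strftime("%Y%m%d")
dtend = (end + datetime.timedelta(days=1)).strftime("%Y%m%d")

print(f"DTSTART;VALUE=DATE:{dtstart}")  # DTSTART;VALUE=DATE:20240316
print(f"DTEND;VALUE=DATE:{dtend}")      # DTEND;VALUE=DATE:20240323
```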
| [
{
"content": "# Copyright The IETF Trust 2007-2023, All Rights Reserved\n# -*- coding: utf-8 -*-\n\n\nimport datetime\nimport re\nfrom urllib.parse import urljoin\nfrom zoneinfo import ZoneInfo\n\nfrom django import template\nfrom django.conf import settings\nfrom django.utils.html import escape\nfrom django.template.defaultfilters import truncatewords_html, linebreaksbr, stringfilter, striptags\nfrom django.utils.safestring import mark_safe, SafeData\nfrom django.utils.html import strip_tags\nfrom django.utils.encoding import force_str\nfrom django.urls import reverse as urlreverse\nfrom django.core.cache import cache\nfrom django.core.exceptions import ValidationError\nfrom django.urls import NoReverseMatch\nfrom django.utils import timezone\n\nimport debug # pyflakes:ignore\n\nfrom ietf.doc.models import BallotDocEvent, Document\nfrom ietf.doc.models import ConsensusDocEvent\nfrom ietf.ietfauth.utils import can_request_rfc_publication as utils_can_request_rfc_publication\nfrom ietf.utils.html import sanitize_fragment\nfrom ietf.utils import log\nfrom ietf.doc.utils import prettify_std_name\nfrom ietf.utils.text import wordwrap, fill, wrap_text_if_unwrapped, bleach_linker, bleach_cleaner, validate_url\n\nregister = template.Library()\n\ndef collapsebr(html):\n return re.sub('(<(br ?/|/p)>[ \\n]*)(<(br) ?/?>[ \\n]*)*(<(br|p) ?/?>[ \\n]*)', '\\\\1\\\\5', html)\n\[email protected]\ndef indent(value, numspaces=2):\n replacement = \"\\n\" + \" \" * int(numspaces)\n res = value.replace(\"\\n\", replacement)\n if res.endswith(replacement):\n res = res[:-int(numspaces)] # fix up superfluous spaces\n return res\n\[email protected]\ndef unindent(value):\n \"\"\"Remove indentation from string.\"\"\"\n return re.sub(\"\\n +\", \"\\n\", value)\n\n# there's an \"ahref -> a href\" in GEN_UTIL\n# but let's wait until we understand what that's for.\[email protected](name='make_one_per_line')\ndef make_one_per_line(value):\n \"\"\"\n Turn a comma-separated list into a carriage-return-seperated list.\n\n >>> force_str(make_one_per_line(\"a, b, c\"))\n 'a\\\\nb\\\\nc'\n\n Pass through non-strings:\n\n >>> make_one_per_line([1, 2])\n [1, 2]\n\n >>> make_one_per_line(None)\n\n \"\"\"\n if value and isinstance(value, str):\n return re.sub(\", ?\", \"\\n\", value)\n elif value and isinstance(value, bytes):\n log.assertion('isinstance(value, str)')\n else:\n return value\n\[email protected](name='keep_spacing')\ndef keep_spacing(value):\n \"\"\"\n Replace any two spaces with one and one space so that\n HTML output doesn't collapse them.\"\"\"\n return value.replace(' ', ' ')\n\[email protected](name='format_textarea')\ndef format_textarea(value):\n \"\"\"\n Escapes HTML, except for <b>, </b>, <br>.\n\n Adds <br> at the end like the builtin linebreaksbr.\n\n Also calls keep_spacing.\"\"\"\n return keep_spacing(linebreaksbr(escape(value).replace('<b>','<b>').replace('</b>','</b>').replace('<br>','<br>')))\n\[email protected](name='sanitize')\ndef sanitize(value):\n \"\"\"Sanitizes an HTML fragment.\n This means both fixing broken html and restricting elements and\n attributes to those deemed acceptable. 
See ietf/utils/html.py\n for the details.\n \"\"\"\n return mark_safe(sanitize_fragment(value))\n\n\n# For use with ballot view\[email protected](name='bracket')\ndef square_brackets(value):\n \"\"\"Adds square brackets around text.\"\"\"\n if isinstance(value, str):\n if value == \"\":\n value = \" \"\n return \"[ %s ]\" % value\n elif isinstance(value, bytes):\n log.assertion('isinstance(value, str)')\n elif value > 0:\n return \"[ X ]\"\n elif value < 0:\n return \"[ . ]\"\n else:\n return \"[ ]\"\n\[email protected](name='bracketpos')\ndef bracketpos(pos,posslug):\n if pos.pos.slug==posslug:\n return \"[ X ]\"\n elif posslug in [x.slug for x in pos.old_positions]:\n return \"[ . ]\"\n else:\n return \"[ ]\"\n\nregister.filter('fill', fill)\n\[email protected]\ndef prettystdname(string, space=\" \"):\n from ietf.doc.utils import prettify_std_name\n return prettify_std_name(force_str(string or \"\"), space)\n\[email protected]\ndef rfceditor_info_url(rfcnum : str):\n \"\"\"Link to the RFC editor info page for an RFC\"\"\"\n return urljoin(settings.RFC_EDITOR_INFO_BASE_URL, f'rfc{rfcnum}')\n\n\ndef doc_name(name):\n \"\"\"Check whether a given document exists, and return its canonical name\"\"\"\n\n def find_unique(n):\n key = hash(n)\n found = cache.get(key)\n if not found:\n exact = Document.objects.filter(name=n).first()\n found = exact.name if exact else \"_\"\n # TODO review this cache policy (and the need for these entire function)\n cache.set(key, found, timeout=60*60*24) # cache for one day\n return None if found == \"_\" else found\n\n # chop away extension\n extension_split = re.search(r\"^(.+)\\.(txt|ps|pdf|html)$\", name)\n if extension_split:\n name = extension_split.group(1)\n\n if find_unique(name):\n return name\n\n # check for embedded rev - this may be ambiguous, so don't\n # chop it off if we don't find a match\n rev_split = re.search(r\"^(charter-.+)-(\\d{2}-\\d{2})$\", name) or re.search(\n r\"^(.+)-(\\d{2}|[1-9]\\d{2,})$\", name\n )\n if rev_split:\n name = rev_split.group(1)\n if find_unique(name):\n return name\n\n return \"\"\n\n\ndef link_charter_doc_match(match):\n if not doc_name(match[0]):\n return match[0]\n url = urlreverse(\n \"ietf.doc.views_doc.document_main\",\n kwargs=dict(name=match[1][:-1], rev=match[2]),\n )\n return f'<a href=\"{url}\">{match[0]}</a>'\n\n\ndef link_non_charter_doc_match(match):\n name = match[0]\n # handle \"I-D.*\"\" reference-style matches\n name = re.sub(r\"^i-d\\.(.*)\", r\"draft-\\1\", name, flags=re.IGNORECASE)\n cname = doc_name(name)\n if not cname:\n return match[0]\n if name == cname:\n url = urlreverse(\"ietf.doc.views_doc.document_main\", kwargs=dict(name=cname))\n return f'<a href=\"{url}\">{match[0]}</a>'\n\n # if we get here, the name probably has a version number and/or extension at the end\n rev_split = re.search(r\"^(\" + re.escape(cname) + r\")-(\\d{2,})\", name)\n if rev_split:\n name = rev_split.group(1)\n else:\n url = urlreverse(\"ietf.doc.views_doc.document_main\", kwargs=dict(name=cname))\n return f'<a href=\"{url}\">{match[0]}</a>'\n\n cname = doc_name(name)\n if not cname:\n return match[0]\n if name == cname:\n try:\n url = urlreverse(\n \"ietf.doc.views_doc.document_main\",\n kwargs=dict(name=cname, rev=rev_split.group(2)),\n )\n except NoReverseMatch:\n return match[0]\n return f'<a href=\"{url}\">{match[0]}</a>'\n\n # if we get here, we can't linkify\n return match[0]\n\n\ndef link_other_doc_match(match):\n doc = match[2].strip().lower()\n rev = match[3]\n if not doc_name(doc + rev):\n return 
match[0]\n url = urlreverse(\"ietf.doc.views_doc.document_main\", kwargs=dict(name=doc + rev))\n return f'<a href=\"{url}\">{match[1]}</a>'\n\[email protected](name=\"urlize_ietf_docs\", is_safe=True, needs_autoescape=True)\ndef urlize_ietf_docs(string, autoescape=None):\n \"\"\"\n Make occurrences of RFC NNNN and draft-foo-bar links to the doc pages.\n \"\"\"\n if autoescape and not isinstance(string, SafeData):\n if \"<\" in string:\n string = escape(string)\n else:\n string = mark_safe(string)\n string = re.sub(\n r\"\\b(?<![/\\-:=#\\\"\\'])(charter-(?:[\\d\\w\\.+]+-)*)(\\d{2}(?:-\\d{2}))(\\.(?:txt|ps|pdf|html))?\\b\",\n link_charter_doc_match,\n string,\n flags=re.IGNORECASE | re.ASCII,\n )\n string = re.sub(\n r\"\\b(?<![/\\-:=#\\\"\\'])((?:draft-|i-d\\.|bofreq-|conflict-review-|status-change-)[\\d\\w\\.+-]+(?![-@]))\",\n link_non_charter_doc_match,\n string,\n flags=re.IGNORECASE | re.ASCII,\n )\n string = re.sub(\n r\"\\b(?<![/\\-:=#\\\"\\'])((RFC|BCP|STD|FYI) *\\n? *0*(\\d+))\\b\",\n link_other_doc_match,\n string,\n flags=re.IGNORECASE | re.ASCII,\n )\n\n return mark_safe(string)\n\n\nurlize_ietf_docs = stringfilter(urlize_ietf_docs)\n\[email protected](name='urlize_related_source_list', is_safe=True, document_html=False)\ndef urlize_related_source_list(related, document_html=False):\n \"\"\"Convert a list of RelatedDocuments into list of links using the source document's canonical name\"\"\"\n links = []\n names = set()\n titles = set()\n for rel in related:\n name=rel.source.name\n title = rel.source.title\n if name in names and title in titles:\n continue\n names.add(name)\n titles.add(title)\n url = urlreverse('ietf.doc.views_doc.document_main' if document_html is False else 'ietf.doc.views_doc.document_html', kwargs=dict(name=name))\n name = escape(name)\n title = escape(title)\n links.append(mark_safe(\n '<a href=\"%(url)s\" title=\"%(title)s\">%(name)s</a>' % dict(name=prettify_std_name(name),\n title=title,\n url=url)\n ))\n return links\n \[email protected](name='urlize_related_target_list', is_safe=True, document_html=False)\ndef urlize_related_target_list(related, document_html=False):\n \"\"\"Convert a list of RelatedDocuments into list of links using the target document's canonical name\"\"\"\n links = []\n for rel in related:\n name=rel.target.name\n title = rel.target.title\n url = urlreverse('ietf.doc.views_doc.document_main' if document_html is False else 'ietf.doc.views_doc.document_html', kwargs=dict(name=name))\n name = escape(name)\n title = escape(title)\n links.append(mark_safe(\n '<a href=\"%(url)s\" title=\"%(title)s\">%(name)s</a>' % dict(name=prettify_std_name(name),\n title=title,\n url=url)\n ))\n return links\n \[email protected](name='dashify')\ndef dashify(string):\n \"\"\"\n Replace each character in string with '-', to produce\n an underline effect for plain text files.\n \"\"\"\n return re.sub('.', '-', string)\n\[email protected]\ndef underline(string):\n \"\"\"Return string with an extra line underneath of dashes, for plain text underlining.\"\"\"\n return string + \"\\n\" + (\"-\" * len(string))\n\[email protected](name='timesince_days')\ndef timesince_days(date):\n \"\"\"Returns the number of days since 'date' (relative to now)\n\n >>> timesince_days(timezone.now() - datetime.timedelta(days=2))\n 2\n\n >>> tz = ZoneInfo(settings.TIME_ZONE)\n >>> timesince_days(timezone.now().astimezone(tz).date() - datetime.timedelta(days=2))\n 2\n\n \"\"\"\n if date.__class__ is not datetime.datetime:\n date = datetime.datetime(date.year, date.month, 
date.day, tzinfo=ZoneInfo(settings.TIME_ZONE))\n delta = timezone.now() - date\n return delta.days\n\[email protected]\ndef split(text, splitter=None):\n return text.split(splitter)\n\nregister.filter(\"maybewordwrap\", stringfilter(wrap_text_if_unwrapped))\n\nregister.filter(\"wordwrap\", stringfilter(wordwrap))\n\[email protected](name=\"compress_empty_lines\")\ndef compress_empty_lines(text):\n text = re.sub(\"( *\\n){3,}\", \"\\n\\n\", text)\n return text\n\[email protected](name='linebreaks_crlf')\ndef linebreaks_crlf(text):\n \"\"\"\n Normalize all linebreaks to CRLF.\n \"\"\"\n # First, map CRLF to LF\n text = text.replace(\"\\r\\n\", \"\\n\")\n # Next, map lone CRs to LFs\n text = text.replace(\"\\r\", \"\\n\")\n # Finally, map LFs to CRLFs\n text = text.replace(\"\\n\", \"\\r\\n\")\n return text\n\[email protected](name='linebreaks_lf')\ndef linebreaks_lf(text):\n \"\"\"\n Normalize all linebreaks to LF.\n \"\"\"\n # First, map CRLF to LF\n text = text.replace(\"\\r\\n\", \"\\n\")\n # Finally, map lone CRs to LFs\n text = text.replace(\"\\r\", \"\\n\")\n return text\n\[email protected](name='clean_whitespace')\ndef clean_whitespace(text):\n \"\"\"\n Map all ASCII control characters (0x00-0x1F) to spaces, and\n remove unnecessary spaces.\n \"\"\"\n text = re.sub(\"[\\000-\\040]+\", \" \", text)\n return text.strip()\n\[email protected](name='unescape')\ndef unescape(text):\n \"\"\"\n Unescape />/<\n \"\"\"\n text = text.replace(\">\", \">\")\n text = text.replace(\"<\", \"<\")\n text = text.replace(\"&\", \"&\")\n text = text.replace(\"<br>\", \"\\n\")\n text = text.replace(\"<br/>\", \"\\n\")\n return text\n\[email protected](name='new_enough')\ndef new_enough(x,request):\n days = int(settings.USER_PREFERENCE_DEFAULTS[\"new_enough\"])\n value = request.COOKIES.get(\"new_enough\", None)\n if value and value.isdigit():\n days = int(value)\n return x < days\n\[email protected](name='expires_soon')\ndef expires_soon(x,request):\n days = int(settings.USER_PREFERENCE_DEFAULTS[\"expires_soon\"])\n value = request.COOKIES.get(\"expires_soon\", None)\n if value and value.isdigit():\n days = int(value)\n return x > -days\n\[email protected](name='startswith')\ndef startswith(x, y):\n return str(x).startswith(y)\n\n\[email protected](name='removeprefix', is_safe=False)\ndef removeprefix(value, prefix):\n \"\"\"Remove an exact-match prefix\n \n The is_safe flag is False because indiscriminate use of this could result in non-safe output.\n See https://docs.djangoproject.com/en/2.2/howto/custom-template-tags/#filters-and-auto-escaping\n which describes the possibility that removing characters from an escaped string may introduce\n HTML-unsafe output.\n \"\"\"\n base = str(value)\n if base.startswith(prefix):\n return base[len(prefix):]\n else:\n return base\n\n\[email protected]\ndef has_role(user, role_names):\n from ietf.ietfauth.utils import has_role\n if not user:\n return False\n return has_role(user, role_names.split(','))\n\[email protected]\ndef ad_area(user):\n if user and user.is_authenticated:\n from ietf.group.models import Group\n g = Group.objects.filter(role__name__in=(\"pre-ad\", \"ad\"), role__person__user=user)\n if g:\n return g[0].acronym\n return None\n\[email protected]\ndef format_history_text(text, trunc_words=25):\n \"\"\"Run history text through some cleaning and add ellipsis if it's too long.\"\"\"\n full = mark_safe(bleach_cleaner.clean(text))\n full = bleach_linker.linkify(urlize_ietf_docs(full))\n\n return format_snippet(full, trunc_words)\n\[email protected]\ndef 
format_snippet(text, trunc_words=25): \n # urlize if there aren't already links present\n text = bleach_linker.linkify(text)\n full = keep_spacing(collapsebr(linebreaksbr(mark_safe(sanitize_fragment(text)))))\n snippet = truncatewords_html(full, trunc_words)\n if snippet != full:\n return mark_safe('<div class=\"snippet\">%s<button type=\"button\" aria-label=\"Expand\" class=\"btn btn-sm btn-primary show-all\"><i class=\"bi bi-caret-down\"></i></button></div><div class=\"d-none full\">%s</div>' % (snippet, full))\n return mark_safe(full)\n\[email protected]_tag\ndef doc_edit_button(url_name, *args, **kwargs):\n \"\"\"Given URL name/args/kwargs, looks up the URL just like \"url\" tag and returns a properly formatted button for the document material tables.\"\"\"\n return mark_safe('<a class=\"btn btn-primary btn-sm\" href=\"%s\">Edit</a>' % (urlreverse(url_name, args=args, kwargs=kwargs)))\n\[email protected]\ndef textify(text):\n text = re.sub(\"</?b>\", \"*\", text)\n text = re.sub(\"</?i>\", \"/\", text)\n # There are probably additional conversions we should apply here\n return text\n\[email protected]\ndef state(doc, slug):\n if slug == \"stream\": # convenient shorthand\n slug = \"%s-stream-%s\" % (doc.type_id, doc.stream_id)\n return doc.get_state(slug)\n\[email protected]\ndef statehelp(state):\n \"Output help icon with tooltip for state.\"\n from django.urls import reverse as urlreverse\n tooltip = escape(strip_tags(state.desc))\n url = urlreverse('ietf.doc.views_help.state_help', kwargs=dict(type=state.type_id)) + \"#\" + state.slug\n return mark_safe('<a class=\"state-help-icon\" href=\"%s\" title=\"%s\">?</a>' % (url, tooltip))\n\[email protected]\ndef sectionlevel(section_number):\n return section_number.count(\".\") + 1\n\ndef _test():\n import doctest\n doctest.testmod()\n\nif __name__ == \"__main__\":\n _test()\n\[email protected]\ndef plural(text, seq, arg='s'):\n \"Similar to pluralize, but looks at the text, too\"\n from django.template.defaultfilters import pluralize\n if text.endswith('s'):\n return text\n else:\n return text + pluralize(len(seq), arg)\n\[email protected]\ndef ics_esc(text):\n text = re.sub(r\"([\\n,;\\\\])\", r\"\\\\\\1\", text)\n return text\n\n\[email protected]_tag\ndef ics_date_time(dt, tzname):\n \"\"\"Render a datetime as an iCalendar date-time\n\n dt a datetime, localized to the timezone to be displayed\n tzname is the name for this timezone\n\n Caller must arrange for a VTIMEZONE for the tzname to be included in the iCalendar file.\n Output includes a ':'. 
Use like:\n DTSTART{% ics_date_time timestamp 'America/Los_Angeles' %}\n to get\n DTSTART;TZID=America/Los_Angeles:20221021T111200\n\n >>> ics_date_time(datetime.datetime(2022,1,2,3,4,5), 'utc')\n ':20220102T030405Z'\n\n >>> ics_date_time(datetime.datetime(2022,1,2,3,4,5), 'UTC')\n ':20220102T030405Z'\n\n >>> ics_date_time(datetime.datetime(2022,1,2,3,4,5), 'America/Los_Angeles')\n ';TZID=America/Los_Angeles:20220102T030405'\n \"\"\"\n timestamp = dt.strftime('%Y%m%dT%H%M%S')\n if tzname.lower() == 'utc':\n return f':{timestamp}Z'\n else:\n return f';TZID={ics_esc(tzname)}:{timestamp}'\n\n\[email protected]\ndef consensus(doc):\n \"\"\"Returns document consensus Yes/No/Unknown.\"\"\"\n event = doc.latest_event(ConsensusDocEvent,type=\"changed_consensus\")\n if event:\n if event.consensus:\n return \"Yes\"\n else:\n return \"No\"\n else:\n return \"Unknown\"\n\n\[email protected]\ndef std_level_to_label_format(doc):\n \"\"\"Returns valid Bootstrap classes to label a status level badge.\"\"\"\n if doc.type_id == \"rfc\":\n if doc.related_that(\"obs\"):\n return \"obs\"\n else:\n return doc.std_level_id\n else:\n return \"draft\"\n\n\[email protected]\ndef pos_to_label_format(text):\n \"\"\"Returns valid Bootstrap classes to label a ballot position.\"\"\"\n return {\n 'Yes': 'bg-yes text-light',\n 'No Objection': 'bg-noobj text-dark',\n 'Abstain': 'bg-abstain text-light',\n 'Discuss': 'bg-discuss text-light',\n 'Block': 'bg-discuss text-light',\n 'Recuse': 'bg-recuse text-light',\n 'Not Ready': 'bg-discuss text-light',\n 'Need More Time': 'bg-discuss text-light',\n 'Concern': 'bg-discuss text-light',\n\n }.get(str(text), 'bg-norecord text-dark')\n\[email protected]\ndef pos_to_border_format(text):\n \"\"\"Returns valid Bootstrap classes to label a ballot position border.\"\"\"\n return {\n 'Yes': 'border-yes',\n 'No Objection': 'border-noobj',\n 'Abstain': 'border-abstain',\n 'Discuss': 'border-discuss',\n 'Block': 'border-discuss',\n 'Recuse': 'border-recuse',\n 'Not Ready': 'border-discuss',\n 'Need More Time': 'border-discuss',\n 'Concern': 'border-discuss',\n }.get(str(text), 'border-norecord')\n\[email protected]\ndef capfirst_allcaps(text):\n \"\"\"Like capfirst, except it doesn't lowercase words in ALL CAPS.\"\"\"\n result = text\n i = False\n for token in re.split(r\"(\\W+)\", striptags(text)):\n if not re.match(r\"^[A-Z]+$\", token):\n if not i:\n result = result.replace(token, token.capitalize())\n i = True\n else:\n result = result.replace(token, token.lower())\n return result\n\[email protected]\ndef lower_allcaps(text):\n \"\"\"Like lower, except it doesn't lowercase words in ALL CAPS.\"\"\"\n result = text\n for token in re.split(r\"(\\W+)\", striptags(text)):\n if not re.match(r\"^[A-Z]+$\", token):\n result = result.replace(token, token.lower())\n return result\n\[email protected]\ndef document_content(doc):\n if doc is None:\n return None\n content = doc.text_or_error() # pyflakes:ignore\n return content\n\[email protected]\ndef format_timedelta(timedelta):\n s = timedelta.seconds\n hours, remainder = divmod(s, 3600)\n minutes, seconds = divmod(remainder, 60)\n return '{hours:02d}:{minutes:02d}'.format(hours=hours,minutes=minutes)\n\[email protected]()\ndef comma_separated_list(seq, end_word=\"and\"):\n if len(seq) < 2:\n return \"\".join(seq)\n else:\n return \", \".join(seq[:-1]) + \" %s %s\"%(end_word, seq[-1])\n\[email protected]()\ndef zaptmp(s):\n return re.sub(r'/tmp/tmp[^/]+/', '', s)\n\[email protected]()\ndef rfcbis(s):\n m = re.search(r'^.*-rfc(\\d+)-?bis(-.*)?$', 
s)\n return None if m is None else 'rfc' + m.group(1) \n\[email protected]\n@stringfilter\ndef urlize(value):\n raise RuntimeError(\"Use linkify from textfilters instead of urlize\")\n \[email protected]\n@stringfilter\ndef charter_major_rev(rev):\n return rev[:2]\n\[email protected]\n@stringfilter\ndef charter_minor_rev(rev):\n return rev[3:5]\n\[email protected]()\ndef can_defer(user,doc):\n ballot = doc.latest_event(BallotDocEvent, type=\"created_ballot\")\n if ballot and (doc.type_id == \"draft\" or doc.type_id == \"conflrev\" or doc.type_id==\"statchg\") and doc.stream_id == 'ietf' and has_role(user, 'Area Director,Secretariat'):\n return True\n else:\n return False\n\[email protected]()\ndef can_clear_ballot(user, doc):\n return can_defer(user, doc)\n\[email protected]()\ndef can_request_rfc_publication(user, doc):\n return utils_can_request_rfc_publication(user, doc)\n\[email protected]()\ndef can_ballot(user,doc):\n if doc.stream_id == \"irtf\" and doc.type_id == \"draft\":\n return has_role(user,\"IRSG Member\")\n elif doc.stream_id == \"editorial\" and doc.type_id == \"draft\":\n return has_role(user,\"RSAB Member\")\n else:\n return user.person.role_set.filter(name=\"ad\", group__type=\"area\", group__state=\"active\")\n\n\[email protected]\ndef action_holder_badge(action_holder):\n \"\"\"Add a warning tag if action holder age exceeds limit\n\n >>> from ietf.doc.factories import DocumentActionHolderFactory\n >>> old_limit = settings.DOC_ACTION_HOLDER_AGE_LIMIT_DAYS\n >>> settings.DOC_ACTION_HOLDER_AGE_LIMIT_DAYS = 15\n >>> action_holder_badge(DocumentActionHolderFactory())\n ''\n\n >>> action_holder_badge(DocumentActionHolderFactory(time_added=timezone.now() - datetime.timedelta(days=15)))\n ''\n\n >>> action_holder_badge(DocumentActionHolderFactory(time_added=timezone.now() - datetime.timedelta(days=16)))\n '<span class=\"badge rounded-pill text-bg-danger\" title=\"In state for 16 days; goal is <15 days.\"><i class=\"bi bi-clock-fill\"></i> 16</span>'\n\n >>> action_holder_badge(DocumentActionHolderFactory(time_added=timezone.now() - datetime.timedelta(days=30)))\n '<span class=\"badge rounded-pill text-bg-danger\" title=\"In state for 30 days; goal is <15 days.\"><i class=\"bi bi-clock-fill\"></i> 30</span>'\n\n >>> settings.DOC_ACTION_HOLDER_AGE_LIMIT_DAYS = old_limit\n \"\"\"\n age_limit = settings.DOC_ACTION_HOLDER_AGE_LIMIT_DAYS\n age = (timezone.now() - action_holder.time_added).days\n if age > age_limit:\n return mark_safe(\n '<span class=\"badge rounded-pill text-bg-danger\" title=\"In state for %d day%s; goal is <%d days.\"><i class=\"bi bi-clock-fill\"></i> %d</span>'\n % (age, \"s\" if age != 1 else \"\", age_limit, age)\n )\n else:\n return \"\" # no alert needed\n\n\[email protected]\ndef is_regular_agenda_item(assignment):\n \"\"\"Is this agenda item a regular session item?\n\n A regular item appears as a sub-entry in a timeslot within the agenda\n\n >>> from collections import namedtuple # use to build mock objects\n >>> mock_timeslot = namedtuple('t2', ['slug'])\n >>> mock_assignment = namedtuple('t1', ['slot_type']) # slot_type must be a callable\n >>> factory = lambda t: mock_assignment(slot_type=lambda: mock_timeslot(slug=t))\n >>> is_regular_agenda_item(factory('regular'))\n True\n\n >>> any(is_regular_agenda_item(factory(t)) for t in ['plenary', 'break', 'reg', 'other', 'officehours'])\n False\n\n >>> is_regular_agenda_item(None)\n False\n \"\"\"\n return assignment is not None and assignment.slot_type().slug == 'regular'\n\[email protected]\ndef 
is_plenary_agenda_item(assignment):\n \"\"\"Is this agenda item a regular session item?\n\n A regular item appears as a sub-entry in a timeslot within the agenda\n\n >>> from collections import namedtuple # use to build mock objects\n >>> mock_timeslot = namedtuple('t2', ['slug'])\n >>> mock_assignment = namedtuple('t1', ['slot_type']) # slot_type must be a callable\n >>> factory = lambda t: mock_assignment(slot_type=lambda: mock_timeslot(slug=t))\n >>> is_plenary_agenda_item(factory('plenary'))\n True\n\n >>> any(is_plenary_agenda_item(factory(t)) for t in ['regular', 'break', 'reg', 'other', 'officehours'])\n False\n\n >>> is_plenary_agenda_item(None)\n False\n \"\"\"\n return assignment is not None and assignment.slot_type().slug == 'plenary'\n\[email protected]\ndef is_special_agenda_item(assignment):\n \"\"\"Is this agenda item a special item?\n\n Special items appear as top-level agenda entries with their own timeslot information.\n\n >>> from collections import namedtuple # use to build mock objects\n >>> mock_timeslot = namedtuple('t2', ['slug'])\n >>> mock_assignment = namedtuple('t1', ['slot_type']) # slot_type must be a callable\n >>> factory = lambda t: mock_assignment(slot_type=lambda: mock_timeslot(slug=t))\n >>> all(is_special_agenda_item(factory(t)) for t in ['break', 'reg', 'other', 'officehours'])\n True\n\n >>> any(is_special_agenda_item(factory(t)) for t in ['regular', 'plenary'])\n False\n\n >>> is_special_agenda_item(None)\n False\n \"\"\"\n return assignment is not None and assignment.slot_type().slug in [\n 'break',\n 'reg',\n 'other',\n 'officehours',\n ]\n\[email protected]\ndef should_show_agenda_session_buttons(assignment):\n \"\"\"Should this agenda item show the session buttons (chat link, etc)?\n\n In IETF-112 and earlier, office hours sessions were designated by a name ending\n with ' office hours' and belonged to the IESG or some other group. This led to\n incorrect session buttons being displayed. 
Suppress session buttons for\n when name ends with 'office hours' in the pre-112 meetings.\n >>> from collections import namedtuple # use to build mock objects\n >>> mock_meeting = namedtuple('t3', ['number'])\n >>> mock_session = namedtuple('t2', ['name'])\n >>> mock_assignment = namedtuple('t1', ['meeting', 'session']) # meeting must be a callable\n >>> factory = lambda num, name: mock_assignment(session=mock_session(name), meeting=lambda: mock_meeting(num))\n >>> test_cases = [('105', 'acme office hours'), ('112', 'acme office hours')]\n >>> any(should_show_agenda_session_buttons(factory(*tc)) for tc in test_cases)\n False\n >>> test_cases = [('interim-2020-acme-113', 'acme'), ('113', 'acme'), ('150', 'acme'), ('105', 'acme'),]\n >>> test_cases.extend([('112', 'acme'), ('interim-2020-acme-113', 'acme office hours')])\n >>> test_cases.extend([('113', 'acme office hours'), ('150', 'acme office hours')])\n >>> all(should_show_agenda_session_buttons(factory(*tc)) for tc in test_cases)\n True\n >>> should_show_agenda_session_buttons(None)\n False\n \"\"\"\n if assignment is None:\n return False\n num = assignment.meeting().number\n if num.isdigit() and int(num) <= settings.MEETING_LEGACY_OFFICE_HOURS_END:\n return not assignment.session.name.lower().endswith(' office hours')\n else:\n return True\n\n\[email protected]_tag\ndef absurl(viewname, **kwargs):\n \"\"\"Get the absolute URL for a view by name\n\n Uses settings.IDTRACKER_BASE_URL as the base.\n \"\"\"\n return urljoin(settings.IDTRACKER_BASE_URL, urlreverse(viewname, kwargs=kwargs))\n\n\[email protected]\ndef is_valid_url(url):\n \"\"\"\n Check if the given URL is syntactically valid\n \"\"\"\n try:\n validate_url(url)\n except ValidationError:\n return False\n return True\n\n\[email protected]\ndef badgeify(blob):\n \"\"\"\n Add an appropriate bootstrap badge around \"text\", based on its contents.\n \"\"\"\n config = [\n (r\"rejected|not ready\", \"danger\", \"x-lg\"),\n (r\"complete|accepted|ready\", \"success\", \"\"),\n (r\"has nits|almost ready\", \"info\", \"info-lg\"),\n (r\"has issues\", \"warning\", \"exclamation-lg\"),\n (r\"assigned\", \"info\", \"person-plus-fill\"),\n (r\"will not review|overtaken by events|withdrawn\", \"secondary\", \"dash-lg\"),\n (r\"no response\", \"warning\", \"question-lg\"),\n ]\n text = str(blob)\n\n for pattern, color, icon in config:\n if re.search(pattern, text, flags=re.IGNORECASE):\n # Shorten the badge text\n text = re.sub(r\"with \", \"w/\", text, flags=re.IGNORECASE)\n text = re.sub(r\"document\", \"doc\", text, flags=re.IGNORECASE)\n text = re.sub(r\"will not\", \"won't\", text, flags=re.IGNORECASE)\n\n return mark_safe(\n f\"\"\"\n <span class=\"badge rounded-pill text-bg-{color} text-wrap\">\n <i class=\"bi bi-{icon}\"></i> {text.capitalize()}\n </span>\n \"\"\"\n )\n\n return text\n",
"path": "ietf/doc/templatetags/ietf_filters.py"
}
] | [
{
"content": "# Copyright The IETF Trust 2007-2023, All Rights Reserved\n# -*- coding: utf-8 -*-\n\n\nimport datetime\nimport re\nfrom urllib.parse import urljoin\nfrom zoneinfo import ZoneInfo\n\nfrom django import template\nfrom django.conf import settings\nfrom django.utils.html import escape\nfrom django.template.defaultfilters import truncatewords_html, linebreaksbr, stringfilter, striptags\nfrom django.utils.safestring import mark_safe, SafeData\nfrom django.utils.html import strip_tags\nfrom django.utils.encoding import force_str\nfrom django.urls import reverse as urlreverse\nfrom django.core.cache import cache\nfrom django.core.exceptions import ValidationError\nfrom django.urls import NoReverseMatch\nfrom django.utils import timezone\n\nimport debug # pyflakes:ignore\n\nfrom ietf.doc.models import BallotDocEvent, Document\nfrom ietf.doc.models import ConsensusDocEvent\nfrom ietf.ietfauth.utils import can_request_rfc_publication as utils_can_request_rfc_publication\nfrom ietf.utils.html import sanitize_fragment\nfrom ietf.utils import log\nfrom ietf.doc.utils import prettify_std_name\nfrom ietf.utils.text import wordwrap, fill, wrap_text_if_unwrapped, bleach_linker, bleach_cleaner, validate_url\n\nregister = template.Library()\n\ndef collapsebr(html):\n return re.sub('(<(br ?/|/p)>[ \\n]*)(<(br) ?/?>[ \\n]*)*(<(br|p) ?/?>[ \\n]*)', '\\\\1\\\\5', html)\n\[email protected]\ndef indent(value, numspaces=2):\n replacement = \"\\n\" + \" \" * int(numspaces)\n res = value.replace(\"\\n\", replacement)\n if res.endswith(replacement):\n res = res[:-int(numspaces)] # fix up superfluous spaces\n return res\n\[email protected]\ndef unindent(value):\n \"\"\"Remove indentation from string.\"\"\"\n return re.sub(\"\\n +\", \"\\n\", value)\n\n# there's an \"ahref -> a href\" in GEN_UTIL\n# but let's wait until we understand what that's for.\[email protected](name='make_one_per_line')\ndef make_one_per_line(value):\n \"\"\"\n Turn a comma-separated list into a carriage-return-seperated list.\n\n >>> force_str(make_one_per_line(\"a, b, c\"))\n 'a\\\\nb\\\\nc'\n\n Pass through non-strings:\n\n >>> make_one_per_line([1, 2])\n [1, 2]\n\n >>> make_one_per_line(None)\n\n \"\"\"\n if value and isinstance(value, str):\n return re.sub(\", ?\", \"\\n\", value)\n elif value and isinstance(value, bytes):\n log.assertion('isinstance(value, str)')\n else:\n return value\n\[email protected](name='keep_spacing')\ndef keep_spacing(value):\n \"\"\"\n Replace any two spaces with one and one space so that\n HTML output doesn't collapse them.\"\"\"\n return value.replace(' ', ' ')\n\[email protected](name='format_textarea')\ndef format_textarea(value):\n \"\"\"\n Escapes HTML, except for <b>, </b>, <br>.\n\n Adds <br> at the end like the builtin linebreaksbr.\n\n Also calls keep_spacing.\"\"\"\n return keep_spacing(linebreaksbr(escape(value).replace('<b>','<b>').replace('</b>','</b>').replace('<br>','<br>')))\n\[email protected](name='sanitize')\ndef sanitize(value):\n \"\"\"Sanitizes an HTML fragment.\n This means both fixing broken html and restricting elements and\n attributes to those deemed acceptable. 
See ietf/utils/html.py\n for the details.\n \"\"\"\n return mark_safe(sanitize_fragment(value))\n\n\n# For use with ballot view\[email protected](name='bracket')\ndef square_brackets(value):\n \"\"\"Adds square brackets around text.\"\"\"\n if isinstance(value, str):\n if value == \"\":\n value = \" \"\n return \"[ %s ]\" % value\n elif isinstance(value, bytes):\n log.assertion('isinstance(value, str)')\n elif value > 0:\n return \"[ X ]\"\n elif value < 0:\n return \"[ . ]\"\n else:\n return \"[ ]\"\n\[email protected](name='bracketpos')\ndef bracketpos(pos,posslug):\n if pos.pos.slug==posslug:\n return \"[ X ]\"\n elif posslug in [x.slug for x in pos.old_positions]:\n return \"[ . ]\"\n else:\n return \"[ ]\"\n\nregister.filter('fill', fill)\n\[email protected]\ndef prettystdname(string, space=\" \"):\n from ietf.doc.utils import prettify_std_name\n return prettify_std_name(force_str(string or \"\"), space)\n\[email protected]\ndef rfceditor_info_url(rfcnum : str):\n \"\"\"Link to the RFC editor info page for an RFC\"\"\"\n return urljoin(settings.RFC_EDITOR_INFO_BASE_URL, f'rfc{rfcnum}')\n\n\ndef doc_name(name):\n \"\"\"Check whether a given document exists, and return its canonical name\"\"\"\n\n def find_unique(n):\n key = hash(n)\n found = cache.get(key)\n if not found:\n exact = Document.objects.filter(name=n).first()\n found = exact.name if exact else \"_\"\n # TODO review this cache policy (and the need for these entire function)\n cache.set(key, found, timeout=60*60*24) # cache for one day\n return None if found == \"_\" else found\n\n # chop away extension\n extension_split = re.search(r\"^(.+)\\.(txt|ps|pdf|html)$\", name)\n if extension_split:\n name = extension_split.group(1)\n\n if find_unique(name):\n return name\n\n # check for embedded rev - this may be ambiguous, so don't\n # chop it off if we don't find a match\n rev_split = re.search(r\"^(charter-.+)-(\\d{2}-\\d{2})$\", name) or re.search(\n r\"^(.+)-(\\d{2}|[1-9]\\d{2,})$\", name\n )\n if rev_split:\n name = rev_split.group(1)\n if find_unique(name):\n return name\n\n return \"\"\n\n\ndef link_charter_doc_match(match):\n if not doc_name(match[0]):\n return match[0]\n url = urlreverse(\n \"ietf.doc.views_doc.document_main\",\n kwargs=dict(name=match[1][:-1], rev=match[2]),\n )\n return f'<a href=\"{url}\">{match[0]}</a>'\n\n\ndef link_non_charter_doc_match(match):\n name = match[0]\n # handle \"I-D.*\"\" reference-style matches\n name = re.sub(r\"^i-d\\.(.*)\", r\"draft-\\1\", name, flags=re.IGNORECASE)\n cname = doc_name(name)\n if not cname:\n return match[0]\n if name == cname:\n url = urlreverse(\"ietf.doc.views_doc.document_main\", kwargs=dict(name=cname))\n return f'<a href=\"{url}\">{match[0]}</a>'\n\n # if we get here, the name probably has a version number and/or extension at the end\n rev_split = re.search(r\"^(\" + re.escape(cname) + r\")-(\\d{2,})\", name)\n if rev_split:\n name = rev_split.group(1)\n else:\n url = urlreverse(\"ietf.doc.views_doc.document_main\", kwargs=dict(name=cname))\n return f'<a href=\"{url}\">{match[0]}</a>'\n\n cname = doc_name(name)\n if not cname:\n return match[0]\n if name == cname:\n try:\n url = urlreverse(\n \"ietf.doc.views_doc.document_main\",\n kwargs=dict(name=cname, rev=rev_split.group(2)),\n )\n except NoReverseMatch:\n return match[0]\n return f'<a href=\"{url}\">{match[0]}</a>'\n\n # if we get here, we can't linkify\n return match[0]\n\n\ndef link_other_doc_match(match):\n doc = match[2].strip().lower()\n rev = match[3]\n if not doc_name(doc + rev):\n return 
match[0]\n url = urlreverse(\"ietf.doc.views_doc.document_main\", kwargs=dict(name=doc + rev))\n return f'<a href=\"{url}\">{match[1]}</a>'\n\[email protected](name=\"urlize_ietf_docs\", is_safe=True, needs_autoescape=True)\ndef urlize_ietf_docs(string, autoescape=None):\n \"\"\"\n Make occurrences of RFC NNNN and draft-foo-bar links to the doc pages.\n \"\"\"\n if autoescape and not isinstance(string, SafeData):\n if \"<\" in string:\n string = escape(string)\n else:\n string = mark_safe(string)\n string = re.sub(\n r\"\\b(?<![/\\-:=#\\\"\\'])(charter-(?:[\\d\\w\\.+]+-)*)(\\d{2}(?:-\\d{2}))(\\.(?:txt|ps|pdf|html))?\\b\",\n link_charter_doc_match,\n string,\n flags=re.IGNORECASE | re.ASCII,\n )\n string = re.sub(\n r\"\\b(?<![/\\-:=#\\\"\\'])((?:draft-|i-d\\.|bofreq-|conflict-review-|status-change-)[\\d\\w\\.+-]+(?![-@]))\",\n link_non_charter_doc_match,\n string,\n flags=re.IGNORECASE | re.ASCII,\n )\n string = re.sub(\n r\"\\b(?<![/\\-:=#\\\"\\'])((RFC|BCP|STD|FYI) *\\n? *0*(\\d+))\\b\",\n link_other_doc_match,\n string,\n flags=re.IGNORECASE | re.ASCII,\n )\n\n return mark_safe(string)\n\n\nurlize_ietf_docs = stringfilter(urlize_ietf_docs)\n\[email protected](name='urlize_related_source_list', is_safe=True, document_html=False)\ndef urlize_related_source_list(related, document_html=False):\n \"\"\"Convert a list of RelatedDocuments into list of links using the source document's canonical name\"\"\"\n links = []\n names = set()\n titles = set()\n for rel in related:\n name=rel.source.name\n title = rel.source.title\n if name in names and title in titles:\n continue\n names.add(name)\n titles.add(title)\n url = urlreverse('ietf.doc.views_doc.document_main' if document_html is False else 'ietf.doc.views_doc.document_html', kwargs=dict(name=name))\n name = escape(name)\n title = escape(title)\n links.append(mark_safe(\n '<a href=\"%(url)s\" title=\"%(title)s\">%(name)s</a>' % dict(name=prettify_std_name(name),\n title=title,\n url=url)\n ))\n return links\n \[email protected](name='urlize_related_target_list', is_safe=True, document_html=False)\ndef urlize_related_target_list(related, document_html=False):\n \"\"\"Convert a list of RelatedDocuments into list of links using the target document's canonical name\"\"\"\n links = []\n for rel in related:\n name=rel.target.name\n title = rel.target.title\n url = urlreverse('ietf.doc.views_doc.document_main' if document_html is False else 'ietf.doc.views_doc.document_html', kwargs=dict(name=name))\n name = escape(name)\n title = escape(title)\n links.append(mark_safe(\n '<a href=\"%(url)s\" title=\"%(title)s\">%(name)s</a>' % dict(name=prettify_std_name(name),\n title=title,\n url=url)\n ))\n return links\n \[email protected](name='dashify')\ndef dashify(string):\n \"\"\"\n Replace each character in string with '-', to produce\n an underline effect for plain text files.\n \"\"\"\n return re.sub('.', '-', string)\n\[email protected]\ndef underline(string):\n \"\"\"Return string with an extra line underneath of dashes, for plain text underlining.\"\"\"\n return string + \"\\n\" + (\"-\" * len(string))\n\[email protected](name='timesince_days')\ndef timesince_days(date):\n \"\"\"Returns the number of days since 'date' (relative to now)\n\n >>> timesince_days(timezone.now() - datetime.timedelta(days=2))\n 2\n\n >>> tz = ZoneInfo(settings.TIME_ZONE)\n >>> timesince_days(timezone.now().astimezone(tz).date() - datetime.timedelta(days=2))\n 2\n\n \"\"\"\n if date.__class__ is not datetime.datetime:\n date = datetime.datetime(date.year, date.month, 
date.day, tzinfo=ZoneInfo(settings.TIME_ZONE))\n delta = timezone.now() - date\n return delta.days\n\[email protected]\ndef split(text, splitter=None):\n return text.split(splitter)\n\nregister.filter(\"maybewordwrap\", stringfilter(wrap_text_if_unwrapped))\n\nregister.filter(\"wordwrap\", stringfilter(wordwrap))\n\[email protected](name=\"compress_empty_lines\")\ndef compress_empty_lines(text):\n text = re.sub(\"( *\\n){3,}\", \"\\n\\n\", text)\n return text\n\[email protected](name='linebreaks_crlf')\ndef linebreaks_crlf(text):\n \"\"\"\n Normalize all linebreaks to CRLF.\n \"\"\"\n # First, map CRLF to LF\n text = text.replace(\"\\r\\n\", \"\\n\")\n # Next, map lone CRs to LFs\n text = text.replace(\"\\r\", \"\\n\")\n # Finally, map LFs to CRLFs\n text = text.replace(\"\\n\", \"\\r\\n\")\n return text\n\[email protected](name='linebreaks_lf')\ndef linebreaks_lf(text):\n \"\"\"\n Normalize all linebreaks to LF.\n \"\"\"\n # First, map CRLF to LF\n text = text.replace(\"\\r\\n\", \"\\n\")\n # Finally, map lone CRs to LFs\n text = text.replace(\"\\r\", \"\\n\")\n return text\n\[email protected](name='clean_whitespace')\ndef clean_whitespace(text):\n \"\"\"\n Map all ASCII control characters (0x00-0x1F) to spaces, and\n remove unnecessary spaces.\n \"\"\"\n text = re.sub(\"[\\000-\\040]+\", \" \", text)\n return text.strip()\n\[email protected](name='unescape')\ndef unescape(text):\n \"\"\"\n Unescape />/<\n \"\"\"\n text = text.replace(\">\", \">\")\n text = text.replace(\"<\", \"<\")\n text = text.replace(\"&\", \"&\")\n text = text.replace(\"<br>\", \"\\n\")\n text = text.replace(\"<br/>\", \"\\n\")\n return text\n\[email protected](name='new_enough')\ndef new_enough(x,request):\n days = int(settings.USER_PREFERENCE_DEFAULTS[\"new_enough\"])\n value = request.COOKIES.get(\"new_enough\", None)\n if value and value.isdigit():\n days = int(value)\n return x < days\n\[email protected](name='expires_soon')\ndef expires_soon(x,request):\n days = int(settings.USER_PREFERENCE_DEFAULTS[\"expires_soon\"])\n value = request.COOKIES.get(\"expires_soon\", None)\n if value and value.isdigit():\n days = int(value)\n return x > -days\n\[email protected](name='startswith')\ndef startswith(x, y):\n return str(x).startswith(y)\n\n\[email protected](name='removeprefix', is_safe=False)\ndef removeprefix(value, prefix):\n \"\"\"Remove an exact-match prefix\n \n The is_safe flag is False because indiscriminate use of this could result in non-safe output.\n See https://docs.djangoproject.com/en/2.2/howto/custom-template-tags/#filters-and-auto-escaping\n which describes the possibility that removing characters from an escaped string may introduce\n HTML-unsafe output.\n \"\"\"\n base = str(value)\n if base.startswith(prefix):\n return base[len(prefix):]\n else:\n return base\n\n\[email protected]\ndef has_role(user, role_names):\n from ietf.ietfauth.utils import has_role\n if not user:\n return False\n return has_role(user, role_names.split(','))\n\[email protected]\ndef ad_area(user):\n if user and user.is_authenticated:\n from ietf.group.models import Group\n g = Group.objects.filter(role__name__in=(\"pre-ad\", \"ad\"), role__person__user=user)\n if g:\n return g[0].acronym\n return None\n\[email protected]\ndef format_history_text(text, trunc_words=25):\n \"\"\"Run history text through some cleaning and add ellipsis if it's too long.\"\"\"\n full = mark_safe(bleach_cleaner.clean(text))\n full = bleach_linker.linkify(urlize_ietf_docs(full))\n\n return format_snippet(full, trunc_words)\n\[email protected]\ndef 
format_snippet(text, trunc_words=25): \n # urlize if there aren't already links present\n text = bleach_linker.linkify(text)\n full = keep_spacing(collapsebr(linebreaksbr(mark_safe(sanitize_fragment(text)))))\n snippet = truncatewords_html(full, trunc_words)\n if snippet != full:\n return mark_safe('<div class=\"snippet\">%s<button type=\"button\" aria-label=\"Expand\" class=\"btn btn-sm btn-primary show-all\"><i class=\"bi bi-caret-down\"></i></button></div><div class=\"d-none full\">%s</div>' % (snippet, full))\n return mark_safe(full)\n\[email protected]_tag\ndef doc_edit_button(url_name, *args, **kwargs):\n \"\"\"Given URL name/args/kwargs, looks up the URL just like \"url\" tag and returns a properly formatted button for the document material tables.\"\"\"\n return mark_safe('<a class=\"btn btn-primary btn-sm\" href=\"%s\">Edit</a>' % (urlreverse(url_name, args=args, kwargs=kwargs)))\n\[email protected]\ndef textify(text):\n text = re.sub(\"</?b>\", \"*\", text)\n text = re.sub(\"</?i>\", \"/\", text)\n # There are probably additional conversions we should apply here\n return text\n\[email protected]\ndef state(doc, slug):\n if slug == \"stream\": # convenient shorthand\n slug = \"%s-stream-%s\" % (doc.type_id, doc.stream_id)\n return doc.get_state(slug)\n\[email protected]\ndef statehelp(state):\n \"Output help icon with tooltip for state.\"\n from django.urls import reverse as urlreverse\n tooltip = escape(strip_tags(state.desc))\n url = urlreverse('ietf.doc.views_help.state_help', kwargs=dict(type=state.type_id)) + \"#\" + state.slug\n return mark_safe('<a class=\"state-help-icon\" href=\"%s\" title=\"%s\">?</a>' % (url, tooltip))\n\[email protected]\ndef sectionlevel(section_number):\n return section_number.count(\".\") + 1\n\ndef _test():\n import doctest\n doctest.testmod()\n\nif __name__ == \"__main__\":\n _test()\n\[email protected]\ndef plural(text, seq, arg='s'):\n \"Similar to pluralize, but looks at the text, too\"\n from django.template.defaultfilters import pluralize\n if text.endswith('s'):\n return text\n else:\n return text + pluralize(len(seq), arg)\n\[email protected]\ndef ics_esc(text):\n text = re.sub(r\"([\\n,;\\\\])\", r\"\\\\\\1\", text)\n return text\n\n\[email protected]_tag\ndef ics_date_time(dt, tzname):\n \"\"\"Render a datetime as an iCalendar date-time\n\n dt a datetime, localized to the timezone to be displayed\n tzname is the name for this timezone\n\n Caller must arrange for a VTIMEZONE for the tzname to be included in the iCalendar file.\n Output includes a ':'. 
Use like:\n DTSTART{% ics_date_time timestamp 'America/Los_Angeles' %}\n to get\n DTSTART;TZID=America/Los_Angeles:20221021T111200\n\n >>> ics_date_time(datetime.datetime(2022,1,2,3,4,5), 'utc')\n ':20220102T030405Z'\n\n >>> ics_date_time(datetime.datetime(2022,1,2,3,4,5), 'UTC')\n ':20220102T030405Z'\n\n >>> ics_date_time(datetime.datetime(2022,1,2,3,4,5), 'America/Los_Angeles')\n ';TZID=America/Los_Angeles:20220102T030405'\n \"\"\"\n timestamp = dt.strftime('%Y%m%dT%H%M%S')\n if tzname.lower() == 'utc':\n return f':{timestamp}Z'\n else:\n return f';TZID={ics_esc(tzname)}:{timestamp}'\n \[email protected]\ndef next_day(value):\n return value + datetime.timedelta(days=1)\n\n\[email protected]\ndef consensus(doc):\n \"\"\"Returns document consensus Yes/No/Unknown.\"\"\"\n event = doc.latest_event(ConsensusDocEvent,type=\"changed_consensus\")\n if event:\n if event.consensus:\n return \"Yes\"\n else:\n return \"No\"\n else:\n return \"Unknown\"\n\n\[email protected]\ndef std_level_to_label_format(doc):\n \"\"\"Returns valid Bootstrap classes to label a status level badge.\"\"\"\n if doc.type_id == \"rfc\":\n if doc.related_that(\"obs\"):\n return \"obs\"\n else:\n return doc.std_level_id\n else:\n return \"draft\"\n\n\[email protected]\ndef pos_to_label_format(text):\n \"\"\"Returns valid Bootstrap classes to label a ballot position.\"\"\"\n return {\n 'Yes': 'bg-yes text-light',\n 'No Objection': 'bg-noobj text-dark',\n 'Abstain': 'bg-abstain text-light',\n 'Discuss': 'bg-discuss text-light',\n 'Block': 'bg-discuss text-light',\n 'Recuse': 'bg-recuse text-light',\n 'Not Ready': 'bg-discuss text-light',\n 'Need More Time': 'bg-discuss text-light',\n 'Concern': 'bg-discuss text-light',\n\n }.get(str(text), 'bg-norecord text-dark')\n\[email protected]\ndef pos_to_border_format(text):\n \"\"\"Returns valid Bootstrap classes to label a ballot position border.\"\"\"\n return {\n 'Yes': 'border-yes',\n 'No Objection': 'border-noobj',\n 'Abstain': 'border-abstain',\n 'Discuss': 'border-discuss',\n 'Block': 'border-discuss',\n 'Recuse': 'border-recuse',\n 'Not Ready': 'border-discuss',\n 'Need More Time': 'border-discuss',\n 'Concern': 'border-discuss',\n }.get(str(text), 'border-norecord')\n\[email protected]\ndef capfirst_allcaps(text):\n \"\"\"Like capfirst, except it doesn't lowercase words in ALL CAPS.\"\"\"\n result = text\n i = False\n for token in re.split(r\"(\\W+)\", striptags(text)):\n if not re.match(r\"^[A-Z]+$\", token):\n if not i:\n result = result.replace(token, token.capitalize())\n i = True\n else:\n result = result.replace(token, token.lower())\n return result\n\[email protected]\ndef lower_allcaps(text):\n \"\"\"Like lower, except it doesn't lowercase words in ALL CAPS.\"\"\"\n result = text\n for token in re.split(r\"(\\W+)\", striptags(text)):\n if not re.match(r\"^[A-Z]+$\", token):\n result = result.replace(token, token.lower())\n return result\n\[email protected]\ndef document_content(doc):\n if doc is None:\n return None\n content = doc.text_or_error() # pyflakes:ignore\n return content\n\[email protected]\ndef format_timedelta(timedelta):\n s = timedelta.seconds\n hours, remainder = divmod(s, 3600)\n minutes, seconds = divmod(remainder, 60)\n return '{hours:02d}:{minutes:02d}'.format(hours=hours,minutes=minutes)\n\[email protected]()\ndef comma_separated_list(seq, end_word=\"and\"):\n if len(seq) < 2:\n return \"\".join(seq)\n else:\n return \", \".join(seq[:-1]) + \" %s %s\"%(end_word, seq[-1])\n\[email protected]()\ndef zaptmp(s):\n return re.sub(r'/tmp/tmp[^/]+/', '', 
s)\n\[email protected]()\ndef rfcbis(s):\n m = re.search(r'^.*-rfc(\\d+)-?bis(-.*)?$', s)\n return None if m is None else 'rfc' + m.group(1) \n\[email protected]\n@stringfilter\ndef urlize(value):\n raise RuntimeError(\"Use linkify from textfilters instead of urlize\")\n \[email protected]\n@stringfilter\ndef charter_major_rev(rev):\n return rev[:2]\n\[email protected]\n@stringfilter\ndef charter_minor_rev(rev):\n return rev[3:5]\n\[email protected]()\ndef can_defer(user,doc):\n ballot = doc.latest_event(BallotDocEvent, type=\"created_ballot\")\n if ballot and (doc.type_id == \"draft\" or doc.type_id == \"conflrev\" or doc.type_id==\"statchg\") and doc.stream_id == 'ietf' and has_role(user, 'Area Director,Secretariat'):\n return True\n else:\n return False\n\[email protected]()\ndef can_clear_ballot(user, doc):\n return can_defer(user, doc)\n\[email protected]()\ndef can_request_rfc_publication(user, doc):\n return utils_can_request_rfc_publication(user, doc)\n\[email protected]()\ndef can_ballot(user,doc):\n if doc.stream_id == \"irtf\" and doc.type_id == \"draft\":\n return has_role(user,\"IRSG Member\")\n elif doc.stream_id == \"editorial\" and doc.type_id == \"draft\":\n return has_role(user,\"RSAB Member\")\n else:\n return user.person.role_set.filter(name=\"ad\", group__type=\"area\", group__state=\"active\")\n\n\[email protected]\ndef action_holder_badge(action_holder):\n \"\"\"Add a warning tag if action holder age exceeds limit\n\n >>> from ietf.doc.factories import DocumentActionHolderFactory\n >>> old_limit = settings.DOC_ACTION_HOLDER_AGE_LIMIT_DAYS\n >>> settings.DOC_ACTION_HOLDER_AGE_LIMIT_DAYS = 15\n >>> action_holder_badge(DocumentActionHolderFactory())\n ''\n\n >>> action_holder_badge(DocumentActionHolderFactory(time_added=timezone.now() - datetime.timedelta(days=15)))\n ''\n\n >>> action_holder_badge(DocumentActionHolderFactory(time_added=timezone.now() - datetime.timedelta(days=16)))\n '<span class=\"badge rounded-pill text-bg-danger\" title=\"In state for 16 days; goal is <15 days.\"><i class=\"bi bi-clock-fill\"></i> 16</span>'\n\n >>> action_holder_badge(DocumentActionHolderFactory(time_added=timezone.now() - datetime.timedelta(days=30)))\n '<span class=\"badge rounded-pill text-bg-danger\" title=\"In state for 30 days; goal is <15 days.\"><i class=\"bi bi-clock-fill\"></i> 30</span>'\n\n >>> settings.DOC_ACTION_HOLDER_AGE_LIMIT_DAYS = old_limit\n \"\"\"\n age_limit = settings.DOC_ACTION_HOLDER_AGE_LIMIT_DAYS\n age = (timezone.now() - action_holder.time_added).days\n if age > age_limit:\n return mark_safe(\n '<span class=\"badge rounded-pill text-bg-danger\" title=\"In state for %d day%s; goal is <%d days.\"><i class=\"bi bi-clock-fill\"></i> %d</span>'\n % (age, \"s\" if age != 1 else \"\", age_limit, age)\n )\n else:\n return \"\" # no alert needed\n\n\[email protected]\ndef is_regular_agenda_item(assignment):\n \"\"\"Is this agenda item a regular session item?\n\n A regular item appears as a sub-entry in a timeslot within the agenda\n\n >>> from collections import namedtuple # use to build mock objects\n >>> mock_timeslot = namedtuple('t2', ['slug'])\n >>> mock_assignment = namedtuple('t1', ['slot_type']) # slot_type must be a callable\n >>> factory = lambda t: mock_assignment(slot_type=lambda: mock_timeslot(slug=t))\n >>> is_regular_agenda_item(factory('regular'))\n True\n\n >>> any(is_regular_agenda_item(factory(t)) for t in ['plenary', 'break', 'reg', 'other', 'officehours'])\n False\n\n >>> is_regular_agenda_item(None)\n False\n \"\"\"\n return assignment is 
not None and assignment.slot_type().slug == 'regular'\n\[email protected]\ndef is_plenary_agenda_item(assignment):\n \"\"\"Is this agenda item a regular session item?\n\n A regular item appears as a sub-entry in a timeslot within the agenda\n\n >>> from collections import namedtuple # use to build mock objects\n >>> mock_timeslot = namedtuple('t2', ['slug'])\n >>> mock_assignment = namedtuple('t1', ['slot_type']) # slot_type must be a callable\n >>> factory = lambda t: mock_assignment(slot_type=lambda: mock_timeslot(slug=t))\n >>> is_plenary_agenda_item(factory('plenary'))\n True\n\n >>> any(is_plenary_agenda_item(factory(t)) for t in ['regular', 'break', 'reg', 'other', 'officehours'])\n False\n\n >>> is_plenary_agenda_item(None)\n False\n \"\"\"\n return assignment is not None and assignment.slot_type().slug == 'plenary'\n\[email protected]\ndef is_special_agenda_item(assignment):\n \"\"\"Is this agenda item a special item?\n\n Special items appear as top-level agenda entries with their own timeslot information.\n\n >>> from collections import namedtuple # use to build mock objects\n >>> mock_timeslot = namedtuple('t2', ['slug'])\n >>> mock_assignment = namedtuple('t1', ['slot_type']) # slot_type must be a callable\n >>> factory = lambda t: mock_assignment(slot_type=lambda: mock_timeslot(slug=t))\n >>> all(is_special_agenda_item(factory(t)) for t in ['break', 'reg', 'other', 'officehours'])\n True\n\n >>> any(is_special_agenda_item(factory(t)) for t in ['regular', 'plenary'])\n False\n\n >>> is_special_agenda_item(None)\n False\n \"\"\"\n return assignment is not None and assignment.slot_type().slug in [\n 'break',\n 'reg',\n 'other',\n 'officehours',\n ]\n\[email protected]\ndef should_show_agenda_session_buttons(assignment):\n \"\"\"Should this agenda item show the session buttons (chat link, etc)?\n\n In IETF-112 and earlier, office hours sessions were designated by a name ending\n with ' office hours' and belonged to the IESG or some other group. This led to\n incorrect session buttons being displayed. 
Suppress session buttons for\n when name ends with 'office hours' in the pre-112 meetings.\n >>> from collections import namedtuple # use to build mock objects\n >>> mock_meeting = namedtuple('t3', ['number'])\n >>> mock_session = namedtuple('t2', ['name'])\n >>> mock_assignment = namedtuple('t1', ['meeting', 'session']) # meeting must be a callable\n >>> factory = lambda num, name: mock_assignment(session=mock_session(name), meeting=lambda: mock_meeting(num))\n >>> test_cases = [('105', 'acme office hours'), ('112', 'acme office hours')]\n >>> any(should_show_agenda_session_buttons(factory(*tc)) for tc in test_cases)\n False\n >>> test_cases = [('interim-2020-acme-113', 'acme'), ('113', 'acme'), ('150', 'acme'), ('105', 'acme'),]\n >>> test_cases.extend([('112', 'acme'), ('interim-2020-acme-113', 'acme office hours')])\n >>> test_cases.extend([('113', 'acme office hours'), ('150', 'acme office hours')])\n >>> all(should_show_agenda_session_buttons(factory(*tc)) for tc in test_cases)\n True\n >>> should_show_agenda_session_buttons(None)\n False\n \"\"\"\n if assignment is None:\n return False\n num = assignment.meeting().number\n if num.isdigit() and int(num) <= settings.MEETING_LEGACY_OFFICE_HOURS_END:\n return not assignment.session.name.lower().endswith(' office hours')\n else:\n return True\n\n\[email protected]_tag\ndef absurl(viewname, **kwargs):\n \"\"\"Get the absolute URL for a view by name\n\n Uses settings.IDTRACKER_BASE_URL as the base.\n \"\"\"\n return urljoin(settings.IDTRACKER_BASE_URL, urlreverse(viewname, kwargs=kwargs))\n\n\[email protected]\ndef is_valid_url(url):\n \"\"\"\n Check if the given URL is syntactically valid\n \"\"\"\n try:\n validate_url(url)\n except ValidationError:\n return False\n return True\n\n\[email protected]\ndef badgeify(blob):\n \"\"\"\n Add an appropriate bootstrap badge around \"text\", based on its contents.\n \"\"\"\n config = [\n (r\"rejected|not ready\", \"danger\", \"x-lg\"),\n (r\"complete|accepted|ready\", \"success\", \"\"),\n (r\"has nits|almost ready\", \"info\", \"info-lg\"),\n (r\"has issues\", \"warning\", \"exclamation-lg\"),\n (r\"assigned\", \"info\", \"person-plus-fill\"),\n (r\"will not review|overtaken by events|withdrawn\", \"secondary\", \"dash-lg\"),\n (r\"no response\", \"warning\", \"question-lg\"),\n ]\n text = str(blob)\n\n for pattern, color, icon in config:\n if re.search(pattern, text, flags=re.IGNORECASE):\n # Shorten the badge text\n text = re.sub(r\"with \", \"w/\", text, flags=re.IGNORECASE)\n text = re.sub(r\"document\", \"doc\", text, flags=re.IGNORECASE)\n text = re.sub(r\"will not\", \"won't\", text, flags=re.IGNORECASE)\n\n return mark_safe(\n f\"\"\"\n <span class=\"badge rounded-pill text-bg-{color} text-wrap\">\n <i class=\"bi bi-{icon}\"></i> {text.capitalize()}\n </span>\n \"\"\"\n )\n\n return text\n",
"path": "ietf/doc/templatetags/ietf_filters.py"
}
] | diff --git a/ietf/doc/templatetags/ietf_filters.py b/ietf/doc/templatetags/ietf_filters.py
index 8d9336b536..cfed7aa1db 100644
--- a/ietf/doc/templatetags/ietf_filters.py
+++ b/ietf/doc/templatetags/ietf_filters.py
@@ -539,6 +539,10 @@ def ics_date_time(dt, tzname):
return f':{timestamp}Z'
else:
return f';TZID={ics_esc(tzname)}:{timestamp}'
+
[email protected]
+def next_day(value):
+ return value + datetime.timedelta(days=1)
@register.filter
diff --git a/ietf/templates/meeting/upcoming.ics b/ietf/templates/meeting/upcoming.ics
index fb5b37d772..5eca7ec81d 100644
--- a/ietf/templates/meeting/upcoming.ics
+++ b/ietf/templates/meeting/upcoming.ics
@@ -25,7 +25,7 @@ SUMMARY:IETF {{ meeting.number }}{% if meeting.city %}
LOCATION:{{ meeting.city }},{{ meeting.country }}{% endif %}
CLASS:PUBLIC
DTSTART;VALUE=DATE{% if meeting.time_zone %};TZID={{ meeting.time_zone|ics_esc }}{% endif %}:{{ meeting.date|date:"Ymd" }}
-DTEND;VALUE=DATE{% if meeting.time_zone %};TZID={{ meeting.time_zone|ics_esc }}{% endif %}:{{ meeting.end_date|date:"Ymd" }}
+DTEND;VALUE=DATE{% if meeting.time_zone %};TZID={{ meeting.time_zone|ics_esc }}{% endif %}:{{ meeting.end_date|next_day|date:"Ymd" }}
DTSTAMP{% ics_date_time meeting.cached_updated|utc 'utc' %}
URL:{{ request.scheme }}://{{ request.get_host }}{% url 'agenda' num=meeting.number %}
END:VEVENT
|
microsoft__playwright-python-1497 | [Question]: How to get the right BrowserType from a device name?
### Your question
I noticed that the CLI is able to figure out the right `BrowserType` to use when it is launched from the command line:
```
playwright open --device="Desktop Safari" wikipedia.org # Webkit
playwright open --device="Desktop Firefox" wikipedia.org # Firefox
playwright open --device="Desktop Chrome" wikipedia.org # Chrome
```
But [the documentation](https://playwright.dev/python/docs/api/class-playwright#playwright-devices) seems to say that I have to initialize a `BrowserType` before I can pass the settings to the context, which partially defeats the purpose of the device settings.
I can implement my own logic to initialize the right `BrowserType` for each device, but as `playwright open` can already do that, that seems superfluous.
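For context, here is a minimal sketch of the lookup being asked about. It assumes the device descriptor exposes a `default_browser_type` key (which is exactly what the patch below adds); the device name and URL are just placeholders:
```python
from playwright.sync_api import sync_playwright

with sync_playwright() as p:
    # Copy the descriptor and pull out the browser-type key before
    # splatting the remaining options into new_context().
    device = dict(p.devices["Desktop Safari"])
    browser_name = device.pop("default_browser_type")  # e.g. "webkit"
    browser = getattr(p, browser_name).launch()
    context = browser.new_context(**device)
    page = context.new_page()
    page.goto("https://wikipedia.org")
    browser.close()
```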
| [
{
"content": "# Copyright (c) Microsoft Corporation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Dict\n\nfrom playwright._impl._browser_type import BrowserType\nfrom playwright._impl._connection import ChannelOwner, from_channel\nfrom playwright._impl._fetch import APIRequest\nfrom playwright._impl._local_utils import LocalUtils\nfrom playwright._impl._selectors import Selectors, SelectorsOwner\n\n\nclass Playwright(ChannelOwner):\n devices: Dict\n selectors: Selectors\n chromium: BrowserType\n firefox: BrowserType\n webkit: BrowserType\n request: APIRequest\n\n def __init__(\n self, parent: ChannelOwner, type: str, guid: str, initializer: Dict\n ) -> None:\n super().__init__(parent, type, guid, initializer)\n self.request = APIRequest(self)\n self.chromium = from_channel(initializer[\"chromium\"])\n self.chromium._playwright = self\n self.firefox = from_channel(initializer[\"firefox\"])\n self.firefox._playwright = self\n self.webkit = from_channel(initializer[\"webkit\"])\n self.webkit._playwright = self\n\n self.selectors = Selectors(self._loop, self._dispatcher_fiber)\n selectors_owner: SelectorsOwner = from_channel(initializer[\"selectors\"])\n self.selectors._add_channel(selectors_owner)\n\n self._connection.on(\n \"close\", lambda: self.selectors._remove_channel(selectors_owner)\n )\n self.devices = {}\n self.devices = {\n device[\"name\"]: parse_device_descriptor(device[\"descriptor\"])\n for device in initializer[\"deviceDescriptors\"]\n }\n self._utils: LocalUtils = from_channel(initializer[\"utils\"])\n\n def __getitem__(self, value: str) -> \"BrowserType\":\n if value == \"chromium\":\n return self.chromium\n elif value == \"firefox\":\n return self.firefox\n elif value == \"webkit\":\n return self.webkit\n raise ValueError(\"Invalid browser \" + value)\n\n def _set_selectors(self, selectors: SelectorsOwner) -> None:\n selectors_owner = from_channel(self._initializer[\"selectors\"])\n self.selectors._remove_channel(selectors_owner)\n self.selectors = selectors\n self.selectors._add_channel(selectors_owner)\n\n def stop(self) -> None:\n pass\n\n\ndef parse_device_descriptor(dict: Dict) -> Dict:\n return {\n \"user_agent\": dict[\"userAgent\"],\n \"viewport\": dict[\"viewport\"],\n \"device_scale_factor\": dict[\"deviceScaleFactor\"],\n \"is_mobile\": dict[\"isMobile\"],\n \"has_touch\": dict[\"hasTouch\"],\n }\n",
"path": "playwright/_impl/_playwright.py"
}
] | [
{
"content": "# Copyright (c) Microsoft Corporation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Dict\n\nfrom playwright._impl._browser_type import BrowserType\nfrom playwright._impl._connection import ChannelOwner, from_channel\nfrom playwright._impl._fetch import APIRequest\nfrom playwright._impl._local_utils import LocalUtils\nfrom playwright._impl._selectors import Selectors, SelectorsOwner\n\n\nclass Playwright(ChannelOwner):\n devices: Dict\n selectors: Selectors\n chromium: BrowserType\n firefox: BrowserType\n webkit: BrowserType\n request: APIRequest\n\n def __init__(\n self, parent: ChannelOwner, type: str, guid: str, initializer: Dict\n ) -> None:\n super().__init__(parent, type, guid, initializer)\n self.request = APIRequest(self)\n self.chromium = from_channel(initializer[\"chromium\"])\n self.chromium._playwright = self\n self.firefox = from_channel(initializer[\"firefox\"])\n self.firefox._playwright = self\n self.webkit = from_channel(initializer[\"webkit\"])\n self.webkit._playwright = self\n\n self.selectors = Selectors(self._loop, self._dispatcher_fiber)\n selectors_owner: SelectorsOwner = from_channel(initializer[\"selectors\"])\n self.selectors._add_channel(selectors_owner)\n\n self._connection.on(\n \"close\", lambda: self.selectors._remove_channel(selectors_owner)\n )\n self.devices = {}\n self.devices = {\n device[\"name\"]: parse_device_descriptor(device[\"descriptor\"])\n for device in initializer[\"deviceDescriptors\"]\n }\n self._utils: LocalUtils = from_channel(initializer[\"utils\"])\n\n def __getitem__(self, value: str) -> \"BrowserType\":\n if value == \"chromium\":\n return self.chromium\n elif value == \"firefox\":\n return self.firefox\n elif value == \"webkit\":\n return self.webkit\n raise ValueError(\"Invalid browser \" + value)\n\n def _set_selectors(self, selectors: SelectorsOwner) -> None:\n selectors_owner = from_channel(self._initializer[\"selectors\"])\n self.selectors._remove_channel(selectors_owner)\n self.selectors = selectors\n self.selectors._add_channel(selectors_owner)\n\n def stop(self) -> None:\n pass\n\n\ndef parse_device_descriptor(dict: Dict) -> Dict:\n return {\n \"user_agent\": dict[\"userAgent\"],\n \"viewport\": dict[\"viewport\"],\n \"device_scale_factor\": dict[\"deviceScaleFactor\"],\n \"is_mobile\": dict[\"isMobile\"],\n \"has_touch\": dict[\"hasTouch\"],\n \"default_browser_type\": dict[\"defaultBrowserType\"],\n }\n",
"path": "playwright/_impl/_playwright.py"
}
] | diff --git a/playwright/_impl/_playwright.py b/playwright/_impl/_playwright.py
index 354e3d11c..f9fe7617f 100644
--- a/playwright/_impl/_playwright.py
+++ b/playwright/_impl/_playwright.py
@@ -81,4 +81,5 @@ def parse_device_descriptor(dict: Dict) -> Dict:
"device_scale_factor": dict["deviceScaleFactor"],
"is_mobile": dict["isMobile"],
"has_touch": dict["hasTouch"],
+ "default_browser_type": dict["defaultBrowserType"],
}
diff --git a/tests/async/test_device_descriptors.py b/tests/async/test_device_descriptors.py
new file mode 100644
index 000000000..c8790b2a8
--- /dev/null
+++ b/tests/async/test_device_descriptors.py
@@ -0,0 +1,37 @@
+import pytest
+
+
[email protected]_browser("chromium")
+async def test_should_work(playwright) -> None:
+ device_descriptor = playwright.devices["Pixel 2"]
+ device_type = device_descriptor["default_browser_type"]
+ browser = await playwright[device_type].launch()
+ context = await browser.new_context(
+ **device_descriptor,
+ )
+ page = await context.new_page()
+ assert device_descriptor["default_browser_type"] == "chromium"
+ assert browser.browser_type.name == "chromium"
+
+ assert "Pixel 2" in device_descriptor["user_agent"]
+ assert "Pixel 2" in await page.evaluate("navigator.userAgent")
+
+ assert device_descriptor["device_scale_factor"] > 2
+ assert await page.evaluate("window.devicePixelRatio") > 2
+
+ assert device_descriptor["viewport"]["height"] > 700
+ assert device_descriptor["viewport"]["height"] < 800
+ inner_height = await page.evaluate("window.innerHeight")
+ inner_height > 700
+ inner_height < 800
+
+ assert device_descriptor["viewport"]["width"] > 400
+ assert device_descriptor["viewport"]["width"] < 500
+ inner_width = await page.evaluate("window.innerWidth")
+ inner_width > 400
+ inner_width < 500
+
+ assert device_descriptor["has_touch"]
+ assert device_descriptor["is_mobile"]
+
+ await browser.close()
|
mkdocs__mkdocs-2893 | Support latest release of Markdown library
I believe there has been some update to the `Markdown` library and how it internally records its version that is breaking things.
With a brand new environment and a fresh install of `mkdocs`, a `mkdocs build --strict --verbose` fails my project with this error:
```bash
DEBUG - Loading configuration file: /Users/sh/Projects/dataportalapiclient/mkdocs.yml
ERROR - Config value: 'markdown_extensions'. Error: module 'markdown' has no attribute 'version_info'
```
At this point, a fresh install of mkdocs resolves its Markdown dependency to `Markdown==3.4.1`, which was released [three days ago](https://github.com/Python-Markdown/markdown/tags).
After running a `pip install Markdown==3.3.7` to downgrade the version, rerunning the build is successful:
```bash
DEBUG - Loading configuration file: /Users/sh/Projects/dataportalapiclient/mkdocs.yml
...
DEBUG - mkdocstrings: Tearing handlers down
INFO - Documentation built in 3.45 seconds
```
I notice that in [this commit from May 27th on the Markdown repository](https://github.com/Python-Markdown/markdown/commit/a767b2daaad78ba32d45a4f1dabb7c5e218f030a), the deprecated `version_info` object was removed and replaced with the `__version_info__` object, as per this table:
| Deprecated Object | Replacement Object |
|----------------------------------------|-------------------------------------|
| `markdown.version` | `markdown.__version__` |
| `markdown.version_info` | `markdown.__version_info__` |
| `markdown.util.etree` | `xml.etree.ElementTree` |
| `markdown.util.string_type` | `str` |
| `markdown.util.text_type` | `str` |
| `markdown.util.int2str` | `chr` |
| `markdown.util.iterrange` | `range` |
| `markdown.util.isBlockLevel` | `markdown.Markdown.is_block_level` |
| `markdown.util.Processor().markdown` | `markdown.util.Processor().md` |
| `markdown.util.Registry().__setitem__` | `markdown.util.Registry().register` |
| `markdown.util.Registry().__delitem__` |`markdown.util.Registry().deregister`|
| `markdown.util.Registry().add` | `markdown.util.Registry().register` |
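For illustration only (this is not mkdocs' actual fix, which, as the patch below shows, simply pins `Markdown<3.4`), code that needs the version tuple on both sides of this rename could fall back like this:
```python
import markdown

# Prefer the new dunder attribute; fall back to the deprecated name that
# still exists on Markdown releases before 3.4.
version_info = getattr(markdown, "__version_info__", None)
if version_info is None:
    version_info = markdown.version_info
print(version_info)
```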
Hopefully the fix is a simple change to this dunder object! I'm unsure whether this repository is the right place for the packaged markdown extension, as I couldn't quite see where that config gets run, either here or in the [Python Markdown library](https://github.com/Python-Markdown/markdown/).
If this isn't the place, I'd appreciate it if you could point me towards the right repo.
| [
{
"content": "#!/usr/bin/env python\n\nfrom setuptools import setup\nimport re\nimport os\nimport sys\n\nfrom mkdocs.commands.setup import babel_cmdclass\n\nwith open('README.md') as f:\n long_description = f.read()\n\n\ndef get_version(package):\n \"\"\"Return package version as listed in `__version__` in `init.py`.\"\"\"\n init_py = open(os.path.join(package, '__init__.py')).read()\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", init_py).group(1)\n\n\ndef get_packages(package):\n \"\"\"Return root package and all sub-packages.\"\"\"\n return [dirpath\n for dirpath, dirnames, filenames in os.walk(package)\n if os.path.exists(os.path.join(dirpath, '__init__.py'))]\n\n\nif sys.argv[-1] == 'publish':\n if os.system(\"pip freeze | grep wheel\"):\n print(\"wheel not installed.\\nUse `pip install wheel`.\\nExiting.\")\n sys.exit()\n if os.system(\"pip freeze | grep twine\"):\n print(\"twine not installed.\\nUse `pip install twine`.\\nExiting.\")\n sys.exit()\n if os.system(\"pip freeze | grep Babel\"):\n print(\"babel not installed.\\nUse `pip install babel`.\\nExiting.\")\n sys.exit()\n for locale in os.listdir(\"mkdocs/themes/mkdocs/locales\"):\n os.system(f\"python setup.py compile_catalog -t mkdocs -l {locale}\")\n os.system(f\"python setup.py compile_catalog -t readthedocs -l {locale}\")\n os.system(\"python setup.py sdist bdist_wheel\")\n os.system(\"twine upload dist/*\")\n print(\"You probably want to also tag the version now:\")\n version = get_version(\"mkdocs\")\n print(f\" git tag -a {version} -m 'version {version}'\")\n print(\" git push --tags\")\n sys.exit()\n\n\nsetup(\n name=\"mkdocs\",\n version=get_version(\"mkdocs\"),\n url='https://www.mkdocs.org',\n project_urls={\n 'Source': 'https://github.com/mkdocs/mkdocs',\n },\n license='BSD',\n description='Project documentation with Markdown.',\n long_description=long_description,\n long_description_content_type='text/markdown',\n author='Tom Christie',\n author_email='[email protected]', # SEE NOTE BELOW (*)\n packages=get_packages(\"mkdocs\"),\n include_package_data=True,\n install_requires=[\n 'click>=3.3',\n 'Jinja2>=2.10.2',\n 'Markdown>=3.2.1',\n 'PyYAML>=3.10',\n 'watchdog>=2.0',\n 'ghp-import>=1.0',\n 'pyyaml_env_tag>=0.1',\n 'importlib_metadata>=4.3',\n 'packaging>=20.5',\n 'mergedeep>=1.3.4'\n ],\n extras_require={\"i18n\": ['babel>=2.9.0']},\n python_requires='>=3.6',\n entry_points={\n 'console_scripts': [\n 'mkdocs = mkdocs.__main__:cli',\n ],\n 'mkdocs.themes': [\n 'mkdocs = mkdocs.themes.mkdocs',\n 'readthedocs = mkdocs.themes.readthedocs',\n ],\n 'mkdocs.plugins': [\n 'search = mkdocs.contrib.search:SearchPlugin',\n ],\n },\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Programming Language :: Python :: 3.10',\n 'Programming Language :: Python :: 3 :: Only',\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n 'Topic :: Documentation',\n 'Topic :: Text Processing',\n ],\n zip_safe=False,\n cmdclass=babel_cmdclass,\n)\n\n# (*) Please direct queries to the discussion group:\n# 
https://groups.google.com/forum/#!forum/mkdocs\n",
"path": "setup.py"
}
] | [
{
"content": "#!/usr/bin/env python\n\nfrom setuptools import setup\nimport re\nimport os\nimport sys\n\nfrom mkdocs.commands.setup import babel_cmdclass\n\nwith open('README.md') as f:\n long_description = f.read()\n\n\ndef get_version(package):\n \"\"\"Return package version as listed in `__version__` in `init.py`.\"\"\"\n init_py = open(os.path.join(package, '__init__.py')).read()\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", init_py).group(1)\n\n\ndef get_packages(package):\n \"\"\"Return root package and all sub-packages.\"\"\"\n return [dirpath\n for dirpath, dirnames, filenames in os.walk(package)\n if os.path.exists(os.path.join(dirpath, '__init__.py'))]\n\n\nif sys.argv[-1] == 'publish':\n if os.system(\"pip freeze | grep wheel\"):\n print(\"wheel not installed.\\nUse `pip install wheel`.\\nExiting.\")\n sys.exit()\n if os.system(\"pip freeze | grep twine\"):\n print(\"twine not installed.\\nUse `pip install twine`.\\nExiting.\")\n sys.exit()\n if os.system(\"pip freeze | grep Babel\"):\n print(\"babel not installed.\\nUse `pip install babel`.\\nExiting.\")\n sys.exit()\n for locale in os.listdir(\"mkdocs/themes/mkdocs/locales\"):\n os.system(f\"python setup.py compile_catalog -t mkdocs -l {locale}\")\n os.system(f\"python setup.py compile_catalog -t readthedocs -l {locale}\")\n os.system(\"python setup.py sdist bdist_wheel\")\n os.system(\"twine upload dist/*\")\n print(\"You probably want to also tag the version now:\")\n version = get_version(\"mkdocs\")\n print(f\" git tag -a {version} -m 'version {version}'\")\n print(\" git push --tags\")\n sys.exit()\n\n\nsetup(\n name=\"mkdocs\",\n version=get_version(\"mkdocs\"),\n url='https://www.mkdocs.org',\n project_urls={\n 'Source': 'https://github.com/mkdocs/mkdocs',\n },\n license='BSD',\n description='Project documentation with Markdown.',\n long_description=long_description,\n long_description_content_type='text/markdown',\n author='Tom Christie',\n author_email='[email protected]', # SEE NOTE BELOW (*)\n packages=get_packages(\"mkdocs\"),\n include_package_data=True,\n install_requires=[\n 'click>=3.3',\n 'Jinja2>=2.10.2',\n 'Markdown>=3.2.1,<3.4',\n 'PyYAML>=3.10',\n 'watchdog>=2.0',\n 'ghp-import>=1.0',\n 'pyyaml_env_tag>=0.1',\n 'importlib_metadata>=4.3',\n 'packaging>=20.5',\n 'mergedeep>=1.3.4'\n ],\n extras_require={\"i18n\": ['babel>=2.9.0']},\n python_requires='>=3.6',\n entry_points={\n 'console_scripts': [\n 'mkdocs = mkdocs.__main__:cli',\n ],\n 'mkdocs.themes': [\n 'mkdocs = mkdocs.themes.mkdocs',\n 'readthedocs = mkdocs.themes.readthedocs',\n ],\n 'mkdocs.plugins': [\n 'search = mkdocs.contrib.search:SearchPlugin',\n ],\n },\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Programming Language :: Python :: 3.10',\n 'Programming Language :: Python :: 3 :: Only',\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n 'Topic :: Documentation',\n 'Topic :: Text Processing',\n ],\n zip_safe=False,\n cmdclass=babel_cmdclass,\n)\n\n# (*) Please direct queries to the discussion 
group:\n# https://groups.google.com/forum/#!forum/mkdocs\n",
"path": "setup.py"
}
] | diff --git a/requirements/project.txt b/requirements/project.txt
index 1402023779..b158a10a42 100644
--- a/requirements/project.txt
+++ b/requirements/project.txt
@@ -1,7 +1,7 @@
babel>=2.9.0
click>=7.0
Jinja2>=2.10.2
-Markdown>=3.2.1
+Markdown>=3.2.1,<3.4
PyYAML>=5.2
watchdog>=2.0.0
mdx_gh_links>=0.2
diff --git a/setup.py b/setup.py
index 77cabc87e3..1115ce0e75 100755
--- a/setup.py
+++ b/setup.py
@@ -64,7 +64,7 @@ def get_packages(package):
install_requires=[
'click>=3.3',
'Jinja2>=2.10.2',
- 'Markdown>=3.2.1',
+ 'Markdown>=3.2.1,<3.4',
'PyYAML>=3.10',
'watchdog>=2.0',
'ghp-import>=1.0',
|
django-cms__django-cms-3868 | Implements a different live/draft switcher for placeholders outside of the CMS.
Placeholders outside of the CMS currently cause the live/draft switcher to be displayed, which toggles the editing mode. However, for non-CMS models, draft versions are not implemented. This can be confusing to users.
This PR adds another switcher template that uses the wording "Editing Off" in place of "Draft" and "Editing Live" in place of "Live." The PageToolbar.add_draft_live() method has been modified to take a parameter, "is_page", which determines the template used.
Tests have not been implemented as there are currently no tests for cms_toolbar.py.
Some thoughts: Should there be a separate method for adding the new toggle, as opposed to piggybacking on add_draft_live? Should there be one template for the switcher that we hand text to? Can anyone offer guidance on implementing tests?
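For reference, a minimal sketch of the idea described above (not the merged implementation), showing how add_draft_live() could thread a template choice through to the existing add_draft_live_item(); the non-page template filename is an assumption:
```python
# Sketch only: lives inside cms.cms_toolbar.PageToolbar; the non-page
# template name below is hypothetical.
def add_draft_live(self, is_page=True):
    template = ('cms/toolbar/items/live_draft.html' if is_page
                else 'cms/toolbar/items/live_draft_non_page.html')
    if self.page:
        if self.toolbar.edit_mode and not self.title:
            self.add_page_settings_button()
        if (self.page.has_change_permission(self.request)
                and self.page.is_published(self.current_lang)):
            return self.add_draft_live_item(template=template)
    elif self.placeholders:
        return self.add_draft_live_item(template=template)
    for sp in self.statics:
        if sp.has_change_permission(self.request):
            return self.add_draft_live_item(template=template)
```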
| [
{
"content": "# -*- coding: utf-8 -*-\n\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.core.urlresolvers import reverse, NoReverseMatch, resolve, Resolver404\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.contrib import admin\nfrom django.contrib.auth import get_permission_codename\nfrom django.contrib.auth.models import AnonymousUser\nfrom django.contrib.sites.models import Site\n\nfrom cms.api import get_page_draft\nfrom cms.constants import TEMPLATE_INHERITANCE_MAGIC, PUBLISHER_STATE_PENDING\nfrom cms.models import Title, Page\nfrom cms.toolbar.items import TemplateItem\nfrom cms.toolbar_base import CMSToolbar\nfrom cms.toolbar_pool import toolbar_pool\nfrom cms.utils.i18n import get_language_tuple, force_language\nfrom cms.utils.compat.dj import is_installed\nfrom cms.utils import get_cms_setting\nfrom cms.utils.permissions import get_user_sites_queryset, has_page_change_permission\nfrom cms.utils.urlutils import add_url_parameters, admin_reverse\nfrom menus.utils import DefaultLanguageChanger\n\n\n# Identifiers for search\nADMIN_MENU_IDENTIFIER = 'admin-menu'\nLANGUAGE_MENU_IDENTIFIER = 'language-menu'\nTEMPLATE_MENU_BREAK = 'Template Menu Break'\nPAGE_MENU_IDENTIFIER = 'page'\nPAGE_MENU_ADD_IDENTIFIER = 'add_page'\nPAGE_MENU_FIRST_BREAK = 'Page Menu First Break'\nPAGE_MENU_SECOND_BREAK = 'Page Menu Second Break'\nPAGE_MENU_THIRD_BREAK = 'Page Menu Third Break'\nPAGE_MENU_FOURTH_BREAK = 'Page Menu Fourth Break'\nPAGE_MENU_LAST_BREAK = 'Page Menu Last Break'\nHISTORY_MENU_IDENTIFIER = 'history'\nHISTORY_MENU_BREAK = 'History Menu Break'\nMANAGE_PAGES_BREAK = 'Manage Pages Break'\nADMIN_SITES_BREAK = 'Admin Sites Break'\nADMINISTRATION_BREAK = 'Administration Break'\nUSER_SETTINGS_BREAK = 'User Settings Break'\nADD_PAGE_LANGUAGE_BREAK = \"Add page language Break\"\nREMOVE_PAGE_LANGUAGE_BREAK = \"Remove page language Break\"\nCOPY_PAGE_LANGUAGE_BREAK = \"Copy page language Break\"\n\n\n@toolbar_pool.register\nclass PlaceholderToolbar(CMSToolbar):\n \"\"\"\n Adds placeholder edit buttons if placeholders or static placeholders are detected in the template\n \"\"\"\n\n def init_from_request(self):\n self.page = get_page_draft(self.request.current_page)\n\n def init_placeholders_from_request(self):\n self.placeholders = getattr(self.request, 'placeholders', [])\n self.statics = getattr(self.request, 'static_placeholders', [])\n\n def populate(self):\n self.init_from_request()\n\n def post_template_populate(self):\n self.init_placeholders_from_request()\n\n self.add_structure_mode()\n\n def add_structure_mode(self):\n if self.page and not self.page.application_urls:\n if self.page.has_change_permission(self.request):\n return self.add_structure_mode_item()\n\n elif self.placeholders:\n return self.add_structure_mode_item()\n\n for sp in self.statics:\n if sp.has_change_permission(self.request):\n return self.add_structure_mode_item()\n\n def add_structure_mode_item(self, extra_classes=('cms_toolbar-item-cms-mode-switcher',)):\n build_mode = self.toolbar.build_mode\n build_url = '?%s' % get_cms_setting('CMS_TOOLBAR_URL__BUILD')\n edit_url = '?%s' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')\n switcher = self.toolbar.add_button_list('Mode Switcher', side=self.toolbar.RIGHT, extra_classes=extra_classes)\n switcher.add_button(_('Structure'), build_url, active=build_mode, disabled=not build_mode)\n switcher.add_button(_('Content'), edit_url, active=not build_mode, disabled=build_mode)\n\n\n@toolbar_pool.register\nclass 
BasicToolbar(CMSToolbar):\n \"\"\"\n Basic Toolbar for site and languages menu\n \"\"\"\n\n def init_from_request(self):\n self.page = get_page_draft(self.request.current_page)\n\n def populate(self):\n self.init_from_request()\n\n self.add_admin_menu()\n self.add_language_menu()\n\n def add_admin_menu(self):\n admin_menu = self.toolbar.get_or_create_menu(ADMIN_MENU_IDENTIFIER, self.current_site.name)\n\n # Users button\n self.add_users_button(admin_menu)\n\n # sites menu\n if get_cms_setting('PERMISSION'):\n sites_queryset = get_user_sites_queryset(self.request.user)\n else:\n sites_queryset = Site.objects.all()\n\n if len(sites_queryset) > 1:\n sites_menu = admin_menu.get_or_create_menu('sites', _('Sites'))\n sites_menu.add_sideframe_item(_('Admin Sites'), url=admin_reverse('sites_site_changelist'))\n sites_menu.add_break(ADMIN_SITES_BREAK)\n for site in sites_queryset:\n sites_menu.add_link_item(site.name, url='http://%s' % site.domain,\n active=site.pk == self.current_site.pk)\n\n # admin\n admin_menu.add_sideframe_item(_('Administration'), url=admin_reverse('index'))\n admin_menu.add_break(ADMINISTRATION_BREAK)\n\n # cms users\n admin_menu.add_sideframe_item(_('User settings'), url=admin_reverse('cms_usersettings_change'))\n admin_menu.add_break(USER_SETTINGS_BREAK)\n\n # logout\n self.add_logout_button(admin_menu)\n\n def add_users_button(self, parent):\n User = get_user_model()\n\n if User in admin.site._registry:\n opts = User._meta\n\n if self.request.user.has_perm('%s.%s' % (opts.app_label, get_permission_codename('change', opts))):\n user_changelist_url = admin_reverse('%s_%s_changelist' % (opts.app_label, opts.model_name))\n parent.add_sideframe_item(_('Users'), url=user_changelist_url)\n\n def add_logout_button(self, parent):\n # If current page is not published or has view restrictions user is redirected to the home page:\n # * published page: no redirect\n # * unpublished page: redirect to the home page\n # * published page with login_required: redirect to the home page\n # * published page with view permissions: redirect to the home page\n\n if (self.page and self.page.is_published(self.current_lang) and not self.page.login_required and\n self.page.has_view_permission(self.request, AnonymousUser())):\n on_success = self.toolbar.REFRESH_PAGE\n else:\n on_success = '/'\n\n # We'll show \"Logout Joe Bloggs\" if the name fields in auth.User are completed, else \"Logout jbloggs\". 
If\n # anything goes wrong, it'll just be \"Logout\".\n\n user_name = self.get_username()\n logout_menu_text = _('Logout %s') % user_name if user_name else _('Logout')\n\n parent.add_ajax_item(logout_menu_text, action=admin_reverse('logout'), active=True, on_success=on_success)\n\n def add_language_menu(self):\n if settings.USE_I18N:\n language_menu = self.toolbar.get_or_create_menu(LANGUAGE_MENU_IDENTIFIER, _('Language'))\n language_changer = getattr(self.request, '_language_changer', DefaultLanguageChanger(self.request))\n for code, name in get_language_tuple(self.current_site.pk):\n try:\n url = language_changer(code)\n except NoReverseMatch:\n url = DefaultLanguageChanger(self.request)(code)\n language_menu.add_link_item(name, url=url, active=self.current_lang == code)\n\n def get_username(self, user=None, default=''):\n user = user or self.request.user\n try:\n name = user.get_full_name()\n if name:\n return name\n else:\n return user.get_username()\n except (AttributeError, NotImplementedError):\n return default\n\n\n@toolbar_pool.register\nclass PageToolbar(CMSToolbar):\n watch_models = [Page]\n\n # Helpers\n\n def init_from_request(self):\n self.page = get_page_draft(self.request.current_page)\n self.title = self.get_title()\n self.permissions_activated = get_cms_setting('PERMISSION')\n\n def init_placeholders_from_request(self):\n self.placeholders = getattr(self.request, 'placeholders', [])\n self.statics = getattr(self.request, 'static_placeholders', [])\n self.dirty_statics = [sp for sp in self.statics if sp.dirty]\n\n def get_title(self):\n try:\n return Title.objects.get(page=self.page, language=self.current_lang, publisher_is_draft=True)\n except Title.DoesNotExist:\n return None\n\n def has_publish_permission(self):\n if not hasattr(self, 'publish_permission'):\n publish_permission = bool(self.page or self.statics)\n\n if self.page:\n publish_permission = self.page.has_publish_permission(self.request)\n\n if self.statics:\n publish_permission &= all(sp.has_publish_permission(self.request) for sp in self.dirty_statics)\n\n self.publish_permission = publish_permission\n\n return self.publish_permission\n\n def has_page_change_permission(self):\n if not hasattr(self, 'page_change_permission'):\n # check global permissions if CMS_PERMISSIONS is active\n global_permission = self.permissions_activated and has_page_change_permission(self.request)\n\n # check if user has page edit permission\n page_permission = self.page and self.page.has_change_permission(self.request)\n\n self.page_change_permission = global_permission or page_permission\n\n return self.page_change_permission\n\n def page_is_pending(self, page, language):\n return (page.publisher_public_id and\n page.publisher_public.get_publisher_state(language) == PUBLISHER_STATE_PENDING)\n\n def in_apphook(self):\n with force_language(self.toolbar.language):\n try:\n resolver = resolve(self.request.path_info)\n except Resolver404:\n return False\n else:\n from cms.views import details\n return resolver.func != details\n\n def get_on_delete_redirect_url(self):\n parent, language = self.page.parent, self.current_lang\n\n # if the current page has a parent in the request's current language redirect to it\n if parent and language in parent.get_languages():\n with force_language(language):\n return parent.get_absolute_url(language=language)\n\n # else redirect to root, do not redirect to Page.objects.get_home() because user could have deleted the last\n # page, if DEBUG == False this could cause a 404\n return 
reverse('pages-root')\n\n # Populate\n\n def populate(self):\n self.init_from_request()\n\n self.change_admin_menu()\n self.add_page_menu()\n self.add_history_menu()\n self.change_language_menu()\n\n def post_template_populate(self):\n self.init_placeholders_from_request()\n\n self.add_publish_button()\n self.add_draft_live()\n\n # Buttons\n\n def add_publish_button(self, classes=('cms_btn-action', 'cms_btn-publish',)):\n # only do dirty lookups if publish permission is granted else button isn't added anyway\n if self.toolbar.edit_mode and self.has_publish_permission():\n classes = list(classes or [])\n pk = self.page.pk if self.page else 0\n\n dirty = (bool(self.dirty_statics) or\n (self.page and (self.page.is_dirty(self.current_lang) or\n self.page_is_pending(self.page, self.current_lang))))\n\n if dirty:\n classes.append('cms_btn-publish-active')\n\n if self.dirty_statics or (self.page and self.page.is_published(self.current_lang)):\n title = _('Publish changes')\n else:\n title = _('Publish page now')\n classes.append('cms_publish-page')\n\n params = {}\n\n if self.dirty_statics:\n params['statics'] = ','.join(str(sp.pk) for sp in self.dirty_statics)\n\n if self.in_apphook():\n params['redirect'] = self.request.path_info\n\n with force_language(self.current_lang):\n url = admin_reverse('cms_page_publish_page', args=(pk, self.current_lang))\n\n url = add_url_parameters(url, params)\n\n self.toolbar.add_button(title, url=url, extra_classes=classes,\n side=self.toolbar.RIGHT, disabled=not dirty)\n\n def add_draft_live(self):\n if self.page:\n if self.toolbar.edit_mode and not self.title:\n self.add_page_settings_button()\n\n if self.page.has_change_permission(self.request) and self.page.is_published(self.current_lang):\n return self.add_draft_live_item()\n\n elif self.placeholders:\n return self.add_draft_live_item()\n\n for sp in self.statics:\n if sp.has_change_permission(self.request):\n return self.add_draft_live_item()\n\n def add_draft_live_item(self, template='cms/toolbar/items/live_draft.html', extra_context=None):\n context = {'request': self.request}\n context.update(extra_context or {})\n pos = len(self.toolbar.right_items)\n self.toolbar.add_item(TemplateItem(template, extra_context=context, side=self.toolbar.RIGHT), position=pos)\n\n def add_page_settings_button(self, extra_classes=('cms_btn-action',)):\n url = '%s?language=%s' % (admin_reverse('cms_page_change', args=[self.page.pk]), self.toolbar.language)\n self.toolbar.add_modal_button(_('Page settings'), url, side=self.toolbar.RIGHT, extra_classes=extra_classes)\n\n # Menus\n\n def change_language_menu(self):\n if self.toolbar.edit_mode and self.page:\n language_menu = self.toolbar.get_menu(LANGUAGE_MENU_IDENTIFIER)\n if not language_menu:\n return None\n\n languages = get_language_tuple(self.current_site.pk)\n languages_dict = dict(languages)\n\n remove = [(code, languages_dict.get(code, code)) for code in self.page.get_languages()]\n add = [l for l in languages if l not in remove]\n copy = [(code, name) for code, name in languages if code != self.current_lang and (code, name) in remove]\n\n if add:\n language_menu.add_break(ADD_PAGE_LANGUAGE_BREAK)\n page_change_url = admin_reverse('cms_page_change', args=(self.page.pk,))\n title = _('Add %(language)s Translation')\n for code, name in add:\n url = add_url_parameters(page_change_url, language=code)\n language_menu.add_modal_item(title % {'language': name}, url=url)\n\n if remove:\n language_menu.add_break(REMOVE_PAGE_LANGUAGE_BREAK)\n translation_delete_url = 
admin_reverse('cms_page_delete_translation', args=(self.page.pk,))\n title = _('Delete %(language)s Translation')\n disabled = len(remove) == 1\n for code, name in remove:\n url = add_url_parameters(translation_delete_url, language=code)\n language_menu.add_modal_item(title % {'language': name}, url=url, disabled=disabled)\n\n if copy:\n language_menu.add_break(COPY_PAGE_LANGUAGE_BREAK)\n page_copy_url = admin_reverse('cms_page_copy_language', args=(self.page.pk,))\n title = _('Copy all plugins from %s')\n question = _('Are you sure you want copy all plugins from %s?')\n for code, name in copy:\n language_menu.add_ajax_item(title % name, action=page_copy_url,\n data={'source_language': code, 'target_language': self.current_lang},\n question=question % name, on_success=self.toolbar.REFRESH_PAGE)\n\n def change_admin_menu(self):\n if self.has_page_change_permission():\n admin_menu = self.toolbar.get_or_create_menu(ADMIN_MENU_IDENTIFIER)\n url = admin_reverse('cms_page_changelist') # cms page admin\n params = {'language': self.toolbar.language}\n if self.page:\n params['page_id'] = self.page.pk\n url = add_url_parameters(url, params)\n admin_menu.add_sideframe_item(_('Pages'), url=url, position=0)\n\n def add_page_menu(self):\n if self.page and self.has_page_change_permission():\n edit_mode = self.toolbar.edit_mode\n refresh = self.toolbar.REFRESH_PAGE\n\n # menu for current page\n current_page_menu = self.toolbar.get_or_create_menu(PAGE_MENU_IDENTIFIER, _('Page'), position=1)\n\n # page operations menu\n add_page_menu = current_page_menu.get_or_create_menu(PAGE_MENU_ADD_IDENTIFIER, _('Add Page'))\n app_page_url = admin_reverse('cms_page_add')\n add_page_menu_sideframe_items = (\n (_('New Page'), {'edit': 1, 'position': 'last-child', 'target': self.page.parent_id or ''}),\n (_('New Sub Page'), {'edit': 1, 'position': 'last-child', 'target': self.page.pk}),\n (_('Duplicate this Page'), {'copy_target': self.page.pk})\n )\n\n for title, params in add_page_menu_sideframe_items:\n params.update(language=self.toolbar.language)\n add_page_menu.add_sideframe_item(title, url=add_url_parameters(app_page_url, params))\n\n # first break\n current_page_menu.add_break(PAGE_MENU_FIRST_BREAK)\n\n # page edit\n page_edit_url = '?%s' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')\n current_page_menu.add_link_item(_('Edit this Page'), disabled=edit_mode, url=page_edit_url)\n\n # page settings\n page_settings_url = admin_reverse('cms_page_change', args=(self.page.pk,))\n page_settings_url = add_url_parameters(page_settings_url, language=self.toolbar.language)\n current_page_menu.add_modal_item(_('Page settings'), url=page_settings_url, disabled=not edit_mode,\n on_close=refresh)\n\n # templates menu\n if self.toolbar.build_mode or edit_mode:\n templates_menu = current_page_menu.get_or_create_menu('templates', _('Templates'))\n action = admin_reverse('cms_page_change_template', args=(self.page.pk,))\n for path, name in get_cms_setting('TEMPLATES'):\n active = self.page.template == path\n if path == TEMPLATE_INHERITANCE_MAGIC:\n templates_menu.add_break(TEMPLATE_MENU_BREAK)\n templates_menu.add_ajax_item(name, action=action, data={'template': path}, active=active,\n on_success=refresh)\n\n # second break\n current_page_menu.add_break(PAGE_MENU_SECOND_BREAK)\n\n # advanced settings\n advanced_url = admin_reverse('cms_page_advanced', args=(self.page.pk,))\n advanced_url = add_url_parameters(advanced_url, language=self.toolbar.language)\n advanced_disabled = not self.page.has_advanced_settings_permission(self.request) 
or not edit_mode\n current_page_menu.add_modal_item(_('Advanced settings'), url=advanced_url, disabled=advanced_disabled)\n\n # permissions\n if self.permissions_activated:\n permissions_url = admin_reverse('cms_page_permissions', args=(self.page.pk,))\n permission_disabled = not edit_mode or not self.page.has_change_permissions_permission(self.request)\n current_page_menu.add_modal_item(_('Permissions'), url=permissions_url, disabled=permission_disabled)\n\n # dates settings\n dates_url = admin_reverse('cms_page_dates', args=(self.page.pk,))\n current_page_menu.add_modal_item(_('Publishing dates'), url=dates_url, disabled=not edit_mode)\n\n # third break\n current_page_menu.add_break(PAGE_MENU_THIRD_BREAK)\n\n # navigation toggle\n nav_title = _('Hide in navigation') if self.page.in_navigation else _('Display in navigation')\n nav_action = admin_reverse('cms_page_change_innavigation', args=(self.page.pk,))\n current_page_menu.add_ajax_item(nav_title, action=nav_action, disabled=not edit_mode, on_success=refresh)\n\n # publisher\n if self.title:\n if self.title.published:\n publish_title = _('Unpublish page')\n publish_url = admin_reverse('cms_page_unpublish', args=(self.page.pk, self.current_lang))\n else:\n publish_title = _('Publish page')\n publish_url = admin_reverse('cms_page_publish_page', args=(self.page.pk, self.current_lang))\n current_page_menu.add_ajax_item(publish_title, action=publish_url, disabled=not edit_mode,\n on_success=refresh)\n\n # fourth break\n current_page_menu.add_break(PAGE_MENU_FOURTH_BREAK)\n\n # delete\n delete_url = admin_reverse('cms_page_delete', args=(self.page.pk,))\n on_delete_redirect_url = self.get_on_delete_redirect_url()\n current_page_menu.add_modal_item(_('Delete page'), url=delete_url, on_close=on_delete_redirect_url,\n disabled=not edit_mode)\n\n # last break\n current_page_menu.add_break(PAGE_MENU_LAST_BREAK)\n\n # page type\n page_type_url = admin_reverse('cms_page_add_page_type')\n page_type_url = add_url_parameters(page_type_url, copy_target=self.page.pk, language=self.toolbar.language)\n current_page_menu.add_modal_item(_('Save as Page Type'), page_type_url, disabled=not edit_mode)\n\n def add_history_menu(self):\n if self.toolbar.edit_mode and self.page:\n refresh = self.toolbar.REFRESH_PAGE\n history_menu = self.toolbar.get_or_create_menu(HISTORY_MENU_IDENTIFIER, _('History'), position=2)\n\n if is_installed('reversion'):\n import reversion\n from reversion.models import Revision\n\n versions = reversion.get_for_object(self.page)\n if self.page.revision_id:\n current_revision = Revision.objects.get(pk=self.page.revision_id)\n has_undo = versions.filter(revision__pk__lt=current_revision.pk).exists()\n has_redo = versions.filter(revision__pk__gt=current_revision.pk).exists()\n else:\n has_redo = False\n has_undo = versions.count() > 1\n\n undo_action = admin_reverse('cms_page_undo', args=(self.page.pk,))\n redo_action = admin_reverse('cms_page_redo', args=(self.page.pk,))\n\n history_menu.add_ajax_item(_('Undo'), action=undo_action, disabled=not has_undo, on_success=refresh)\n history_menu.add_ajax_item(_('Redo'), action=redo_action, disabled=not has_redo, on_success=refresh)\n\n history_menu.add_break(HISTORY_MENU_BREAK)\n\n revert_action = admin_reverse('cms_page_revert_page', args=(self.page.pk, self.current_lang))\n revert_question = _('Are you sure you want to revert to live?')\n history_menu.add_ajax_item(_('Revert to live'), action=revert_action, question=revert_question,\n disabled=not self.page.is_dirty(self.current_lang), 
on_success=refresh)\n history_menu.add_modal_item(_('View history'), url=admin_reverse('cms_page_history', args=(self.page.pk,)))\n",
"path": "cms/cms_toolbar.py"
}
] | [
{
"content": "# -*- coding: utf-8 -*-\n\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.core.urlresolvers import reverse, NoReverseMatch, resolve, Resolver404\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.contrib import admin\nfrom django.contrib.auth import get_permission_codename\nfrom django.contrib.auth.models import AnonymousUser\nfrom django.contrib.sites.models import Site\n\nfrom cms.api import get_page_draft\nfrom cms.constants import TEMPLATE_INHERITANCE_MAGIC, PUBLISHER_STATE_PENDING\nfrom cms.models import Title, Page\nfrom cms.toolbar.items import TemplateItem\nfrom cms.toolbar_base import CMSToolbar\nfrom cms.toolbar_pool import toolbar_pool\nfrom cms.utils.i18n import get_language_tuple, force_language\nfrom cms.utils.compat.dj import is_installed\nfrom cms.utils import get_cms_setting\nfrom cms.utils.permissions import get_user_sites_queryset, has_page_change_permission\nfrom cms.utils.urlutils import add_url_parameters, admin_reverse\nfrom menus.utils import DefaultLanguageChanger\n\n\n# Identifiers for search\nADMIN_MENU_IDENTIFIER = 'admin-menu'\nLANGUAGE_MENU_IDENTIFIER = 'language-menu'\nTEMPLATE_MENU_BREAK = 'Template Menu Break'\nPAGE_MENU_IDENTIFIER = 'page'\nPAGE_MENU_ADD_IDENTIFIER = 'add_page'\nPAGE_MENU_FIRST_BREAK = 'Page Menu First Break'\nPAGE_MENU_SECOND_BREAK = 'Page Menu Second Break'\nPAGE_MENU_THIRD_BREAK = 'Page Menu Third Break'\nPAGE_MENU_FOURTH_BREAK = 'Page Menu Fourth Break'\nPAGE_MENU_LAST_BREAK = 'Page Menu Last Break'\nHISTORY_MENU_IDENTIFIER = 'history'\nHISTORY_MENU_BREAK = 'History Menu Break'\nMANAGE_PAGES_BREAK = 'Manage Pages Break'\nADMIN_SITES_BREAK = 'Admin Sites Break'\nADMINISTRATION_BREAK = 'Administration Break'\nUSER_SETTINGS_BREAK = 'User Settings Break'\nADD_PAGE_LANGUAGE_BREAK = \"Add page language Break\"\nREMOVE_PAGE_LANGUAGE_BREAK = \"Remove page language Break\"\nCOPY_PAGE_LANGUAGE_BREAK = \"Copy page language Break\"\n\n\n@toolbar_pool.register\nclass PlaceholderToolbar(CMSToolbar):\n \"\"\"\n Adds placeholder edit buttons if placeholders or static placeholders are detected in the template\n \"\"\"\n\n def init_from_request(self):\n self.page = get_page_draft(self.request.current_page)\n\n def init_placeholders_from_request(self):\n self.placeholders = getattr(self.request, 'placeholders', [])\n self.statics = getattr(self.request, 'static_placeholders', [])\n\n def populate(self):\n self.init_from_request()\n\n def post_template_populate(self):\n self.init_placeholders_from_request()\n\n self.add_structure_mode()\n\n def add_structure_mode(self):\n if self.page and not self.page.application_urls:\n if self.page.has_change_permission(self.request):\n return self.add_structure_mode_item()\n\n elif self.placeholders:\n return self.add_structure_mode_item()\n\n for sp in self.statics:\n if sp.has_change_permission(self.request):\n return self.add_structure_mode_item()\n\n def add_structure_mode_item(self, extra_classes=('cms_toolbar-item-cms-mode-switcher',)):\n build_mode = self.toolbar.build_mode\n build_url = '?%s' % get_cms_setting('CMS_TOOLBAR_URL__BUILD')\n edit_url = '?%s' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')\n switcher = self.toolbar.add_button_list('Mode Switcher', side=self.toolbar.RIGHT, extra_classes=extra_classes)\n switcher.add_button(_('Structure'), build_url, active=build_mode, disabled=not build_mode)\n switcher.add_button(_('Content'), edit_url, active=not build_mode, disabled=build_mode)\n\n\n@toolbar_pool.register\nclass 
BasicToolbar(CMSToolbar):\n \"\"\"\n Basic Toolbar for site and languages menu\n \"\"\"\n\n def init_from_request(self):\n self.page = get_page_draft(self.request.current_page)\n\n def populate(self):\n self.init_from_request()\n\n self.add_admin_menu()\n self.add_language_menu()\n\n def add_admin_menu(self):\n admin_menu = self.toolbar.get_or_create_menu(ADMIN_MENU_IDENTIFIER, self.current_site.name)\n\n # Users button\n self.add_users_button(admin_menu)\n\n # sites menu\n if get_cms_setting('PERMISSION'):\n sites_queryset = get_user_sites_queryset(self.request.user)\n else:\n sites_queryset = Site.objects.all()\n\n if len(sites_queryset) > 1:\n sites_menu = admin_menu.get_or_create_menu('sites', _('Sites'))\n sites_menu.add_sideframe_item(_('Admin Sites'), url=admin_reverse('sites_site_changelist'))\n sites_menu.add_break(ADMIN_SITES_BREAK)\n for site in sites_queryset:\n sites_menu.add_link_item(site.name, url='http://%s' % site.domain,\n active=site.pk == self.current_site.pk)\n\n # admin\n admin_menu.add_sideframe_item(_('Administration'), url=admin_reverse('index'))\n admin_menu.add_break(ADMINISTRATION_BREAK)\n\n # cms users\n admin_menu.add_sideframe_item(_('User settings'), url=admin_reverse('cms_usersettings_change'))\n admin_menu.add_break(USER_SETTINGS_BREAK)\n\n # logout\n self.add_logout_button(admin_menu)\n\n def add_users_button(self, parent):\n User = get_user_model()\n\n if User in admin.site._registry:\n opts = User._meta\n\n if self.request.user.has_perm('%s.%s' % (opts.app_label, get_permission_codename('change', opts))):\n user_changelist_url = admin_reverse('%s_%s_changelist' % (opts.app_label, opts.model_name))\n parent.add_sideframe_item(_('Users'), url=user_changelist_url)\n\n def add_logout_button(self, parent):\n # If current page is not published or has view restrictions user is redirected to the home page:\n # * published page: no redirect\n # * unpublished page: redirect to the home page\n # * published page with login_required: redirect to the home page\n # * published page with view permissions: redirect to the home page\n\n if (self.page and self.page.is_published(self.current_lang) and not self.page.login_required and\n self.page.has_view_permission(self.request, AnonymousUser())):\n on_success = self.toolbar.REFRESH_PAGE\n else:\n on_success = '/'\n\n # We'll show \"Logout Joe Bloggs\" if the name fields in auth.User are completed, else \"Logout jbloggs\". 
If\n # anything goes wrong, it'll just be \"Logout\".\n\n user_name = self.get_username()\n logout_menu_text = _('Logout %s') % user_name if user_name else _('Logout')\n\n parent.add_ajax_item(logout_menu_text, action=admin_reverse('logout'), active=True, on_success=on_success)\n\n def add_language_menu(self):\n if settings.USE_I18N:\n language_menu = self.toolbar.get_or_create_menu(LANGUAGE_MENU_IDENTIFIER, _('Language'))\n language_changer = getattr(self.request, '_language_changer', DefaultLanguageChanger(self.request))\n for code, name in get_language_tuple(self.current_site.pk):\n try:\n url = language_changer(code)\n except NoReverseMatch:\n url = DefaultLanguageChanger(self.request)(code)\n language_menu.add_link_item(name, url=url, active=self.current_lang == code)\n\n def get_username(self, user=None, default=''):\n user = user or self.request.user\n try:\n name = user.get_full_name()\n if name:\n return name\n else:\n return user.get_username()\n except (AttributeError, NotImplementedError):\n return default\n\n\n@toolbar_pool.register\nclass PageToolbar(CMSToolbar):\n watch_models = [Page]\n\n # Helpers\n\n def init_from_request(self):\n self.page = get_page_draft(self.request.current_page)\n self.title = self.get_title()\n self.permissions_activated = get_cms_setting('PERMISSION')\n\n def init_placeholders_from_request(self):\n self.placeholders = getattr(self.request, 'placeholders', [])\n self.statics = getattr(self.request, 'static_placeholders', [])\n self.dirty_statics = [sp for sp in self.statics if sp.dirty]\n\n def get_title(self):\n try:\n return Title.objects.get(page=self.page, language=self.current_lang, publisher_is_draft=True)\n except Title.DoesNotExist:\n return None\n\n def has_publish_permission(self):\n if not hasattr(self, 'publish_permission'):\n publish_permission = bool(self.page or self.statics)\n\n if self.page:\n publish_permission = self.page.has_publish_permission(self.request)\n\n if self.statics:\n publish_permission &= all(sp.has_publish_permission(self.request) for sp in self.dirty_statics)\n\n self.publish_permission = publish_permission\n\n return self.publish_permission\n\n def has_page_change_permission(self):\n if not hasattr(self, 'page_change_permission'):\n # check global permissions if CMS_PERMISSIONS is active\n global_permission = self.permissions_activated and has_page_change_permission(self.request)\n\n # check if user has page edit permission\n page_permission = self.page and self.page.has_change_permission(self.request)\n\n self.page_change_permission = global_permission or page_permission\n\n return self.page_change_permission\n\n def page_is_pending(self, page, language):\n return (page.publisher_public_id and\n page.publisher_public.get_publisher_state(language) == PUBLISHER_STATE_PENDING)\n\n def in_apphook(self):\n with force_language(self.toolbar.language):\n try:\n resolver = resolve(self.request.path_info)\n except Resolver404:\n return False\n else:\n from cms.views import details\n return resolver.func != details\n\n def get_on_delete_redirect_url(self):\n parent, language = self.page.parent, self.current_lang\n\n # if the current page has a parent in the request's current language redirect to it\n if parent and language in parent.get_languages():\n with force_language(language):\n return parent.get_absolute_url(language=language)\n\n # else redirect to root, do not redirect to Page.objects.get_home() because user could have deleted the last\n # page, if DEBUG == False this could cause a 404\n return 
reverse('pages-root')\n\n # Populate\n\n def populate(self):\n self.init_from_request()\n\n self.change_admin_menu()\n self.add_page_menu()\n self.add_history_menu()\n self.change_language_menu()\n\n def post_template_populate(self):\n self.init_placeholders_from_request()\n\n self.add_draft_live()\n self.add_publish_button()\n\n # Buttons\n\n def add_publish_button(self, classes=('cms_btn-action', 'cms_btn-publish',)):\n # only do dirty lookups if publish permission is granted else button isn't added anyway\n if self.toolbar.edit_mode and self.has_publish_permission():\n classes = list(classes or [])\n pk = self.page.pk if self.page else 0\n\n dirty = (bool(self.dirty_statics) or\n (self.page and (self.page.is_dirty(self.current_lang) or\n self.page_is_pending(self.page, self.current_lang))))\n\n if dirty:\n classes.append('cms_btn-publish-active')\n\n if self.dirty_statics or (self.page and self.page.is_published(self.current_lang)):\n title = _('Publish changes')\n else:\n title = _('Publish page now')\n classes.append('cms_publish-page')\n\n params = {}\n\n if self.dirty_statics:\n params['statics'] = ','.join(str(sp.pk) for sp in self.dirty_statics)\n\n if self.in_apphook():\n params['redirect'] = self.request.path_info\n\n with force_language(self.current_lang):\n url = admin_reverse('cms_page_publish_page', args=(pk, self.current_lang))\n\n url = add_url_parameters(url, params)\n\n self.toolbar.add_button(title, url=url, extra_classes=classes,\n side=self.toolbar.RIGHT, disabled=not dirty)\n\n def add_draft_live(self):\n if self.page:\n if self.toolbar.edit_mode and not self.title:\n self.add_page_settings_button()\n\n if self.page.has_change_permission(self.request) and self.page.is_published(self.current_lang):\n return self.add_draft_live_item()\n\n elif self.placeholders:\n return self.add_draft_live_item()\n\n for sp in self.statics:\n if sp.has_change_permission(self.request):\n return self.add_draft_live_item()\n\n def add_draft_live_item(self, template='cms/toolbar/items/live_draft.html', extra_context=None):\n context = {'request': self.request}\n context.update(extra_context or {})\n pos = len(self.toolbar.right_items)\n self.toolbar.add_item(TemplateItem(template, extra_context=context, side=self.toolbar.RIGHT), position=pos)\n\n def add_page_settings_button(self, extra_classes=('cms_btn-action',)):\n url = '%s?language=%s' % (admin_reverse('cms_page_change', args=[self.page.pk]), self.toolbar.language)\n self.toolbar.add_modal_button(_('Page settings'), url, side=self.toolbar.RIGHT, extra_classes=extra_classes)\n\n # Menus\n\n def change_language_menu(self):\n if self.toolbar.edit_mode and self.page:\n language_menu = self.toolbar.get_menu(LANGUAGE_MENU_IDENTIFIER)\n if not language_menu:\n return None\n\n languages = get_language_tuple(self.current_site.pk)\n languages_dict = dict(languages)\n\n remove = [(code, languages_dict.get(code, code)) for code in self.page.get_languages()]\n add = [l for l in languages if l not in remove]\n copy = [(code, name) for code, name in languages if code != self.current_lang and (code, name) in remove]\n\n if add:\n language_menu.add_break(ADD_PAGE_LANGUAGE_BREAK)\n page_change_url = admin_reverse('cms_page_change', args=(self.page.pk,))\n title = _('Add %(language)s Translation')\n for code, name in add:\n url = add_url_parameters(page_change_url, language=code)\n language_menu.add_modal_item(title % {'language': name}, url=url)\n\n if remove:\n language_menu.add_break(REMOVE_PAGE_LANGUAGE_BREAK)\n translation_delete_url = 
admin_reverse('cms_page_delete_translation', args=(self.page.pk,))\n title = _('Delete %(language)s Translation')\n disabled = len(remove) == 1\n for code, name in remove:\n url = add_url_parameters(translation_delete_url, language=code)\n language_menu.add_modal_item(title % {'language': name}, url=url, disabled=disabled)\n\n if copy:\n language_menu.add_break(COPY_PAGE_LANGUAGE_BREAK)\n page_copy_url = admin_reverse('cms_page_copy_language', args=(self.page.pk,))\n title = _('Copy all plugins from %s')\n question = _('Are you sure you want copy all plugins from %s?')\n for code, name in copy:\n language_menu.add_ajax_item(title % name, action=page_copy_url,\n data={'source_language': code, 'target_language': self.current_lang},\n question=question % name, on_success=self.toolbar.REFRESH_PAGE)\n\n def change_admin_menu(self):\n if self.has_page_change_permission():\n admin_menu = self.toolbar.get_or_create_menu(ADMIN_MENU_IDENTIFIER)\n url = admin_reverse('cms_page_changelist') # cms page admin\n params = {'language': self.toolbar.language}\n if self.page:\n params['page_id'] = self.page.pk\n url = add_url_parameters(url, params)\n admin_menu.add_sideframe_item(_('Pages'), url=url, position=0)\n\n def add_page_menu(self):\n if self.page and self.has_page_change_permission():\n edit_mode = self.toolbar.edit_mode\n refresh = self.toolbar.REFRESH_PAGE\n\n # menu for current page\n current_page_menu = self.toolbar.get_or_create_menu(PAGE_MENU_IDENTIFIER, _('Page'), position=1)\n\n # page operations menu\n add_page_menu = current_page_menu.get_or_create_menu(PAGE_MENU_ADD_IDENTIFIER, _('Add Page'))\n app_page_url = admin_reverse('cms_page_add')\n add_page_menu_sideframe_items = (\n (_('New Page'), {'edit': 1, 'position': 'last-child', 'target': self.page.parent_id or ''}),\n (_('New Sub Page'), {'edit': 1, 'position': 'last-child', 'target': self.page.pk}),\n (_('Duplicate this Page'), {'copy_target': self.page.pk})\n )\n\n for title, params in add_page_menu_sideframe_items:\n params.update(language=self.toolbar.language)\n add_page_menu.add_sideframe_item(title, url=add_url_parameters(app_page_url, params))\n\n # first break\n current_page_menu.add_break(PAGE_MENU_FIRST_BREAK)\n\n # page edit\n page_edit_url = '?%s' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')\n current_page_menu.add_link_item(_('Edit this Page'), disabled=edit_mode, url=page_edit_url)\n\n # page settings\n page_settings_url = admin_reverse('cms_page_change', args=(self.page.pk,))\n page_settings_url = add_url_parameters(page_settings_url, language=self.toolbar.language)\n current_page_menu.add_modal_item(_('Page settings'), url=page_settings_url, disabled=not edit_mode,\n on_close=refresh)\n\n # templates menu\n if self.toolbar.build_mode or edit_mode:\n templates_menu = current_page_menu.get_or_create_menu('templates', _('Templates'))\n action = admin_reverse('cms_page_change_template', args=(self.page.pk,))\n for path, name in get_cms_setting('TEMPLATES'):\n active = self.page.template == path\n if path == TEMPLATE_INHERITANCE_MAGIC:\n templates_menu.add_break(TEMPLATE_MENU_BREAK)\n templates_menu.add_ajax_item(name, action=action, data={'template': path}, active=active,\n on_success=refresh)\n\n # second break\n current_page_menu.add_break(PAGE_MENU_SECOND_BREAK)\n\n # advanced settings\n advanced_url = admin_reverse('cms_page_advanced', args=(self.page.pk,))\n advanced_url = add_url_parameters(advanced_url, language=self.toolbar.language)\n advanced_disabled = not self.page.has_advanced_settings_permission(self.request) 
or not edit_mode\n current_page_menu.add_modal_item(_('Advanced settings'), url=advanced_url, disabled=advanced_disabled)\n\n # permissions\n if self.permissions_activated:\n permissions_url = admin_reverse('cms_page_permissions', args=(self.page.pk,))\n permission_disabled = not edit_mode or not self.page.has_change_permissions_permission(self.request)\n current_page_menu.add_modal_item(_('Permissions'), url=permissions_url, disabled=permission_disabled)\n\n # dates settings\n dates_url = admin_reverse('cms_page_dates', args=(self.page.pk,))\n current_page_menu.add_modal_item(_('Publishing dates'), url=dates_url, disabled=not edit_mode)\n\n # third break\n current_page_menu.add_break(PAGE_MENU_THIRD_BREAK)\n\n # navigation toggle\n nav_title = _('Hide in navigation') if self.page.in_navigation else _('Display in navigation')\n nav_action = admin_reverse('cms_page_change_innavigation', args=(self.page.pk,))\n current_page_menu.add_ajax_item(nav_title, action=nav_action, disabled=not edit_mode, on_success=refresh)\n\n # publisher\n if self.title:\n if self.title.published:\n publish_title = _('Unpublish page')\n publish_url = admin_reverse('cms_page_unpublish', args=(self.page.pk, self.current_lang))\n else:\n publish_title = _('Publish page')\n publish_url = admin_reverse('cms_page_publish_page', args=(self.page.pk, self.current_lang))\n current_page_menu.add_ajax_item(publish_title, action=publish_url, disabled=not edit_mode,\n on_success=refresh)\n\n # fourth break\n current_page_menu.add_break(PAGE_MENU_FOURTH_BREAK)\n\n # delete\n delete_url = admin_reverse('cms_page_delete', args=(self.page.pk,))\n on_delete_redirect_url = self.get_on_delete_redirect_url()\n current_page_menu.add_modal_item(_('Delete page'), url=delete_url, on_close=on_delete_redirect_url,\n disabled=not edit_mode)\n\n # last break\n current_page_menu.add_break(PAGE_MENU_LAST_BREAK)\n\n # page type\n page_type_url = admin_reverse('cms_page_add_page_type')\n page_type_url = add_url_parameters(page_type_url, copy_target=self.page.pk, language=self.toolbar.language)\n current_page_menu.add_modal_item(_('Save as Page Type'), page_type_url, disabled=not edit_mode)\n\n def add_history_menu(self):\n if self.toolbar.edit_mode and self.page:\n refresh = self.toolbar.REFRESH_PAGE\n history_menu = self.toolbar.get_or_create_menu(HISTORY_MENU_IDENTIFIER, _('History'), position=2)\n\n if is_installed('reversion'):\n import reversion\n from reversion.models import Revision\n\n versions = reversion.get_for_object(self.page)\n if self.page.revision_id:\n current_revision = Revision.objects.get(pk=self.page.revision_id)\n has_undo = versions.filter(revision__pk__lt=current_revision.pk).exists()\n has_redo = versions.filter(revision__pk__gt=current_revision.pk).exists()\n else:\n has_redo = False\n has_undo = versions.count() > 1\n\n undo_action = admin_reverse('cms_page_undo', args=(self.page.pk,))\n redo_action = admin_reverse('cms_page_redo', args=(self.page.pk,))\n\n history_menu.add_ajax_item(_('Undo'), action=undo_action, disabled=not has_undo, on_success=refresh)\n history_menu.add_ajax_item(_('Redo'), action=redo_action, disabled=not has_redo, on_success=refresh)\n\n history_menu.add_break(HISTORY_MENU_BREAK)\n\n revert_action = admin_reverse('cms_page_revert_page', args=(self.page.pk, self.current_lang))\n revert_question = _('Are you sure you want to revert to live?')\n history_menu.add_ajax_item(_('Revert to live'), action=revert_action, question=revert_question,\n disabled=not self.page.is_dirty(self.current_lang), 
on_success=refresh)\n history_menu.add_modal_item(_('View history'), url=admin_reverse('cms_page_history', args=(self.page.pk,)))\n",
"path": "cms/cms_toolbar.py"
}
] | diff --git a/cms/cms_toolbar.py b/cms/cms_toolbar.py
index 9285aaf348d..992099bd4e3 100644
--- a/cms/cms_toolbar.py
+++ b/cms/cms_toolbar.py
@@ -274,8 +274,8 @@ def populate(self):
     def post_template_populate(self):
         self.init_placeholders_from_request()
 
-        self.add_publish_button()
         self.add_draft_live()
+        self.add_publish_button()
 
     # Buttons
 
diff --git a/cms/static/cms/css/cms.base.css b/cms/static/cms/css/cms.base.css
index 537dcfcdf74..20c578b28ae 100644
--- a/cms/static/cms/css/cms.base.css
+++ b/cms/static/cms/css/cms.base.css
@@ -2,11 +2,11 @@
* @copyright: https://github.com/divio/django-cms
*/.cms_reset div,.cms_reset p,.cms_reset a,.cms_reset a:hover,.cms_reset a:active,.cms_reset a:focus,.cms_reset ul,.cms_reset li,.cms_reset form,.cms_reset fieldset,.cms_reset label,.cms_reset input,.cms_reset textarea{font:normal 13px/20px Helvetica,Arial,sans-serif;color:#222;text-decoration:none;text-align:left;outline:none;list-style-type:none;height:auto;padding:0;margin:0;border:none;background:none}#page_form_lang_tabs{position:relative}#page_form_lang_tabs input.language_button{background:#ccc}#page_form_lang_tabs input.selected{color:black;text-shadow:none;background:white}#page_form_lang_tabs input.notfilled{color:#bbb;background:none}#page_form_lang_tabs .lang_tabs_line{position:absolute;left:0;bottom:-5px;width:100%;height:5px;background:white}.cms_dialog{position:absolute;left:50%;top:50%;z-index:99999;width:500px;height:200px;padding:25px;margin:-100px 0 0 -275px;background:white;border:1px solid #ccc;-moz-border-radius:5px;-webkit-border-radius:5px;border-radius:5px}.cms_dialog h1{padding:0;margin:0 0 15px}.cms_dialog form{padding:15px 0;margin:15px 0;border-top:1px solid #f3f3f3}.cms_toolbar-noscroll{position:fixed;overflow-y:scroll;width:100%}/*!
* @copyright: https://github.com/divio/django-cms
- */#cms_toolbar .cms_loader{background:#fcfcfc url("../img/loader.gif") no-repeat center center !important}#cms_toolbar .cms_btn{color:#666;-moz-border-radius:3px;-webkit-border-radius:3px;border-radius:3px;border:1px solid #e6e6e6;background:#e6e6e6;background-image:url('data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0idXRmLTgiPz4gPHN2ZyB2ZXJzaW9uPSIxLjEiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyI+PGRlZnM+PGxpbmVhckdyYWRpZW50IGlkPSJncmFkIiBncmFkaWVudFVuaXRzPSJvYmplY3RCb3VuZGluZ0JveCIgeDE9IjAuNSIgeTE9IjAuMCIgeDI9IjAuNSIgeTI9IjEuMCI+PHN0b3Agb2Zmc2V0PSIwJSIgc3RvcC1jb2xvcj0iI2YyZjJmMiIvPjxzdG9wIG9mZnNldD0iMTAwJSIgc3RvcC1jb2xvcj0iI2U2ZTZlNiIvPjwvbGluZWFyR3JhZGllbnQ+PC9kZWZzPjxyZWN0IHg9IjAiIHk9IjAiIHdpZHRoPSIxMDAlIiBoZWlnaHQ9IjEwMCUiIGZpbGw9InVybCgjZ3JhZCkiIC8+PC9zdmc+IA==');background-size:100%;background-image:-webkit-gradient(linear, 50% 0%, 50% 100%, color-stop(0%, #f2f2f2),color-stop(100%, #e6e6e6));background-image:-moz-linear-gradient(top, #f2f2f2,#e6e6e6);background-image:-webkit-linear-gradient(top, #f2f2f2,#e6e6e6);background-image:linear-gradient(to bottom, #f2f2f2,#e6e6e6);-moz-box-shadow:inset #f2f2f2 0px 1px 0px;-webkit-box-shadow:inset #f2f2f2 0px 1px 0px;box-shadow:inset #f2f2f2 0px 1px 0px}#cms_toolbar .cms_btn:hover,#cms_toolbar .cms_btn:active,#cms_toolbar .cms_btn:focus{background:#e6e6e6;border:1px solid #e6e6e6;-moz-box-shadow:none;-webkit-box-shadow:none;box-shadow:none}#cms_toolbar .cms_btn:active,#cms_toolbar .cms_btn:focus{border:1px solid #ccc;background:#ccc}#cms_toolbar .cms_btn-disabled{border-right:1px solid #ccc;border-top:1px solid #ccc;-moz-box-shadow:inset 0px 1px 0px #e6e6e6;-webkit-box-shadow:inset 0px 1px 0px #e6e6e6;box-shadow:inset 0px 1px 0px #e6e6e6;background:#ededed}#cms_toolbar .cms_btn-disabled:hover,#cms_toolbar .cms_btn-disabled:active,#cms_toolbar .cms_btn-disabled:focus{background-color:#e6e6e6}#cms_toolbar .cms_btn-active{color:white;border:1px solid #333 !important;border-bottom:none !important;-moz-box-shadow:inset 0 1px 0 #999;-webkit-box-shadow:inset 0 1px 0 #999;box-shadow:inset 0 1px 0 #999;background:#666;background-image:url('data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0idXRmLTgiPz4gPHN2ZyB2ZXJzaW9uPSIxLjEiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyI+PGRlZnM+PGxpbmVhckdyYWRpZW50IGlkPSJncmFkIiBncmFkaWVudFVuaXRzPSJvYmplY3RCb3VuZGluZ0JveCIgeDE9IjAuNSIgeTE9IjAuMCIgeDI9IjAuNSIgeTI9IjEuMCI+PHN0b3Agb2Zmc2V0PSIwJSIgc3RvcC1jb2xvcj0iIzY2NjY2NiIvPjxzdG9wIG9mZnNldD0iMTAwJSIgc3RvcC1jb2xvcj0iIzMzMzMzMyIvPjwvbGluZWFyR3JhZGllbnQ+PC9kZWZzPjxyZWN0IHg9IjAiIHk9IjAiIHdpZHRoPSIxMDAlIiBoZWlnaHQ9IjEwMCUiIGZpbGw9InVybCgjZ3JhZCkiIC8+PC9zdmc+IA==');background-size:100%;background-image:-webkit-gradient(linear, 50% 0%, 50% 100%, color-stop(0%, #666666),color-stop(100%, #333333));background-image:-moz-linear-gradient(top, #666666,#333333);background-image:-webkit-linear-gradient(top, #666666,#333333);background-image:linear-gradient(to bottom, #666666,#333333)}#cms_toolbar .cms_btn-active:hover,#cms_toolbar .cms_btn-active:active,#cms_toolbar .cms_btn-active:focus{background:#454545;-moz-box-shadow:none;-webkit-box-shadow:none;box-shadow:none}#cms_toolbar .cms_btn-active:active,#cms_toolbar .cms_btn-active:focus{background:#000}#cms_toolbar .cms_btn-action{color:white;border:1px solid #0e72ec !important;-moz-box-shadow:inset #3abcf3 0px 1px 0px;-webkit-box-shadow:inset #3abcf3 0px 1px 0px;box-shadow:inset #3abcf3 0px 1px 
0px;background:#0eaaec;background-image:url('data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0idXRmLTgiPz4gPHN2ZyB2ZXJzaW9uPSIxLjEiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyI+PGRlZnM+PGxpbmVhckdyYWRpZW50IGlkPSJncmFkIiBncmFkaWVudFVuaXRzPSJvYmplY3RCb3VuZGluZ0JveCIgeDE9IjAuNSIgeTE9IjAuMCIgeDI9IjAuNSIgeTI9IjEuMCI+PHN0b3Agb2Zmc2V0PSIwJSIgc3RvcC1jb2xvcj0iIzBlOTdlYyIvPjxzdG9wIG9mZnNldD0iMTAwJSIgc3RvcC1jb2xvcj0iIzBlNzJlYyIvPjwvbGluZWFyR3JhZGllbnQ+PC9kZWZzPjxyZWN0IHg9IjAiIHk9IjAiIHdpZHRoPSIxMDAlIiBoZWlnaHQ9IjEwMCUiIGZpbGw9InVybCgjZ3JhZCkiIC8+PC9zdmc+IA==');background-size:100%;background-image:-webkit-gradient(linear, 50% 0%, 50% 100%, color-stop(0%, #0e97ec),color-stop(100%, #0e72ec));background-image:-moz-linear-gradient(top, #0e97ec,#0e72ec);background-image:-webkit-linear-gradient(top, #0e97ec,#0e72ec);background-image:linear-gradient(to bottom, #0e97ec,#0e72ec)}#cms_toolbar .cms_btn-action:hover,#cms_toolbar .cms_btn-action:active,#cms_toolbar .cms_btn-action:focus{background:#0e72ec;-moz-box-shadow:none;-webkit-box-shadow:none;box-shadow:none}#cms_toolbar .cms_btn-action:active,#cms_toolbar .cms_btn-action:focus{background:#0b5bbc}#cms_toolbar .cms_btn-caution{color:white;border:1px solid #ff4000 !important;-moz-box-shadow:inset #f66 0px 1px 0px;-webkit-box-shadow:inset #f66 0px 1px 0px;box-shadow:inset #f66 0px 1px 0px;background:red;background-image:url('data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0idXRmLTgiPz4gPHN2ZyB2ZXJzaW9uPSIxLjEiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyI+PGRlZnM+PGxpbmVhckdyYWRpZW50IGlkPSJncmFkIiBncmFkaWVudFVuaXRzPSJvYmplY3RCb3VuZGluZ0JveCIgeDE9IjAuNSIgeTE9IjAuMCIgeDI9IjAuNSIgeTI9IjEuMCI+PHN0b3Agb2Zmc2V0PSIwJSIgc3RvcC1jb2xvcj0iI2ZmMTUwMCIvPjxzdG9wIG9mZnNldD0iMTAwJSIgc3RvcC1jb2xvcj0iI2ZmNDAwMCIvPjwvbGluZWFyR3JhZGllbnQ+PC9kZWZzPjxyZWN0IHg9IjAiIHk9IjAiIHdpZHRoPSIxMDAlIiBoZWlnaHQ9IjEwMCUiIGZpbGw9InVybCgjZ3JhZCkiIC8+PC9zdmc+IA==');background-size:100%;background-image:-webkit-gradient(linear, 50% 0%, 50% 100%, color-stop(0%, #ff1500),color-stop(100%, #ff4000));background-image:-moz-linear-gradient(top, #ff1500,#ff4000);background-image:-webkit-linear-gradient(top, #ff1500,#ff4000);background-image:linear-gradient(to bottom, #ff1500,#ff4000)}#cms_toolbar .cms_btn-caution:hover,#cms_toolbar .cms_btn-caution:active,#cms_toolbar .cms_btn-caution:focus{background:#ff4000;-moz-box-shadow:none;-webkit-box-shadow:none;box-shadow:none}#cms_toolbar .cms_btn-caution:active,#cms_toolbar .cms_btn-caution:focus{background:#c30}#cms_toolbar .cms_btn-publish{display:none}#cms_toolbar .cms_btn-publish-active{display:block}#cms_toolbar .cms_tooltip{visibility:hidden;position:absolute;left:0;top:0;-moz-border-radius:3px;-webkit-border-radius:3px;border-radius:3px;font-size:11px;line-height:11px;font-weight:bold;white-space:nowrap;padding:5px 7px 7px 24px;margin:0;color:#e6e6e6;background:#454545 url("../img/toolbar/sprite_toolbar.png") no-repeat -78px -169px}#cms_toolbar .cms_tooltip span{float:right;position:absolute;right:0;top:20px;-moz-border-radius:3px 0 3px 3px;-webkit-border-radius:3px;border-radius:3px 0 3px 3px;color:white;font-weight:normal;padding:5px 7px;background:#454545}/*!
+ */#cms_toolbar .cms_loader{background:#fcfcfc url("../img/loader.gif") no-repeat center center !important}#cms_toolbar .cms_btn{color:#666;-moz-border-radius:3px;-webkit-border-radius:3px;border-radius:3px;border:1px solid #e6e6e6;background:#e6e6e6;background-image:-moz-linear-gradient(top, #f2f2f2,#e6e6e6);background-image:-webkit-linear-gradient(top, #f2f2f2,#e6e6e6);background-image:linear-gradient(to bottom, #f2f2f2,#e6e6e6);-moz-box-shadow:inset #f2f2f2 0px 1px 0px;-webkit-box-shadow:inset #f2f2f2 0px 1px 0px;box-shadow:inset #f2f2f2 0px 1px 0px}#cms_toolbar .cms_btn:hover,#cms_toolbar .cms_btn:active,#cms_toolbar .cms_btn:focus{background:#e6e6e6;border:1px solid #e6e6e6;-moz-box-shadow:none;-webkit-box-shadow:none;box-shadow:none}#cms_toolbar .cms_btn:active,#cms_toolbar .cms_btn:focus{border:1px solid #ccc;background:#ccc}#cms_toolbar .cms_btn-disabled{border-right:1px solid #ccc;border-top:1px solid #ccc;-moz-box-shadow:inset 0px 1px 0px #e6e6e6;-webkit-box-shadow:inset 0px 1px 0px #e6e6e6;box-shadow:inset 0px 1px 0px #e6e6e6;background:#ededed}#cms_toolbar .cms_btn-disabled:hover,#cms_toolbar .cms_btn-disabled:active,#cms_toolbar .cms_btn-disabled:focus{background-color:#e6e6e6}#cms_toolbar .cms_btn-active{color:white;border:1px solid #333 !important;border-bottom:none !important;-moz-box-shadow:inset 0 1px 0 #999;-webkit-box-shadow:inset 0 1px 0 #999;box-shadow:inset 0 1px 0 #999;background:#666;background-image:-moz-linear-gradient(top, #666666,#333333);background-image:-webkit-linear-gradient(top, #666666,#333333);background-image:linear-gradient(to bottom, #666666,#333333)}#cms_toolbar .cms_btn-active:hover,#cms_toolbar .cms_btn-active:active,#cms_toolbar .cms_btn-active:focus{background:#454545;-moz-box-shadow:none;-webkit-box-shadow:none;box-shadow:none}#cms_toolbar .cms_btn-active:active,#cms_toolbar .cms_btn-active:focus{background:#000}#cms_toolbar .cms_btn-action{color:white;border:1px solid #0e72ec !important;-moz-box-shadow:inset #3abcf3 0px 1px 0px;-webkit-box-shadow:inset #3abcf3 0px 1px 0px;box-shadow:inset #3abcf3 0px 1px 0px;background:#0eaaec;background-image:-moz-linear-gradient(top, #0e97ec,#0e72ec);background-image:-webkit-linear-gradient(top, #0e97ec,#0e72ec);background-image:linear-gradient(to bottom, #0e97ec,#0e72ec)}#cms_toolbar .cms_btn-action:hover,#cms_toolbar .cms_btn-action:active,#cms_toolbar .cms_btn-action:focus{background:#0e72ec;-moz-box-shadow:none;-webkit-box-shadow:none;box-shadow:none}#cms_toolbar .cms_btn-action:active,#cms_toolbar .cms_btn-action:focus{background:#0b5bbc}#cms_toolbar .cms_btn-caution{color:white;border:1px solid #ff4000 !important;-moz-box-shadow:inset #f66 0px 1px 0px;-webkit-box-shadow:inset #f66 0px 1px 0px;box-shadow:inset #f66 0px 1px 0px;background:red;background-image:-moz-linear-gradient(top, #ff1500,#ff4000);background-image:-webkit-linear-gradient(top, #ff1500,#ff4000);background-image:linear-gradient(to bottom, #ff1500,#ff4000)}#cms_toolbar .cms_btn-caution:hover,#cms_toolbar .cms_btn-caution:active,#cms_toolbar .cms_btn-caution:focus{background:#ff4000;-moz-box-shadow:none;-webkit-box-shadow:none;box-shadow:none}#cms_toolbar .cms_btn-caution:active,#cms_toolbar .cms_btn-caution:focus{background:#c30}#cms_toolbar .cms_btn-publish{display:none}#cms_toolbar .cms_btn-publish-active{display:block}#cms_toolbar 
.cms_tooltip{visibility:hidden;position:absolute;left:0;top:0;-moz-border-radius:3px;-webkit-border-radius:3px;border-radius:3px;font-size:11px;line-height:11px;font-weight:bold;white-space:nowrap;padding:5px 7px 7px 24px;margin:0;color:#e6e6e6;background:#454545 url("../img/toolbar/sprite_toolbar.png") no-repeat -78px -169px}#cms_toolbar .cms_tooltip span{float:right;position:absolute;right:0;top:20px;-moz-border-radius:3px 0 3px 3px;-webkit-border-radius:3px;border-radius:3px 0 3px 3px;color:white;font-weight:normal;padding:5px 7px;background:#454545}/*!
* @copyright: https://github.com/divio/django-cms
*/.cms_plugin{display:inline}.cms_plugin-active{outline:#0e72ec auto 4px}.cms_placeholder{height:0px;overflow:hidden}.cms_render_model_icon{display:inline-block;width:18px;height:18px;padding:0;margin:0;cursor:pointer}.cms_render_model_icon img{max-width:none;position:relative;padding:0 !important;margin:0 !important;background:url("../img/toolbar/render_model_icon.png") no-repeat}.cms_render_model_add{display:inline-block;width:18px;height:18px;padding:0;margin:0;cursor:pointer}.cms_render_model_add img{max-width:none;position:relative;padding:0 !important;margin:0 !important;background:url("../img/toolbar/render_model_add.png") no-repeat}/*!
* @copyright: https://github.com/divio/django-cms
- */#cms_toolbar{position:absolute;left:0;top:5px;z-index:9999999;width:100%}#cms_toolbar .cms_toolbar{display:none;position:fixed;left:0;top:0;z-index:999999;width:100%;min-width:320px;height:30px;border-bottom:1px solid #666 !important;background-color:white;background:rgba(250,250,250,0);background-image:url('data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0idXRmLTgiPz4gPHN2ZyB2ZXJzaW9uPSIxLjEiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyI+PGRlZnM+PGxpbmVhckdyYWRpZW50IGlkPSJncmFkIiBncmFkaWVudFVuaXRzPSJvYmplY3RCb3VuZGluZ0JveCIgeDE9IjAuNSIgeTE9IjAuMCIgeDI9IjAuNSIgeTI9IjEuMCI+PHN0b3Agb2Zmc2V0PSIwJSIgc3RvcC1jb2xvcj0iI2ZhZmFmYSIgc3RvcC1vcGFjaXR5PSIwLjk3Ii8+PHN0b3Agb2Zmc2V0PSI1MCUiIHN0b3AtY29sb3I9IiNmY2ZjZmMiIHN0b3Atb3BhY2l0eT0iMC45NyIvPjxzdG9wIG9mZnNldD0iMTAwJSIgc3RvcC1jb2xvcj0iI2ZhZmFmYSIgc3RvcC1vcGFjaXR5PSIwLjk1Ii8+PC9saW5lYXJHcmFkaWVudD48L2RlZnM+PHJlY3QgeD0iMCIgeT0iMCIgd2lkdGg9IjEwMCUiIGhlaWdodD0iMTAwJSIgZmlsbD0idXJsKCNncmFkKSIgLz48L3N2Zz4g');background-size:100%;background-image:-webkit-gradient(linear, 50% 0%, 50% 100%, color-stop(0%, rgba(250,250,250,0.97)),color-stop(50%, rgba(252,252,252,0.97)),color-stop(100%, rgba(250,250,250,0.95)));background-image:-moz-linear-gradient(top, rgba(250,250,250,0.97) 0%,rgba(252,252,252,0.97) 50%,rgba(250,250,250,0.95) 100%);background-image:-webkit-linear-gradient(top, rgba(250,250,250,0.97) 0%,rgba(252,252,252,0.97) 50%,rgba(250,250,250,0.95) 100%);background-image:linear-gradient(to bottom, rgba(250,250,250,0.97) 0%,rgba(252,252,252,0.97) 50%,rgba(250,250,250,0.95) 100%);-moz-box-shadow:0 0 5px rgba(0,0,0,0.2);-webkit-box-shadow:0 0 5px rgba(0,0,0,0.2);box-shadow:0 0 5px rgba(0,0,0,0.2);background/**/:#fcfcfc}#cms_toolbar .cms_toolbar .cms_toolbar-left{float:left;padding-left:10px;position:relative;z-index:10}#cms_toolbar .cms_toolbar .cms_toolbar-right{float:right;padding-right:32px;position:relative;z-index:10}#cms_toolbar .cms_toolbar .cms_toolbar-left .cms_toolbar-item{margin-left:10px}#cms_toolbar .cms_toolbar .cms_toolbar-right .cms_toolbar-item{margin-right:20px}#cms_toolbar .cms_toolbar .cms_toolbar-item{float:left}#cms_toolbar .cms_toolbar .cms_toolbar-item-buttons a{border-bottom:none !important}@media only screen and (max-width: 800px){#cms_toolbar .cms_toolbar-right{display:none}}#cms_toolbar.cms_toolbar-debug .cms_toolbar{top:5px !important}#cms_toolbar.cms_toolbar-debug .cms_toolbar-trigger{top:5px !important}#cms_toolbar.cms_toolbar-debug .cms_debug-bar{position:fixed;left:0;top:0;z-index:99999999;width:100%;height:4px;border-bottom:1px solid #ddd;background:#fdffc8 url("../img/toolbar/sprite_toolbar.png") repeat-x left -444px}#cms_toolbar.cms_toolbar-debug #container{padding-top:35px !important}#cms_toolbar .cms_toolbar-item-navigation li{float:left;position:relative;zoom:1}#cms_toolbar .cms_toolbar-item-navigation li a{float:left;padding:5px 10px;zoom:1;cursor:default}#cms_toolbar .cms_toolbar-item-navigation li ul{display:none}#cms_toolbar .cms_toolbar-item-navigation>li:first-child>a span{font-weight:800;line-height:12px}#cms_toolbar .cms_toolbar-item-navigation .cms_toolbar-item-navigation-hover ul{position:absolute;left:0;top:30px;display:block;min-width:180px;padding:4px 0;-moz-border-radius:0 0 4px 4px;-webkit-border-radius:0;border-radius:0 0 4px 4px;border:1px solid white;border-top:none;background-color:white;background:rgba(255,255,255,0.97);-moz-box-shadow:0 1px 1px rgba(0,0,0,0.4);-webkit-box-shadow:0 1px 1px rgba(0,0,0,0.4);box-shadow:0 1px 1px rgba(0,0,0,0.4)}#cms_toolbar .cms_toolbar-item-navigation 
.cms_toolbar-item-navigation-hover ul li{float:none;zoom:1}#cms_toolbar .cms_toolbar-item-navigation .cms_toolbar-item-navigation-hover ul li a{float:none;display:block;zoom:1;cursor:pointer;white-space:nowrap;padding:2px 10px 2px 15px}#cms_toolbar .cms_toolbar-item-navigation .cms_toolbar-item-navigation-hover ul ul{-moz-border-radius:0 4px 4px 0;-webkit-border-radius:0;border-radius:0 4px 4px 0;border-top:1px solid #f5f5f5}#cms_toolbar .cms_toolbar-item-navigation .cms_toolbar-item-navigation-hover .cms_toolbar-item-navigation-children ul{display:none;left:100%;top:-5px}#cms_toolbar .cms_toolbar-item-navigation .cms_toolbar-item-navigation-hover .cms_toolbar-item-navigation-children>a{cursor:default}#cms_toolbar .cms_toolbar-item-navigation .cms_toolbar-item-navigation-hover .cms_toolbar-item-navigation-children>a span{display:block;background:url("../img/toolbar/sprite_toolbar.png") no-repeat right -270px}#cms_toolbar .cms_toolbar-item-navigation .cms_toolbar-item-navigation-hover>a{color:white;background:#0e72ec;background-image:url('data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0idXRmLTgiPz4gPHN2ZyB2ZXJzaW9uPSIxLjEiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyI+PGRlZnM+PGxpbmVhckdyYWRpZW50IGlkPSJncmFkIiBncmFkaWVudFVuaXRzPSJvYmplY3RCb3VuZGluZ0JveCIgeDE9IjAuNSIgeTE9IjAuMCIgeDI9IjAuNSIgeTI9IjEuMCI+PHN0b3Agb2Zmc2V0PSIwJSIgc3RvcC1jb2xvcj0iIzBlOTdlYyIvPjxzdG9wIG9mZnNldD0iMTAwJSIgc3RvcC1jb2xvcj0iIzBlNzJlYyIvPjwvbGluZWFyR3JhZGllbnQ+PC9kZWZzPjxyZWN0IHg9IjAiIHk9IjAiIHdpZHRoPSIxMDAlIiBoZWlnaHQ9IjEwMCUiIGZpbGw9InVybCgjZ3JhZCkiIC8+PC9zdmc+IA==');background-size:100%;background-image:-webkit-gradient(linear, 50% 0%, 50% 100%, color-stop(0%, #0e97ec),color-stop(100%, #0e72ec));background-image:-moz-linear-gradient(top, #0e97ec,#0e72ec);background-image:-webkit-linear-gradient(top, #0e97ec,#0e72ec);background-image:linear-gradient(to bottom, #0e97ec,#0e72ec)}#cms_toolbar .cms_toolbar-item-navigation .cms_toolbar-item-navigation-hover>a span{background-position:right -300px !important}#cms_toolbar .cms_toolbar-item-navigation .cms_toolbar-item-navigation-hover .cms_toolbar-item-navigation-active>a{font-weight:800}#cms_toolbar .cms_toolbar-item-navigation .cms_toolbar-item-navigation-break{height:1px;margin:0 0 4px;padding:0 0 3px;text-indent:-119988px;overflow:hidden;text-align:left;text-transform:capitalize;border-bottom:1px solid #e6e6e6}#cms_toolbar .cms_toolbar-item-navigation .cms_toolbar-item-navigation-disabled a{cursor:default !important;filter:progid:DXImageTransform.Microsoft.Alpha(Opacity=20);opacity:0.2}#cms_toolbar .cms_toolbar-item-navigation .cms_toolbar-item-navigation-disabled a:hover,#cms_toolbar .cms_toolbar-item-navigation .cms_toolbar-item-navigation-disabled a:active,#cms_toolbar .cms_toolbar-item-navigation .cms_toolbar-item-navigation-disabled a:focus{-moz-box-shadow:none;-webkit-box-shadow:none;box-shadow:none;background:none !important;color:black !important}#cms_toolbar .cms_toolbar-item-navigation .cms_toolbar-item-navigation-disabled ul{display:none !important}#cms_toolbar .cms_toolbar-item-cms-mode-switcher{display:none}#cms_toolbar .cms_messages{display:none;position:fixed;z-index:999999;top:30px;width:300px;min-height:14px;margin:0;padding:6px 10px 8px;background:rgba(0,0,0,0.74);-moz-border-radius:0 0 3px 3px;-webkit-border-radius:0;border-radius:0 0 3px 3px;color:white;font-size:12px;line-height:16px;font-weight:200}#cms_toolbar .cms_messages *{color:white;font-size:12px;line-height:16px;font-weight:200}#cms_toolbar .cms_messages 
a{color:#0eaaec}#cms_toolbar .cms_messages a:hover{text-decoration:underline}#cms_toolbar .cms_messages strong{color:#3abcf3;font-weight:200}#cms_toolbar .cms_messages ul{display:inline;color:white}#cms_toolbar .cms_messages ul li{display:inline;color:white;font-weight:200}#cms_toolbar .cms_messages .cms_messages-close{display:none;float:right;cursor:pointer;width:20px;height:14px;margin-left:10px;position:relative;top:-2px;left:3px;background:url("../img/toolbar/sprite_toolbar.png") no-repeat -100px -90px}#cms_toolbar .cms_messages .cms_messages-close:hover{background-position:-120px -90px}#cms_toolbar .cms_messages-error strong{color:red}#cms_toolbar .cms_toolbar-item-logo{margin:0 !important}#cms_toolbar .cms_toolbar-item-logo a{display:block;width:92px;height:20px;margin:5px 0;text-indent:-119988px;overflow:hidden;text-align:left;text-transform:capitalize;background:url("../img/toolbar/sprite_toolbar.png") no-repeat left top}#cms_toolbar .cms_toolbar-item-logo a:hover,#cms_toolbar .cms_toolbar-item-logo a:active,#cms_toolbar .cms_toolbar-item-logo a:focus{background-position:left -20px}#cms_toolbar .cms_form-login{padding:3px 0 0 0}#cms_toolbar .cms_form-login label{float:left;cursor:pointer;padding-left:10px}#cms_toolbar .cms_form-login label span{padding-top:1px;display:inline-block;vertical-align:middle;*vertical-align:auto;*zoom:1;*display:inline}#cms_toolbar .cms_form-login input[type="text"],#cms_toolbar .cms_form-login input[type="password"]{font-size:13px;line-height:13px;width:100px;padding:3px 5px;margin:0;-moz-border-radius:3px;-webkit-border-radius:3px;border-radius:3px;color:#666;border:1px solid #d9d9d9;-moz-box-shadow:0px 1px 0px #fff;-webkit-box-shadow:0px 1px 0px #fff;box-shadow:0px 1px 0px #fff}#cms_toolbar .cms_form-login input[type="text"]:focus,#cms_toolbar .cms_form-login input[type="password"]:focus{border-color:#0eaaec;-moz-box-shadow:inset 0px 0px 2px #e6e6e6;-webkit-box-shadow:inset 0px 0px 2px #e6e6e6;box-shadow:inset 0px 0px 2px #e6e6e6;-moz-transition:outline,0.2s 1s;-o-transition:outline,0.2s 1s;-webkit-transition:outline,0.2s 1s;transition:outline 0.2s 1s}#cms_toolbar .cms_form-login input[type="submit"]{display:block;color:white;font-size:12px;text-transform:uppercase;cursor:pointer;height:23px;padding:1px 15px 0;-moz-border-radius:3px;-webkit-border-radius:3px;border-radius:3px;border:1px solid #333;background-color:#666;-moz-box-shadow:inset 0 1px 0 #999;-webkit-box-shadow:inset 0 1px 0 #999;box-shadow:inset 0 1px 0 #999;background-image:url('data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0idXRmLTgiPz4gPHN2ZyB2ZXJzaW9uPSIxLjEiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyI+PGRlZnM+PGxpbmVhckdyYWRpZW50IGlkPSJncmFkIiBncmFkaWVudFVuaXRzPSJvYmplY3RCb3VuZGluZ0JveCIgeDE9IjAuNSIgeTE9IjAuMCIgeDI9IjAuNSIgeTI9IjEuMCI+PHN0b3Agb2Zmc2V0PSIwJSIgc3RvcC1jb2xvcj0iIzY2NjY2NiIvPjxzdG9wIG9mZnNldD0iMTAwJSIgc3RvcC1jb2xvcj0iIzMzMzMzMyIvPjwvbGluZWFyR3JhZGllbnQ+PC9kZWZzPjxyZWN0IHg9IjAiIHk9IjAiIHdpZHRoPSIxMDAlIiBoZWlnaHQ9IjEwMCUiIGZpbGw9InVybCgjZ3JhZCkiIC8+PC9zdmc+IA==');background-size:100%;background-image:-webkit-gradient(linear, 50% 0%, 50% 100%, color-stop(0%, #666666),color-stop(100%, #333333));background-image:-moz-linear-gradient(top, #666666,#333333);background-image:-webkit-linear-gradient(top, #666666,#333333);background-image:linear-gradient(to bottom, #666666,#333333)}#cms_toolbar .cms_form-login input[type="submit"]:hover,#cms_toolbar .cms_form-login input[type="submit"]:active,#cms_toolbar .cms_form-login 
input[type="submit"]:focus{background:#454545;-moz-box-shadow:none;-webkit-box-shadow:none;box-shadow:none}#cms_toolbar .cms_form-login input[type="submit"]:active,#cms_toolbar .cms_form-login input[type="submit"]:focus{background:#000}#cms_toolbar .cms_form-login .cms_error{color:red}#cms_toolbar .cms_form-login .cms_error input{border:1px solid red}#cms_toolbar .cms_toolbar-item-buttons{margin:4px 0 4px}#cms_toolbar .cms_toolbar-item-buttons a{float:left;font-size:11px;line-height:1;padding:5px 12px}#cms_toolbar .cms_toolbar-item-buttons a:first-child{-moz-border-radius:3px 0 0 3px;-webkit-border-radius:3px;border-radius:3px 0 0 3px}#cms_toolbar .cms_toolbar-item-buttons a:last-child{-moz-border-radius:0 3px 3px 0;-webkit-border-radius:0;border-radius:0 3px 3px 0}#cms_toolbar .cms_toolbar-item-buttons a:only-child{-moz-border-radius:3px;-webkit-border-radius:3px;border-radius:3px}#cms_toolbar .cms_toolbar-trigger{position:fixed;right:0;top:0;z-index:999999;border-left:1px solid #666;border-bottom:1px solid #666}#cms_toolbar .cms_toolbar-trigger a{display:block;width:30px;height:29px;text-indent:-119988px;overflow:hidden;text-align:left;text-transform:capitalize;color:#454545;border-left:1px solid white;border-top:1px solid white;background:#fafafa url("../img/toolbar/sprite_toolbar.png") no-repeat -60px -40px}#cms_toolbar .cms_toolbar-trigger a:hover,#cms_toolbar .cms_toolbar-trigger a:active,#cms_toolbar .cms_toolbar-trigger a:focus{background-position:-90px -40px;background-color:white}#cms_toolbar .cms_toolbar-trigger-expanded a{background-position:0 -40px}#cms_toolbar .cms_toolbar-trigger-expanded a:hover,#cms_toolbar .cms_toolbar-trigger-expanded a:active,#cms_toolbar .cms_toolbar-trigger-expanded a:focus{background-position:-30px -40px}#cms_toolbar .cms_toolbar-loader a{background:#fcfcfc url("../img/loader.gif") no-repeat center center !important;background-size:20px 20px !important}#cms_toolbar .cms_toolbar-item_switch{position:relative;left:0;top:0;margin:4px 0 4px;-moz-border-radius:20px;-webkit-border-radius:20px;border-radius:20px;border-top:1px solid #e6e6e6;background:#ededed;-moz-box-shadow:inset #e6e6e6 0px 1px 0px;-webkit-box-shadow:inset #e6e6e6 0px 1px 0px;box-shadow:inset #e6e6e6 0px 1px 0px}#cms_toolbar .cms_toolbar-item_switch:hover,#cms_toolbar .cms_toolbar-item_switch:active,#cms_toolbar .cms_toolbar-item_switch:focus{background-color:#e6e6e6}#cms_toolbar .cms_toolbar-item_switch a{float:left;position:relative;z-index:100;font-size:11px;line-height:11px;text-transform:uppercase;letter-spacing:1px;padding:6px 14px 4px 28px;color:black;text-shadow:0 1px 0 #fff}#cms_toolbar .cms_toolbar-item_switch .cms_toolbar-item_switch-knob{float:left;position:absolute;left:2px;top:1px;z-index:99;width:16px;height:16px;-moz-border-radius:16px;-webkit-border-radius:16px;border-radius:16px;text-indent:-119988px;overflow:hidden;text-align:left;text-transform:capitalize;border:1px solid black;background:#454545;-moz-box-shadow:inset 0 1px 0 #999;-webkit-box-shadow:inset 0 1px 0 #999;box-shadow:inset 0 1px 0 
#999;background-image:url('data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0idXRmLTgiPz4gPHN2ZyB2ZXJzaW9uPSIxLjEiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyI+PGRlZnM+PGxpbmVhckdyYWRpZW50IGlkPSJncmFkIiBncmFkaWVudFVuaXRzPSJvYmplY3RCb3VuZGluZ0JveCIgeDE9IjAuNSIgeTE9IjAuMCIgeDI9IjAuNSIgeTI9IjEuMCI+PHN0b3Agb2Zmc2V0PSIwJSIgc3RvcC1jb2xvcj0iIzY2NjY2NiIvPjxzdG9wIG9mZnNldD0iMTAwJSIgc3RvcC1jb2xvcj0iIzMzMzMzMyIvPjwvbGluZWFyR3JhZGllbnQ+PC9kZWZzPjxyZWN0IHg9IjAiIHk9IjAiIHdpZHRoPSIxMDAlIiBoZWlnaHQ9IjEwMCUiIGZpbGw9InVybCgjZ3JhZCkiIC8+PC9zdmc+IA==');background-size:100%;background-image:-webkit-gradient(linear, 50% 0%, 50% 100%, color-stop(0%, #666666),color-stop(100%, #333333));background-image:-moz-linear-gradient(top, #666666,#333333);background-image:-webkit-linear-gradient(top, #666666,#333333);background-image:linear-gradient(to bottom, #666666,#333333)}#cms_toolbar .cms_toolbar-item_switch .cms_toolbar-item_switch-on{display:none;position:relative;top:-1px}#cms_toolbar .cms_toolbar-item_switch .cms_toolbar-item_switch-off{display:inline;position:relative;top:-1px}#cms_toolbar .cms_toolbar-item_switch-active a{padding:6px 28px 4px 14px;color:#693}#cms_toolbar .cms_toolbar-item_switch-active .cms_toolbar-item_switch-knob{left:auto;right:2px;border:1px solid #80bf40;background:#80bf40;-moz-box-shadow:inset 0 1px 0 #b3d98c;-webkit-box-shadow:inset 0 1px 0 #b3d98c;box-shadow:inset 0 1px 0 #b3d98c;background-image:url('data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0idXRmLTgiPz4gPHN2ZyB2ZXJzaW9uPSIxLjEiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyI+PGRlZnM+PGxpbmVhckdyYWRpZW50IGlkPSJncmFkIiBncmFkaWVudFVuaXRzPSJvYmplY3RCb3VuZGluZ0JveCIgeDE9IjAuNSIgeTE9IjAuMCIgeDI9IjAuNSIgeTI9IjEuMCI+PHN0b3Agb2Zmc2V0PSIwJSIgc3RvcC1jb2xvcj0iIzgwYmY0MCIvPjxzdG9wIG9mZnNldD0iNTAlIiBzdG9wLWNvbG9yPSIjNjZhZDFmIi8+PHN0b3Agb2Zmc2V0PSIxMDAlIiBzdG9wLWNvbG9yPSIjNjZiODE0Ii8+PC9saW5lYXJHcmFkaWVudD48L2RlZnM+PHJlY3QgeD0iMCIgeT0iMCIgd2lkdGg9IjEwMCUiIGhlaWdodD0iMTAwJSIgZmlsbD0idXJsKCNncmFkKSIgLz48L3N2Zz4g');background-size:100%;background-image:-webkit-gradient(linear, 50% 0%, 50% 100%, color-stop(0%, #80bf40),color-stop(50%, #66ad1f),color-stop(100%, #66b814));background-image:-moz-linear-gradient(top, #80bf40 0%,#66ad1f 50%,#66b814 100%);background-image:-webkit-linear-gradient(top, #80bf40 0%,#66ad1f 50%,#66b814 100%);background-image:linear-gradient(to bottom, #80bf40 0%,#66ad1f 50%,#66b814 100%)}#cms_toolbar .cms_toolbar-item_switch-active .cms_toolbar-item_switch-on{display:inline}#cms_toolbar .cms_toolbar-item_switch-active .cms_toolbar-item_switch-off{display:none}#cms_toolbar .cms_toolbar-item_switch-highlight a{color:#0eaaec}#cms_toolbar .cms_toolbar-item_switch-highlight .cms_toolbar-item_switch-knob{border:1px solid #0b87bc;background:#3abcf3;-moz-box-shadow:inset 0 1px 0 #6accf6;-webkit-box-shadow:inset 0 1px 0 #6accf6;box-shadow:inset 0 1px 0 #6accf6;background-image:url('data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0idXRmLTgiPz4gPHN2ZyB2ZXJzaW9uPSIxLjEiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyI+PGRlZnM+PGxpbmVhckdyYWRpZW50IGlkPSJncmFkIiBncmFkaWVudFVuaXRzPSJvYmplY3RCb3VuZGluZ0JveCIgeDE9IjAuNSIgeTE9IjAuMCIgeDI9IjAuNSIgeTI9IjEuMCI+PHN0b3Agb2Zmc2V0PSIwJSIgc3RvcC1jb2xvcj0iIzBlOTdlYyIvPjxzdG9wIG9mZnNldD0iMTAwJSIgc3RvcC1jb2xvcj0iIzBlNzJlYyIvPjwvbGluZWFyR3JhZGllbnQ+PC9kZWZzPjxyZWN0IHg9IjAiIHk9IjAiIHdpZHRoPSIxMDAlIiBoZWlnaHQ9IjEwMCUiIGZpbGw9InVybCgjZ3JhZCkiIC8+PC9zdmc+IA==');background-size:100%;background-image:-webkit-gradient(linear, 50% 0%, 50% 100%, color-stop(0%, 
#0e97ec),color-stop(100%, #0e72ec));background-image:-moz-linear-gradient(top, #0e97ec,#0e72ec);background-image:-webkit-linear-gradient(top, #0e97ec,#0e72ec);background-image:linear-gradient(to bottom, #0e97ec,#0e72ec)}#cms_toolbar .cms_screenblock{color:white;text-align:center;position:fixed;right:0;top:0;z-index:100;width:100%;height:100%;background-color:black;background:rgba(0,0,0,0.9)}#cms_toolbar .cms_screenblock .cms_screenblock-inner{margin-top:300px}#cms_toolbar .cms_screenblock .cms_screenblock-inner h1{font-size:28px;line-height:30px}#cms_toolbar .cms_screenblock .cms_screenblock-inner h1,#cms_toolbar .cms_screenblock .cms_screenblock-inner p{color:#a6a6a6;text-align:center}#cms_toolbar .cms_screenblock .cms_screenblock-inner a{color:white}#cms_toolbar .cms_screenblock .cms_screenblock-inner a:hover{text-decoration:underline}/*!
+ */#cms_toolbar{position:absolute;left:0;top:5px;z-index:9999999;width:100%}#cms_toolbar .cms_toolbar{display:none;position:fixed;left:0;top:0;z-index:999999;width:100%;min-width:320px;height:30px;border-bottom:1px solid #666 !important;background-color:white;background:rgba(250,250,250,0);background-image:-moz-linear-gradient(top, rgba(250,250,250,0.97) 0%,rgba(252,252,252,0.97) 50%,rgba(250,250,250,0.95) 100%);background-image:-webkit-linear-gradient(top, rgba(250,250,250,0.97) 0%,rgba(252,252,252,0.97) 50%,rgba(250,250,250,0.95) 100%);background-image:linear-gradient(to bottom, rgba(250,250,250,0.97) 0%,rgba(252,252,252,0.97) 50%,rgba(250,250,250,0.95) 100%);-moz-box-shadow:0 0 5px rgba(0,0,0,0.2);-webkit-box-shadow:0 0 5px rgba(0,0,0,0.2);box-shadow:0 0 5px rgba(0,0,0,0.2);background/**/:#fcfcfc}#cms_toolbar .cms_toolbar .cms_toolbar-left{float:left;padding-left:10px;position:relative;z-index:10}#cms_toolbar .cms_toolbar .cms_toolbar-right{float:right;padding-right:32px;position:relative;z-index:10}#cms_toolbar .cms_toolbar .cms_toolbar-left .cms_toolbar-item{margin-left:10px}#cms_toolbar .cms_toolbar .cms_toolbar-right .cms_toolbar-item{margin-right:20px}#cms_toolbar .cms_toolbar .cms_toolbar-item{float:left}#cms_toolbar .cms_toolbar .cms_toolbar-item-buttons a{border-bottom:none !important}@media only screen and (max-width: 800px){#cms_toolbar .cms_toolbar-right{display:none}}#cms_toolbar.cms_toolbar-debug .cms_toolbar{top:5px !important}#cms_toolbar.cms_toolbar-debug .cms_toolbar-trigger{top:5px !important}#cms_toolbar.cms_toolbar-debug .cms_debug-bar{position:fixed;left:0;top:0;z-index:99999999;width:100%;height:4px;border-bottom:1px solid #ddd;background:#fdffc8 url("../img/toolbar/sprite_toolbar.png") repeat-x left -444px}#cms_toolbar.cms_toolbar-debug #container{padding-top:35px !important}#cms_toolbar .cms_toolbar-item-navigation li{float:left;position:relative;zoom:1}#cms_toolbar .cms_toolbar-item-navigation li a{float:left;padding:5px 10px;zoom:1;cursor:default}#cms_toolbar .cms_toolbar-item-navigation li ul{display:none}#cms_toolbar .cms_toolbar-item-navigation>li:first-child>a span{font-weight:800;line-height:12px}#cms_toolbar .cms_toolbar-item-navigation .cms_toolbar-item-navigation-hover ul{position:absolute;left:0;top:30px;display:block;min-width:180px;padding:4px 0;-moz-border-radius:0 0 4px 4px;-webkit-border-radius:0;border-radius:0 0 4px 4px;border:1px solid white;border-top:none;background-color:white;background:rgba(255,255,255,0.97);-moz-box-shadow:0 1px 1px rgba(0,0,0,0.4);-webkit-box-shadow:0 1px 1px rgba(0,0,0,0.4);box-shadow:0 1px 1px rgba(0,0,0,0.4)}#cms_toolbar .cms_toolbar-item-navigation .cms_toolbar-item-navigation-hover ul li{float:none;zoom:1}#cms_toolbar .cms_toolbar-item-navigation .cms_toolbar-item-navigation-hover ul li a{float:none;display:block;zoom:1;cursor:pointer;white-space:nowrap;padding:2px 10px 2px 15px}#cms_toolbar .cms_toolbar-item-navigation .cms_toolbar-item-navigation-hover ul ul{-moz-border-radius:0 4px 4px 0;-webkit-border-radius:0;border-radius:0 4px 4px 0;border-top:1px solid #f5f5f5}#cms_toolbar .cms_toolbar-item-navigation .cms_toolbar-item-navigation-hover .cms_toolbar-item-navigation-children ul{display:none;left:100%;top:-5px}#cms_toolbar .cms_toolbar-item-navigation .cms_toolbar-item-navigation-hover .cms_toolbar-item-navigation-children>a{cursor:default}#cms_toolbar .cms_toolbar-item-navigation .cms_toolbar-item-navigation-hover .cms_toolbar-item-navigation-children>a 
span{display:block;background:url("../img/toolbar/sprite_toolbar.png") no-repeat right -270px}#cms_toolbar .cms_toolbar-item-navigation .cms_toolbar-item-navigation-hover>a{color:white;background:#0e72ec;background-image:-moz-linear-gradient(top, #0e97ec,#0e72ec);background-image:-webkit-linear-gradient(top, #0e97ec,#0e72ec);background-image:linear-gradient(to bottom, #0e97ec,#0e72ec)}#cms_toolbar .cms_toolbar-item-navigation .cms_toolbar-item-navigation-hover>a span{background-position:right -300px !important}#cms_toolbar .cms_toolbar-item-navigation .cms_toolbar-item-navigation-hover .cms_toolbar-item-navigation-active>a{font-weight:800}#cms_toolbar .cms_toolbar-item-navigation .cms_toolbar-item-navigation-break{height:1px;margin:0 0 4px;padding:0 0 3px;text-indent:-119988px;overflow:hidden;text-align:left;text-transform:capitalize;border-bottom:1px solid #e6e6e6}#cms_toolbar .cms_toolbar-item-navigation .cms_toolbar-item-navigation-disabled a{cursor:default !important;filter:progid:DXImageTransform.Microsoft.Alpha(Opacity=20);opacity:0.2}#cms_toolbar .cms_toolbar-item-navigation .cms_toolbar-item-navigation-disabled a:hover,#cms_toolbar .cms_toolbar-item-navigation .cms_toolbar-item-navigation-disabled a:active,#cms_toolbar .cms_toolbar-item-navigation .cms_toolbar-item-navigation-disabled a:focus{-moz-box-shadow:none;-webkit-box-shadow:none;box-shadow:none;background:none !important;color:black !important}#cms_toolbar .cms_toolbar-item-navigation .cms_toolbar-item-navigation-disabled ul{display:none !important}#cms_toolbar .cms_toolbar-item-cms-mode-switcher{display:none}#cms_toolbar .cms_messages{display:none;position:fixed;z-index:999999;top:30px;width:300px;min-height:14px;margin:0;padding:6px 10px 8px;background:rgba(0,0,0,0.74);-moz-border-radius:0 0 3px 3px;-webkit-border-radius:0;border-radius:0 0 3px 3px;color:white;font-size:12px;line-height:16px;font-weight:200}#cms_toolbar .cms_messages *{color:white;font-size:12px;line-height:16px;font-weight:200}#cms_toolbar .cms_messages a{color:#0eaaec}#cms_toolbar .cms_messages a:hover{text-decoration:underline}#cms_toolbar .cms_messages strong{color:#3abcf3;font-weight:200}#cms_toolbar .cms_messages ul{display:inline;color:white}#cms_toolbar .cms_messages ul li{display:inline;color:white;font-weight:200}#cms_toolbar .cms_messages .cms_messages-close{display:none;float:right;cursor:pointer;width:20px;height:14px;margin-left:10px;position:relative;top:-2px;left:3px;background:url("../img/toolbar/sprite_toolbar.png") no-repeat -100px -90px}#cms_toolbar .cms_messages .cms_messages-close:hover{background-position:-120px -90px}#cms_toolbar .cms_messages-error strong{color:red}#cms_toolbar .cms_toolbar-item-logo{margin:0 !important}#cms_toolbar .cms_toolbar-item-logo a{display:block;width:92px;height:20px;margin:5px 0;text-indent:-119988px;overflow:hidden;text-align:left;text-transform:capitalize;background:url("../img/toolbar/sprite_toolbar.png") no-repeat left top}#cms_toolbar .cms_toolbar-item-logo a:hover,#cms_toolbar .cms_toolbar-item-logo a:active,#cms_toolbar .cms_toolbar-item-logo a:focus{background-position:left -20px}#cms_toolbar .cms_form-login{padding:3px 0 0 0}#cms_toolbar .cms_form-login label{float:left;cursor:pointer;padding-left:10px}#cms_toolbar .cms_form-login label span{padding-top:1px;display:inline-block;vertical-align:middle;*vertical-align:auto;*zoom:1;*display:inline}#cms_toolbar .cms_form-login input[type="text"],#cms_toolbar .cms_form-login 
input[type="password"]{font-size:13px;line-height:13px;width:100px;padding:3px 5px;margin:0;-moz-border-radius:3px;-webkit-border-radius:3px;border-radius:3px;color:#666;border:1px solid #d9d9d9;-moz-box-shadow:0px 1px 0px #fff;-webkit-box-shadow:0px 1px 0px #fff;box-shadow:0px 1px 0px #fff}#cms_toolbar .cms_form-login input[type="text"]:focus,#cms_toolbar .cms_form-login input[type="password"]:focus{border-color:#0eaaec;-moz-box-shadow:inset 0px 0px 2px #e6e6e6;-webkit-box-shadow:inset 0px 0px 2px #e6e6e6;box-shadow:inset 0px 0px 2px #e6e6e6;-moz-transition:outline,0.2s 1s;-webkit-transition:outline,0.2s 1s;transition:outline 0.2s 1s}#cms_toolbar .cms_form-login input[type="submit"]{display:block;color:white;font-size:12px;text-transform:uppercase;cursor:pointer;height:23px;padding:1px 15px 0;-moz-border-radius:3px;-webkit-border-radius:3px;border-radius:3px;border:1px solid #333;background-color:#666;-moz-box-shadow:inset 0 1px 0 #999;-webkit-box-shadow:inset 0 1px 0 #999;box-shadow:inset 0 1px 0 #999;background-image:-moz-linear-gradient(top, #666666,#333333);background-image:-webkit-linear-gradient(top, #666666,#333333);background-image:linear-gradient(to bottom, #666666,#333333)}#cms_toolbar .cms_form-login input[type="submit"]:hover,#cms_toolbar .cms_form-login input[type="submit"]:active,#cms_toolbar .cms_form-login input[type="submit"]:focus{background:#454545;-moz-box-shadow:none;-webkit-box-shadow:none;box-shadow:none}#cms_toolbar .cms_form-login input[type="submit"]:active,#cms_toolbar .cms_form-login input[type="submit"]:focus{background:#000}#cms_toolbar .cms_form-login .cms_error{color:red}#cms_toolbar .cms_form-login .cms_error input{border:1px solid red}#cms_toolbar .cms_toolbar-item-buttons{margin:4px 0 4px}#cms_toolbar .cms_toolbar-item-buttons a{float:left;font-size:11px;line-height:1;padding:5px 12px}#cms_toolbar .cms_toolbar-item-buttons a:first-child{-moz-border-radius:3px 0 0 3px;-webkit-border-radius:3px;border-radius:3px 0 0 3px}#cms_toolbar .cms_toolbar-item-buttons a:last-child{-moz-border-radius:0 3px 3px 0;-webkit-border-radius:0;border-radius:0 3px 3px 0}#cms_toolbar .cms_toolbar-item-buttons a:only-child{-moz-border-radius:3px;-webkit-border-radius:3px;border-radius:3px}#cms_toolbar .cms_toolbar-trigger{position:fixed;right:0;top:0;z-index:999999;border-left:1px solid #666;border-bottom:1px solid #666}#cms_toolbar .cms_toolbar-trigger a{display:block;width:30px;height:29px;text-indent:-119988px;overflow:hidden;text-align:left;text-transform:capitalize;color:#454545;border-left:1px solid white;border-top:1px solid white;background:#fafafa url("../img/toolbar/sprite_toolbar.png") no-repeat -60px -40px}#cms_toolbar .cms_toolbar-trigger a:hover,#cms_toolbar .cms_toolbar-trigger a:active,#cms_toolbar .cms_toolbar-trigger a:focus{background-position:-90px -40px;background-color:white}#cms_toolbar .cms_toolbar-trigger-expanded a{background-position:0 -40px}#cms_toolbar .cms_toolbar-trigger-expanded a:hover,#cms_toolbar .cms_toolbar-trigger-expanded a:active,#cms_toolbar .cms_toolbar-trigger-expanded a:focus{background-position:-30px -40px}#cms_toolbar .cms_toolbar-loader a{background:#fcfcfc url("../img/loader.gif") no-repeat center center !important;background-size:20px 20px !important}#cms_toolbar .cms_screenblock{color:white;text-align:center;position:fixed;right:0;top:0;z-index:100;width:100%;height:100%;background-color:black;background:rgba(0,0,0,0.9)}#cms_toolbar .cms_screenblock .cms_screenblock-inner{margin-top:300px}#cms_toolbar .cms_screenblock 
.cms_screenblock-inner h1{font-size:28px;line-height:30px}#cms_toolbar .cms_screenblock .cms_screenblock-inner h1,#cms_toolbar .cms_screenblock .cms_screenblock-inner p{color:#a6a6a6;text-align:center}#cms_toolbar .cms_screenblock .cms_screenblock-inner a{color:white}#cms_toolbar .cms_screenblock .cms_screenblock-inner a:hover{text-decoration:underline}/*!
* @copyright: https://github.com/divio/django-cms
*/#cms_toolbar .cms_modal{display:none;position:fixed;left:50%;top:50%;z-index:999999;overflow:hidden;-moz-border-radius:5px;-webkit-border-radius:5px;border-radius:5px;-moz-box-shadow:0 0 20px rgba(0,0,0,0.5);-webkit-box-shadow:0 0 20px rgba(0,0,0,0.5);box-shadow:0 0 20px rgba(0,0,0,0.5);background:white}#cms_toolbar .cms_modal .cms_modal-body{position:relative;z-index:10;width:800px;height:400px;border-top:1px solid #e6e6e6;border-bottom:1px solid #e6e6e6}#cms_toolbar .cms_modal .cms_modal-foot{position:relative;height:32px;-moz-border-radius:0px 0px 5px 5px;-webkit-border-radius:0px;border-radius:0px 0px 5px 5px;clear:both;overflow:hidden;background:#fafafa}#cms_toolbar .cms_modal .cms_modal-shim{display:none;position:absolute;left:0;top:0;z-index:20;width:100%;height:100%}#cms_toolbar .cms_modal .cms_modal-frame{position:relative;z-index:10;width:100%;height:100%}#cms_toolbar .cms_modal .cms_modal-frame iframe{width:100%;height:100%}#cms_toolbar .cms_modal .cms_modal-title{display:block;font-size:13px;font-weight:bold;text-align:center;cursor:move;padding:4px 75px 3px;-moz-border-radius:5px 5px 0px 0px;-webkit-border-radius:5px;border-radius:5px 5px 0px 0px;color:#454545;background:#fafafa}#cms_toolbar .cms_modal .cms_modal-collapse,#cms_toolbar .cms_modal .cms_modal-close,#cms_toolbar .cms_modal .cms_modal-maximize{display:block;position:absolute;right:3px;top:3px;text-indent:-119988px;overflow:hidden;text-align:left;text-transform:capitalize;cursor:pointer;width:20px;height:20px;background:url("../img/toolbar/sprite_toolbar.png") no-repeat left top}#cms_toolbar .cms_modal .cms_modal-collapse{right:40px;background-position:0 -70px}#cms_toolbar .cms_modal .cms_modal-collapse:hover,#cms_toolbar .cms_modal .cms_modal-collapse:active,#cms_toolbar .cms_modal .cms_modal-collapse:focus{background-position:-20px -70px}#cms_toolbar .cms_modal .cms_modal-collapsed{background-position:-100px -70px}#cms_toolbar .cms_modal .cms_modal-collapsed:hover,#cms_toolbar .cms_modal .cms_modal-collapsed:active,#cms_toolbar .cms_modal .cms_modal-collapsed:focus{background-position:-100px -70px}#cms_toolbar .cms_modal .cms_modal-maximize{right:22px;background-position:0 -90px}#cms_toolbar .cms_modal .cms_modal-maximize:hover,#cms_toolbar .cms_modal .cms_modal-maximize:active,#cms_toolbar .cms_modal .cms_modal-maximize:focus{background-position:-20px -90px}#cms_toolbar .cms_modal .cms_modal-maximize-active{background-position:-20px -90px !important}#cms_toolbar .cms_modal .cms_modal-close{background-position:-40px -70px}#cms_toolbar .cms_modal .cms_modal-close:hover,#cms_toolbar .cms_modal .cms_modal-close:active,#cms_toolbar .cms_modal .cms_modal-close:focus{background-position:-60px -70px}#cms_toolbar .cms_modal .cms_modal-resize{position:absolute;right:0;bottom:0;z-index:102;width:20px;height:20px;cursor:nw-resize;background:url("../img/toolbar/sprite_toolbar.png") no-repeat -117px -67px}#cms_toolbar .cms_modal .cms_modal-resize:hover{background-position:-137px -67px}#cms_toolbar .cms_modal .cms_modal-breadcrumb{display:none;float:left;font-size:12px;line-height:12px;position:relative;z-index:100;height:32px;min-width:225px;overflow:hidden;width:100%}#cms_toolbar .cms_modal .cms_modal-breadcrumb .cms_modal-breadcrumb-items{position:absolute;left:35px;top:0;width:9999px;background:#fcfcfc}#cms_toolbar .cms_modal .cms_modal-breadcrumb a{float:left;font-size:12px;line-height:12px;margin-left:-10px;position:relative;z-index:100;padding-right:10px;color:#666;background:url("../img/toolbar/sprite_toolbar.png") 
no-repeat right -200px}#cms_toolbar .cms_modal .cms_modal-breadcrumb a span{float:left;padding:10px 15px 10px 25px;color:black}#cms_toolbar .cms_modal .cms_modal-breadcrumb a:nth-child(1){z-index:100}#cms_toolbar .cms_modal .cms_modal-breadcrumb a:nth-child(2){z-index:80}#cms_toolbar .cms_modal .cms_modal-breadcrumb a:nth-child(3){z-index:70}#cms_toolbar .cms_modal .cms_modal-breadcrumb a:nth-child(4){z-index:60}#cms_toolbar .cms_modal .cms_modal-breadcrumb a:nth-child(5){z-index:50}#cms_toolbar .cms_modal .cms_modal-breadcrumb a:nth-child(6){z-index:40}#cms_toolbar .cms_modal .cms_modal-breadcrumb a:nth-child(7){z-index:30}#cms_toolbar .cms_modal .cms_modal-breadcrumb a:nth-child(8){z-index:20}#cms_toolbar .cms_modal .cms_modal-breadcrumb a:nth-child(9){z-index:10}#cms_toolbar .cms_modal .cms_modal-breadcrumb a:nth-child(10){z-index:1}#cms_toolbar .cms_modal .cms_modal-breadcrumb a span,#cms_toolbar .cms_modal .cms_modal-breadcrumb .cms_modal-breadcrumb-title{float:left;position:relative;z-index:120;color:#666}#cms_toolbar .cms_modal .cms_modal-breadcrumb .cms_modal-breadcrumb-title{padding:10px 20px 10px 15px;border-right:1px solid #e6e6e6;-moz-border-radius:0 0 0 5px;-webkit-border-radius:0;border-radius:0 0 0 5px;background:#fff url("../img/toolbar/sprite_toolbar.png") no-repeat -133px -84px;text-indent:-119988px;overflow:hidden;text-align:left;text-transform:capitalize}#cms_toolbar .cms_modal .cms_modal-breadcrumb a:hover{color:black;background-position:right -232px !important}#cms_toolbar .cms_modal .cms_modal-breadcrumb a:hover span{color:black;background-color:white}#cms_toolbar .cms_modal .cms_modal-breadcrumb .cms_modal-breadcrumb-last{cursor:default}#cms_toolbar .cms_modal .cms_modal-breadcrumb .cms_modal-breadcrumb-last span{color:#0eaaec}#cms_toolbar .cms_modal .cms_modal-buttons{position:absolute;right:0;top:0;z-index:101;float:right;padding:0 20px 0 10px;-moz-border-radius:0 0 5px 0;-webkit-border-radius:0;border-radius:0 0 5px 0;background:#fcfcfc}#cms_toolbar .cms_modal .cms_modal-buttons div{float:right;font-size:12px;cursor:pointer;padding:2px 10px;margin:3px 5px 3px 0}/*!
* @copyright: https://github.com/divio/django-cms
@@ -16,4 +16,4 @@
* @copyright: https://github.com/divio/django-cms
*/#cms_toolbar .cms_structure{display:none;position:absolute;top:0;right:0;width:100%;height:100%;z-index:9999}#cms_toolbar .cms_structure .cms_structure-dimmer{display:none;position:fixed;top:0;right:0;bottom:0;left:0;width:100%;height:100%;z-index:10;background:rgba(255,255,255,0.95)}#cms_toolbar .cms_structure .cms_structure-content{position:absolute;left:0;top:0;z-index:100;width:100%;height:100%}#cms_toolbar .cms_structure .cms_dragarea{position:absolute;padding:5px 5px 4px;margin:0 0 5px;-moz-border-radius:3px;-webkit-border-radius:3px;border-radius:3px;background:#454545;-moz-box-sizing:border-box;-webkit-box-sizing:border-box;box-sizing:border-box}#cms_toolbar .cms_structure .cms_dragarea-static{background:#454545 url("../img/toolbar/pattern.png")}#cms_toolbar .cms_structure .cms_dragbar{font-size:13px;line-height:20px;position:relative;left:0;top:0;z-index:9999;-moz-border-radius:3px;-webkit-border-radius:3px;border-radius:3px}#cms_toolbar .cms_structure .cms_dragbar .cms_dragbar-title{font-size:12px;line-height:17px;text-transform:uppercase;font-weight:500;padding:0 0 0 15px;height:16px;cursor:pointer;color:white;text-shadow:0px 1px 0px #000}#cms_toolbar .cms_structure .cms_dragbar .cms_dragbar-title:before{content:" ";position:absolute;left:0;top:0;width:16px;height:15px;background:url("../img/toolbar/sprite_toolbar.png") no-repeat -85px -113px}#cms_toolbar .cms_structure .cms_dragbar .cms_dragbar-title:hover:before{background-position:-105px -113px}#cms_toolbar .cms_structure .cms_dragbar .cms_dragbar-title-expanded:before{background-position:-124px -114px}#cms_toolbar .cms_structure .cms_dragbar .cms_dragbar-title-expanded:hover:before{background-position:-144px -114px !important}#cms_toolbar .cms_structure .cms_dragbar-empty{font-size:11px;text-transform:uppercase;padding-top:0;padding-bottom:0}#cms_toolbar .cms_structure .cms_dragbar-empty-wrapper{display:none}#cms_toolbar .cms_structure .cms_draggables{list-style-type:none;padding:0;margin:0}#cms_toolbar .cms_structure .cms_draggables .cms_draggables{display:none;min-height:25px;padding-left:6px}#cms_toolbar .cms_structure .cms_draggables .cms_draggables>.cms_draggable:first-child,#cms_toolbar .cms_structure .cms_draggables .cms_draggables>.cms_draggable:only-child,#cms_toolbar .cms_structure .cms_draggable>.cms_draggable{margin-top:0}#cms_toolbar .cms_structure .cms_draggables>.cms_draggable:last-child{margin-bottom:1px}#cms_toolbar .cms_structure .cms_draggables .cms_draggables>.cms_draggable:last-child{margin-bottom:2px}#cms_toolbar .cms_structure .cms_draggable,#cms_toolbar .cms_structure .cms_droppable{list-style-type:none;position:relative;left:0;top:0;z-index:99;-moz-border-radius:3px;-webkit-border-radius:3px;border-radius:3px;padding:4px 5px 3px 5px;margin:5px 0 0;margin-left:0 !important}#cms_toolbar .cms_structure .cms_draggable .cms_draggable,#cms_toolbar .cms_structure .cms_droppable .cms_draggable{position:relative;z-index:99;white-space:nowrap;border-color:#e6e6e6;background:white}#cms_toolbar .cms_structure .cms_draggable .cms_draggable:hover,#cms_toolbar .cms_structure .cms_droppable .cms_draggable:hover{border-color:#a6a6a6}#cms_toolbar .cms_structure .cms_draggable .cms_draggable .cms_draggable,#cms_toolbar .cms_structure .cms_draggable .cms_draggable .cms_draggable .cms_draggable .cms_draggable,#cms_toolbar .cms_structure .cms_draggable .cms_draggable .cms_draggable .cms_draggable .cms_draggable .cms_draggable .cms_draggable,#cms_toolbar .cms_structure .cms_droppable .cms_draggable 
.cms_draggable,#cms_toolbar .cms_structure .cms_droppable .cms_draggable .cms_draggable .cms_draggable .cms_draggable,#cms_toolbar .cms_structure .cms_droppable .cms_draggable .cms_draggable .cms_draggable .cms_draggable .cms_draggable .cms_draggable{background:#fafafa}#cms_toolbar .cms_structure .cms_draggable .cms_draggable .cms_draggable .cms_draggable,#cms_toolbar .cms_structure .cms_draggable .cms_draggable .cms_draggable .cms_draggable .cms_draggable .cms_draggable,#cms_toolbar .cms_structure .cms_draggable .cms_draggable .cms_draggable .cms_draggable .cms_draggable .cms_draggable .cms_draggable .cms_draggable,#cms_toolbar .cms_structure .cms_droppable .cms_draggable .cms_draggable .cms_draggable,#cms_toolbar .cms_structure .cms_droppable .cms_draggable .cms_draggable .cms_draggable .cms_draggable .cms_draggable,#cms_toolbar .cms_structure .cms_droppable .cms_draggable .cms_draggable .cms_draggable .cms_draggable .cms_draggable .cms_draggable .cms_draggable{background:white}#cms_toolbar .cms_structure .cms_draggable .cms_submenu,#cms_toolbar .cms_structure .cms_droppable .cms_submenu{display:none;margin-top:2px}#cms_toolbar .cms_structure .cms_draggable .cms_submenu-dropdown,#cms_toolbar .cms_structure .cms_droppable .cms_submenu-dropdown{right:-6px;top:22px}#cms_toolbar .cms_structure .cms_draggable .cms_submenu-quicksearch,#cms_toolbar .cms_structure .cms_droppable .cms_submenu-quicksearch{right:-5px;top:-6px;-moz-border-radius:0;-webkit-border-radius:0;border-radius:0;height:28px;border-left:1px dotted #e6e6e6;background:#fafafa url("../img/toolbar/sprite_toolbar.png") no-repeat right -415px}#cms_toolbar .cms_structure .cms_draggable .cms_submenu-quicksearch input,#cms_toolbar .cms_structure .cms_droppable .cms_submenu-quicksearch input{color:black;margin-top:1px}#cms_toolbar .cms_structure .cms_draggable .cms_draggable .cms_submenu-quicksearch,#cms_toolbar .cms_structure .cms_draggable .cms_draggable .cms_draggable .cms_draggable .cms_submenu-quicksearch,#cms_toolbar .cms_structure .cms_draggable .cms_draggable .cms_draggable .cms_draggable .cms_draggable .cms_draggable .cms_submenu-quicksearch,#cms_toolbar .cms_structure .cms_droppable .cms_draggable .cms_submenu-quicksearch,#cms_toolbar .cms_structure .cms_droppable .cms_draggable .cms_draggable .cms_draggable .cms_submenu-quicksearch,#cms_toolbar .cms_structure .cms_droppable .cms_draggable .cms_draggable .cms_draggable .cms_draggable .cms_draggable .cms_submenu-quicksearch{background-color:white}#cms_toolbar .cms_structure .cms_draggable .cms_draggable .cms_draggable .cms_submenu-quicksearch,#cms_toolbar .cms_structure .cms_draggable .cms_draggable .cms_draggable .cms_draggable .cms_draggable .cms_submenu-quicksearch,#cms_toolbar .cms_structure .cms_draggable .cms_draggable .cms_draggable .cms_draggable .cms_draggable .cms_draggable .cms_draggable .cms_submenu-quicksearch,#cms_toolbar .cms_structure .cms_droppable .cms_draggable .cms_draggable .cms_submenu-quicksearch,#cms_toolbar .cms_structure .cms_droppable .cms_draggable .cms_draggable .cms_draggable .cms_draggable .cms_submenu-quicksearch,#cms_toolbar .cms_structure .cms_droppable .cms_draggable .cms_draggable .cms_draggable .cms_draggable .cms_draggable .cms_draggable .cms_submenu-quicksearch{background-color:#fafafa}#cms_toolbar .cms_structure .cms_draggable .cms_dragitem-text,#cms_toolbar .cms_structure .cms_droppable 
.cms_dragitem-text{display:inline-block;vertical-align:middle;*vertical-align:auto;*zoom:1;*display:inline;width:90%;height:21px;overflow:hidden}#cms_toolbar .cms_structure .cms_draggable{z-index:100;color:black;border:1px solid #fafafa;background:#fafafa}#cms_toolbar .cms_structure .cms_draggable:hover{-moz-box-shadow:inset 0px 0px 3px #e6e6e6;-webkit-box-shadow:inset 0px 0px 3px #e6e6e6;box-shadow:inset 0px 0px 3px #e6e6e6}#cms_toolbar .cms_structure .cms_droppable{-moz-border-radius:3px;-webkit-border-radius:3px;border-radius:3px;color:#bfbfbf;border:1px dashed #bfbfbf}#cms_toolbar .cms_structure .cms_dragitem{cursor:move}#cms_toolbar .cms_structure .cms_dragitem-collapsable,#cms_toolbar .cms_structure .cms_dragitem-expanded{cursor:pointer;padding-left:15px}#cms_toolbar .cms_structure .cms_dragitem-collapsable{background:url("../img/toolbar/sprite_toolbar.png") no-repeat 1px -359px}#cms_toolbar .cms_structure .cms_dragitem-expanded{background:url("../img/toolbar/sprite_toolbar.png") no-repeat 0 -389px}#cms_toolbar .cms_structure .cms_dragitem-success{position:absolute;left:-1px;top:-1px;-moz-border-radius:3px;-webkit-border-radius:3px;border-radius:3px;width:100%;height:100%;filter:progid:DXImageTransform.Microsoft.Alpha(Opacity=60);opacity:0.6}#cms_toolbar .cms_structure .cms_draggable-selected .cms_dragitem,#cms_toolbar .cms_structure .cms_draggable-selected .cms_dragitem strong{color:#0e72ec}#cms_toolbar .cms_structure .cms_draggable-selected .cms_draggable .cms_dragitem,#cms_toolbar .cms_structure .cms_draggable-selected .cms_draggable .cms_dragitem strong{color:black}#cms_toolbar .cms_structure .cms_draggable-allowed,#cms_toolbar .cms_structure .cms_draggable-hover-allowed,#cms_toolbar .cms_structure .cms_draggable-placeholder{color:#cce6b3;border-color:#cce6b3}#cms_toolbar .cms_structure .cms_draggable-hover-allowed,#cms_toolbar .cms_structure .cms_draggable-placeholder{color:white;background:rgba(102,153,51,0.2)}#cms_toolbar .cms_structure .cms_dragitem-success{border:1px solid #cce6b3;background:#cce6b3}#cms_toolbar .cms_structure .cms_draggable-disallowed,#cms_toolbar .cms_structure .cms_draggable-hover-disallowed{color:red;border:1px dashed red;background:rgba(255,0,0,0.1)}#cms_toolbar .cms_structure .cms_draggable-disabled>.cms_dragitem-collapsable{background:none !important;padding-left:0}#cms_toolbar .cms_structure .cms_draggable-disabled .cms_draggables{display:none !important}body>.cms_draggable{list-style-type:none;white-space:nowrap;-moz-border-radius:3px;-webkit-border-radius:3px;border-radius:3px;padding:4px 5px 3px 5px;margin:0;border-color:#e6e6e6;background:white}body>.cms_draggable .cms_switcher{display:none !important}body>.cms_draggable .cms_submenu{display:none !important}body>.cms_draggable .cms_draggables{display:none !important}/*!
* @copyright: https://github.com/divio/django-cms
- */#cms_toolbar .cms_submenu{display:block;width:20px;height:15px;cursor:pointer;position:absolute;right:5px;background:url("../img/toolbar/sprite_toolbar.png") no-repeat 3px -152px}#cms_toolbar .cms_submenu-lang{padding:0 5px;position:absolute;top:3px;right:3px;border:1px solid #e6e6e6;background:white;-moz-border-radius:3px;-webkit-border-radius:3px;border-radius:3px}#cms_toolbar .cms_submenu-dropdown{display:none;zoom:1;position:absolute;right:0;top:20px;z-index:999;min-width:140px;max-height:230px;overflow:auto;border:1px solid #e6e6e6;background:white;-moz-box-shadow:0 1px 1px rgba(0,0,0,0.1);-webkit-box-shadow:0 1px 1px rgba(0,0,0,0.1);box-shadow:0 1px 1px rgba(0,0,0,0.1)}#cms_toolbar .cms_submenu-dropdown::-webkit-scrollbar{-webkit-appearance:none;width:7px;background:#e6e6e6}#cms_toolbar .cms_submenu-dropdown::-webkit-scrollbar-thumb{background-color:#454545;border-left:1px solid #e6e6e6;-moz-box-shadow:0 0 1px rgba(255,255,255,0.5);-webkit-box-shadow:0 0 1px rgba(255,255,255,0.5);box-shadow:0 0 1px rgba(255,255,255,0.5)}#cms_toolbar .cms_submenu-dropdown .cms_submenu-item{zoom:1}#cms_toolbar .cms_submenu-dropdown .cms_submenu-item a,#cms_toolbar .cms_submenu-dropdown span{display:block;font-size:12px;line-height:15px;text-align:left;padding:4px 8px 3px 8px}#cms_toolbar .cms_submenu-dropdown .cms_submenu-item a{color:black}#cms_toolbar .cms_submenu-dropdown .cms_submenu-item a:hover,#cms_toolbar .cms_submenu-dropdown .cms_submenu-item a:focus{color:white;background:#0e72ec;background-image:url('data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0idXRmLTgiPz4gPHN2ZyB2ZXJzaW9uPSIxLjEiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyI+PGRlZnM+PGxpbmVhckdyYWRpZW50IGlkPSJncmFkIiBncmFkaWVudFVuaXRzPSJvYmplY3RCb3VuZGluZ0JveCIgeDE9IjAuNSIgeTE9IjAuMCIgeDI9IjAuNSIgeTI9IjEuMCI+PHN0b3Agb2Zmc2V0PSIwJSIgc3RvcC1jb2xvcj0iIzBlOTdlYyIvPjxzdG9wIG9mZnNldD0iMTAwJSIgc3RvcC1jb2xvcj0iIzBlNzJlYyIvPjwvbGluZWFyR3JhZGllbnQ+PC9kZWZzPjxyZWN0IHg9IjAiIHk9IjAiIHdpZHRoPSIxMDAlIiBoZWlnaHQ9IjEwMCUiIGZpbGw9InVybCgjZ3JhZCkiIC8+PC9zdmc+IA==');background-size:100%;background-image:-webkit-gradient(linear, 50% 0%, 50% 100%, color-stop(0%, #0e97ec),color-stop(100%, #0e72ec));background-image:-moz-linear-gradient(top, #0e97ec,#0e72ec);background-image:-webkit-linear-gradient(top, #0e97ec,#0e72ec);background-image:linear-gradient(to bottom, #0e97ec,#0e72ec)}#cms_toolbar .cms_submenu-dropdown .cms_submenu-item a:first-child{border-top:none}#cms_toolbar .cms_submenu-dropdown .cms_submenu-item span{cursor:default;font-weight:bold;color:black;border-top:1px solid #a6a6a6;border-bottom:1px solid #e6e6e6}#cms_toolbar .cms_submenu-dropdown .cms_submenu-item:first-child span{border-top:none}#cms_toolbar .cms_submenu-quicksearch{display:none;position:absolute;right:-5px;top:-5px;z-index:1000;cursor:default;text-align:right;height:25px;-moz-border-radius:4px;-webkit-border-radius:4px;border-radius:4px;background:#454545 url("../img/toolbar/sprite_toolbar.png") no-repeat right -326px}#cms_toolbar .cms_submenu-quicksearch label{cursor:pointer}#cms_toolbar .cms_submenu-quicksearch input{display:block;font-size:12px;color:white;text-align:right;-webkit-appearance:none;width:109px;height:20px;padding:3px 1px 1px 5px;margin-right:25px;border:none;background:none}#cms_toolbar .cms_submenu-scroll-hint{display:none;color:#a6a6a6;font-size:12px;line-height:1;text-align:center;position:absolute;left:0;bottom:0;width:100%;padding:5px 0 4px;background-color:#e6e6e6}@media print, (-o-min-device-pixel-ratio: 5 / 4), 
(-webkit-min-device-pixel-ratio: 1.25), (min-resolution: 1.25dppx){.cms_toolbar-item-navigation-children>a span,.cms_sideframe-btn div,.cms_clipboard ul a,.cms_clipboard-empty a,.cms_messages .cms_messages-close,.cms_modal-collapse,.cms_modal-close,.cms_modal-maximize,.cms_modal-resize,.cms_modal-breadcrumb a,.cms_modal-breadcrumb-title,.cms_toolbar-item-logo a,.cms_toolbar-trigger a,.cms_tooltip,.cms_placeholders-menu,.cms_toolbar-debug .cms_debug-bar{background-image:url("../img/toolbar/[email protected]") !important;background-size:190px !important}#cms_toolbar .cms_loader{background-image:url("../img/[email protected]") !important;background-size:32px !important}.cms_submenu,.cms_submenu-quicksearch,.cms_placeholder-title:before,.cms_placeholder .cms_dragitem-collapsable,.cms_placeholder .cms_dragitem-collapsed{background-image:url("../img/toolbar/[email protected]") !important;background-size:190px !important}}
+ */#cms_toolbar .cms_submenu{display:block;width:20px;height:15px;cursor:pointer;position:absolute;right:5px;background:url("../img/toolbar/sprite_toolbar.png") no-repeat 3px -152px}#cms_toolbar .cms_submenu-lang{padding:0 5px;position:absolute;top:3px;right:3px;border:1px solid #e6e6e6;background:white;-moz-border-radius:3px;-webkit-border-radius:3px;border-radius:3px}#cms_toolbar .cms_submenu-dropdown{display:none;zoom:1;position:absolute;right:0;top:20px;z-index:999;min-width:140px;max-height:230px;overflow:auto;border:1px solid #e6e6e6;background:white;-moz-box-shadow:0 1px 1px rgba(0,0,0,0.1);-webkit-box-shadow:0 1px 1px rgba(0,0,0,0.1);box-shadow:0 1px 1px rgba(0,0,0,0.1)}#cms_toolbar .cms_submenu-dropdown::-webkit-scrollbar{-webkit-appearance:none;width:7px;background:#e6e6e6}#cms_toolbar .cms_submenu-dropdown::-webkit-scrollbar-thumb{background-color:#454545;border-left:1px solid #e6e6e6;-moz-box-shadow:0 0 1px rgba(255,255,255,0.5);-webkit-box-shadow:0 0 1px rgba(255,255,255,0.5);box-shadow:0 0 1px rgba(255,255,255,0.5)}#cms_toolbar .cms_submenu-dropdown .cms_submenu-item{zoom:1}#cms_toolbar .cms_submenu-dropdown .cms_submenu-item a,#cms_toolbar .cms_submenu-dropdown span{display:block;font-size:12px;line-height:15px;text-align:left;padding:4px 8px 3px 8px}#cms_toolbar .cms_submenu-dropdown .cms_submenu-item a{color:black}#cms_toolbar .cms_submenu-dropdown .cms_submenu-item a:hover,#cms_toolbar .cms_submenu-dropdown .cms_submenu-item a:focus{color:white;background:#0e72ec;background-image:-moz-linear-gradient(top, #0e97ec,#0e72ec);background-image:-webkit-linear-gradient(top, #0e97ec,#0e72ec);background-image:linear-gradient(to bottom, #0e97ec,#0e72ec)}#cms_toolbar .cms_submenu-dropdown .cms_submenu-item a:first-child{border-top:none}#cms_toolbar .cms_submenu-dropdown .cms_submenu-item span{cursor:default;font-weight:bold;color:black;border-top:1px solid #a6a6a6;border-bottom:1px solid #e6e6e6}#cms_toolbar .cms_submenu-dropdown .cms_submenu-item:first-child span{border-top:none}#cms_toolbar .cms_submenu-quicksearch{display:none;position:absolute;right:-5px;top:-5px;z-index:1000;cursor:default;text-align:right;height:25px;-moz-border-radius:4px;-webkit-border-radius:4px;border-radius:4px;background:#454545 url("../img/toolbar/sprite_toolbar.png") no-repeat right -326px}#cms_toolbar .cms_submenu-quicksearch label{cursor:pointer}#cms_toolbar .cms_submenu-quicksearch input{display:block;font-size:12px;color:white;text-align:right;-webkit-appearance:none;width:109px;height:20px;padding:3px 1px 1px 5px;margin-right:25px;border:none;background:none}#cms_toolbar .cms_submenu-scroll-hint{display:none;color:#a6a6a6;font-size:12px;line-height:1;text-align:center;position:absolute;left:0;bottom:0;width:100%;padding:5px 0 4px;background-color:#e6e6e6}@media print, (-o-min-device-pixel-ratio: 5 / 4), (-webkit-min-device-pixel-ratio: 1.25), (min-resolution: 1.25dppx){.cms_toolbar-item-navigation-children>a span,.cms_sideframe-btn div,.cms_clipboard ul a,.cms_clipboard-empty a,.cms_messages .cms_messages-close,.cms_modal-collapse,.cms_modal-close,.cms_modal-maximize,.cms_modal-resize,.cms_modal-breadcrumb a,.cms_modal-breadcrumb-title,.cms_toolbar-item-logo a,.cms_toolbar-trigger a,.cms_tooltip,.cms_placeholders-menu,.cms_toolbar-debug .cms_debug-bar{background-image:url("../img/toolbar/[email protected]") !important;background-size:190px !important}#cms_toolbar .cms_loader{background-image:url("../img/[email protected]") !important;background-size:32px 
!important}.cms_submenu,.cms_submenu-quicksearch,.cms_placeholder-title:before,.cms_placeholder .cms_dragitem-collapsable,.cms_placeholder .cms_dragitem-collapsed{background-image:url("../img/toolbar/[email protected]") !important;background-size:190px !important}}
diff --git a/cms/static/cms/sass/includes/_toolbar.scss b/cms/static/cms/sass/includes/_toolbar.scss
index af3647aaf67..c5b55d5aebd 100644
--- a/cms/static/cms/sass/includes/_toolbar.scss
+++ b/cms/static/cms/sass/includes/_toolbar.scss
@@ -180,45 +180,7 @@ position:absolute; left:0; top:5px; z-index:9999999; width:100%;
.cms_toolbar-loader a { background:#fcfcfc url('../img/loader.gif') no-repeat center center !important;
background-size:20px 20px !important; }
-// #TOOLBAR/elements/switch#
-.cms_toolbar-item_switch { position:relative; left:0; top:0; margin:4px 0 4px; @include border-radius(20px);
- border-top:1px solid $color-grey-10; background:darken($color-grey-5, 5%);
- @include box-shadow(inset $color-grey-10 0px 1px 0px);
- &:hover, &:active, &:focus { background-color:$color-grey-10; }
-
- a { float:left; position:relative; z-index:100; font-size:11px; line-height:11px;
- text-transform:uppercase; letter-spacing:1px; padding:6px 14px 4px 28px;
- color:black; @include text-shadow(0 1px 0 white); }
- .cms_toolbar-item_switch-knob {
- float:left; position:absolute; left:2px; top:1px; z-index:99; width:16px; height:16px;
- @include border-radius(16px); @include hide-text();
-
- border:1px solid black; background:$color-grey-70;
- @include box-shadow(inset 0 1px 0 lighten($color-grey, 20%));
- @include background-image($gradient-dark);
- }
- .cms_toolbar-item_switch-on { display:none; position:relative; top:-1px; }
- .cms_toolbar-item_switch-off { display:inline; position:relative; top:-1px; }
-}
-.cms_toolbar-item_switch-active {
- a { padding:6px 28px 4px 14px; color:$color-green; }
- .cms_toolbar-item_switch-knob { left:auto; right:2px;
- border:1px solid lighten($color-green, 10%); background:lighten($color-green, 10%);
- @include box-shadow(inset 0 1px 0 lighten($color-green, 30%));
- @include background-image($gradient-green); }
- .cms_toolbar-item_switch-on { display:inline; }
- .cms_toolbar-item_switch-off { display:none; }
-}
-// highlight
-.cms_toolbar-item_switch-highlight {
- a { color:$color-blue; }
- .cms_toolbar-item_switch-knob {
- border:1px solid darken($color-blue, 10%); background:lighten($color-blue, 10%);
- @include box-shadow(inset 0 1px 0 lighten($color-blue, 20%));
- @include background-image($gradient-blue);
- }
-}
-
+// TODO Reimplement blinking if unpublished content is present
//##################################################################################################################
// #TOOLBAR/blocker#
.cms_screenblock { color:white; text-align:center;
@@ -234,4 +196,4 @@ position:absolute; left:0; top:5px; z-index:9999999; width:100%;
}
// end of toolbar
-}
\ No newline at end of file
+}
diff --git a/cms/templates/cms/toolbar/items/live_draft.html b/cms/templates/cms/toolbar/items/live_draft.html
index 6d731f02751..19e27f12ad9 100644
--- a/cms/templates/cms/toolbar/items/live_draft.html
+++ b/cms/templates/cms/toolbar/items/live_draft.html
@@ -1,8 +1,13 @@
-{% load i18n %}
-<div class="cms_toolbar-item cms_toolbar-item_switch{% if not request.toolbar.edit_mode %} cms_toolbar-item_switch-active{% endif %}">
- <a href="{% if request.toolbar.edit_mode %}?{{ request.toolbar.edit_mode_url_off }}{% else %}?{{ request.toolbar.edit_mode_url_on }}{% endif %}">
- <span class="cms_toolbar-item_switch-on">{% trans "Live" %}</span>
- <span class="cms_toolbar-item_switch-off">{% trans "Draft" %}</span>
+{% load i18n %}{% spaceless %}
+<div class="cms_toolbar-item cms_toolbar-item-buttons cms_toolbar-item_switch_save-edit">
+ {% if request.toolbar.edit_mode %}
+ <a class="cms_btn cms_btn-switch-save" href="?{{ request.toolbar.edit_mode_url_off }}">
+ {% trans "Save and close" %}
</a>
- <span class="cms_toolbar-item_switch-knob">{% trans "Change" %}</span>
+ {% else %}
+ <a class="cms_btn cms_btn-active cms_btn-switch-edit" href="?{{ request.toolbar.edit_mode_url_on }}">
+ {% trans "Edit" %}
+ </a>
+ {% endif %}
</div>
+{% endspaceless %}
diff --git a/cms/templates/cms/toolbar/toolbar.html b/cms/templates/cms/toolbar/toolbar.html
index 4133350bede..904936b3903 100644
--- a/cms/templates/cms/toolbar/toolbar.html
+++ b/cms/templates/cms/toolbar/toolbar.html
@@ -27,15 +27,6 @@
{% for item in request.toolbar.get_right_items %}
{{ item.render }}
{% endfor %}
- {% if request.toolbar.can_change %}
- <div class="cms_toolbar-item cms_toolbar-item_switch{% if not request.toolbar.edit_mode %} cms_toolbar-item_switch-active{% endif %}">
- <a href="{% if request.toolbar.edit_mode %}?{{ request.toolbar.edit_mode_url_off }}{% else %}?{{ request.toolbar.edit_mode_url_on }}{% endif %}">
- <span class="cms_toolbar-item_switch-on">{% trans "Live" %}</span>
- <span class="cms_toolbar-item_switch-off">{% trans "Draft" %}</span>
- </a>
- <span class="cms_toolbar-item_switch-knob">{% trans "Change" %}</span>
- </div>
- {% endif %}
</div>
</div>
<div class="cms_toolbar-trigger"><a href="#">{% trans "Toggle toolbar" %}</a></div>
diff --git a/cms/tests/views.py b/cms/tests/views.py
index dd6cc3193e6..932f70845a9 100644
--- a/cms/tests/views.py
+++ b/cms/tests/views.py
@@ -149,13 +149,13 @@ def test_edit_permission(self):
page = create_page("page", "nav_playground.html", "en", published=True)
# Anon user
response = self.client.get("/en/?%s" % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON'))
- self.assertNotContains(response, "cms_toolbar-item_switch", 200)
+ self.assertNotContains(response, "cms_toolbar-item_switch_save-edit", 200)
# Superuser
user = self.get_superuser()
with self.login_user_context(user):
response = self.client.get("/en/?%s" % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON'))
- self.assertContains(response, "cms_toolbar-item_switch", 4, 200)
+ self.assertContains(response, "cms_toolbar-item_switch_save-edit", 1, 200)
# Admin but with no permission
user = self.get_staff_user_with_no_permissions()
@@ -163,12 +163,12 @@ def test_edit_permission(self):
with self.login_user_context(user):
response = self.client.get("/en/?%s" % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON'))
- self.assertNotContains(response, "cms_toolbar-item_switch", 200)
+ self.assertNotContains(response, "cms_toolbar-item_switch_save-edit", 200)
PagePermission.objects.create(can_change=True, user=user, page=page)
with self.login_user_context(user):
response = self.client.get("/en/?%s" % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON'))
- self.assertContains(response, "cms_toolbar-item_switch", 4, 200)
+ self.assertContains(response, "cms_toolbar-item_switch_save-edit", 1, 200)
@override_settings(ROOT_URLCONF='cms.test_utils.project.urls')
|
scikit-image__scikit-image-3790 | 0.14.2 test suite fails with `NameError: global name 'osp'`
## Description
The test suite does not pass. As far as I know, `osp` is a common alias for `os.path`. Is this a typo in the code, or is it related to the base Python version?
## Way to reproduce
```bash
pytest -vv
```
## Version information
```python
2.7.16 (default, Mar 4 2019, 19:30:43)
[GCC 8.2.0]
Linux-4.20.2-gentoo-x86_64-Intel-R-_Core-TM-_i7-8550U_CPU_@_1.80GHz-with-gentoo-2.6
scikit-image version: 0.14.2
numpy version: 1.16.1
```
OR
```python
3.6.8 (default, Mar 4 2019, 19:32:41)
[GCC 8.2.0]
Linux-4.20.2-gentoo-x86_64-Intel-R-_Core-TM-_i7-8550U_CPU_@_1.80GHz-with-gentoo-2.6
scikit-image version: 0.14.2
numpy version: 1.16.1
```
## My output
[build.log](https://github.com/scikit-image/scikit-image/files/2937545/build.log)
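For context, a minimal sketch (not part of the report) of the alias the failing code assumes: `_raise_build_error` in `skimage/__init__.py` calls `osp.split` without ever binding `osp`, so the conventional `os.path` alias has to be imported for the name to resolve.
```python
# The function body assumes this alias; without the import, `osp` is an unbound
# global and Python raises NameError at call time.
import os.path as osp

local_dir = osp.split(__file__)[0]  # resolves normally once the alias is defined
```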
| [
{
"content": "\"\"\"Image Processing SciKit (Toolbox for SciPy)\n\n``scikit-image`` (a.k.a. ``skimage``) is a collection of algorithms for image\nprocessing and computer vision.\n\nThe main package of ``skimage`` only provides a few utilities for converting\nbetween image data types; for most features, you need to import one of the\nfollowing subpackages:\n\nSubpackages\n-----------\ncolor\n Color space conversion.\ndata\n Test images and example data.\ndraw\n Drawing primitives (lines, text, etc.) that operate on NumPy arrays.\nexposure\n Image intensity adjustment, e.g., histogram equalization, etc.\nfeature\n Feature detection and extraction, e.g., texture analysis corners, etc.\nfilters\n Sharpening, edge finding, rank filters, thresholding, etc.\ngraph\n Graph-theoretic operations, e.g., shortest paths.\nio\n Reading, saving, and displaying images and video.\nmeasure\n Measurement of image properties, e.g., similarity and contours.\nmorphology\n Morphological operations, e.g., opening or skeletonization.\nnovice\n Simplified interface for teaching purposes.\nrestoration\n Restoration algorithms, e.g., deconvolution algorithms, denoising, etc.\nsegmentation\n Partitioning an image into multiple regions.\ntransform\n Geometric and other transforms, e.g., rotation or the Radon transform.\nutil\n Generic utilities.\nviewer\n A simple graphical user interface for visualizing results and exploring\n parameters.\n\nUtility Functions\n-----------------\nimg_as_float\n Convert an image to floating point format, with values in [0, 1].\n Is similar to `img_as_float64`, but will not convert lower-precision\n floating point arrays to `float64`.\nimg_as_float32\n Convert an image to single-precision (32-bit) floating point format,\n with values in [0, 1].\nimg_as_float64\n Convert an image to double-precision (64-bit) floating point format,\n with values in [0, 1].\nimg_as_uint\n Convert an image to unsigned integer format, with values in [0, 65535].\nimg_as_int\n Convert an image to signed integer format, with values in [-32768, 32767].\nimg_as_ubyte\n Convert an image to unsigned byte format, with values in [0, 255].\nimg_as_bool\n Convert an image to boolean format, with values either True or False.\ndtype_limits\n Return intensity limits, i.e. (min, max) tuple, of the image's dtype.\n\n\"\"\"\n\nimport imp\nimport functools\nimport warnings\nimport sys\n\n__version__ = '0.14.3'\n\n\ntry:\n imp.find_module('pytest')\nexcept ImportError:\n def _test(doctest=False, verbose=False):\n \"\"\"This would run all unit tests, but pytest couldn't be\n imported so the test suite can not run.\n \"\"\"\n raise ImportError(\"Could not load pytest. 
Unit tests not available.\")\n\nelse:\n def _test(doctest=False, verbose=False):\n \"\"\"Run all unit tests.\"\"\"\n import pytest\n import warnings\n args = ['--pyargs', 'skimage']\n if verbose:\n args.extend(['-v', '-s'])\n if doctest:\n args.extend(['--doctest-modules'])\n # Make sure warnings do not break the doc tests\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n success = pytest.main(args)\n else:\n success = pytest.main(args)\n # Return sys.exit code\n if success:\n return 0\n else:\n return 1\n\n\n# do not use `test` as function name as this leads to a recursion problem with\n# the nose test suite\ntest = _test\ntest_verbose = functools.partial(test, verbose=True)\ntest_verbose.__doc__ = test.__doc__\ndoctest = functools.partial(test, doctest=True)\ndoctest.__doc__ = doctest.__doc__\ndoctest_verbose = functools.partial(test, doctest=True, verbose=True)\ndoctest_verbose.__doc__ = doctest.__doc__\n\n\n# Logic for checking for improper install and importing while in the source\n# tree when package has not been installed inplace.\n# Code adapted from scikit-learn's __check_build module.\n_INPLACE_MSG = \"\"\"\nIt appears that you are importing a local scikit-image source tree. For\nthis, you need to have an inplace install. Maybe you are in the source\ndirectory and you need to try from another location.\"\"\"\n\n_STANDARD_MSG = \"\"\"\nYour install of scikit-image appears to be broken.\nTry re-installing the package following the instructions at:\nhttp://scikit-image.org/docs/stable/install.html \"\"\"\n\n\ndef _raise_build_error(e):\n # Raise a comprehensible error\n local_dir = osp.split(__file__)[0]\n msg = _STANDARD_MSG\n if local_dir == \"skimage\":\n # Picking up the local install: this will work only if the\n # install is an 'inplace build'\n msg = _INPLACE_MSG\n raise ImportError(\"\"\"%s\nIt seems that scikit-image has not been built correctly.\n%s\"\"\" % (e, msg))\n\ntry:\n # This variable is injected in the __builtins__ by the build\n # process. It used to enable importing subpackages of skimage when\n # the binaries are not built\n __SKIMAGE_SETUP__\nexcept NameError:\n __SKIMAGE_SETUP__ = False\n\nif __SKIMAGE_SETUP__:\n sys.stderr.write('Partial import of skimage during the build process.\\n')\n # We are not importing the rest of the scikit during the build\n # process, as it may not be compiled yet\nelse:\n try:\n from ._shared import geometry\n del geometry\n except ImportError as e:\n _raise_build_error(e)\n # All skimage root imports go here\n from .util.dtype import (img_as_float32,\n img_as_float64,\n img_as_float,\n img_as_int,\n img_as_uint,\n img_as_ubyte,\n img_as_bool,\n dtype_limits)\n\n from .util.lookfor import lookfor\n from .data import data_dir\n\n\ndel warnings, functools, imp, sys\n",
"path": "skimage/__init__.py"
}
] | [
{
"content": "\"\"\"Image Processing SciKit (Toolbox for SciPy)\n\n``scikit-image`` (a.k.a. ``skimage``) is a collection of algorithms for image\nprocessing and computer vision.\n\nThe main package of ``skimage`` only provides a few utilities for converting\nbetween image data types; for most features, you need to import one of the\nfollowing subpackages:\n\nSubpackages\n-----------\ncolor\n Color space conversion.\ndata\n Test images and example data.\ndraw\n Drawing primitives (lines, text, etc.) that operate on NumPy arrays.\nexposure\n Image intensity adjustment, e.g., histogram equalization, etc.\nfeature\n Feature detection and extraction, e.g., texture analysis corners, etc.\nfilters\n Sharpening, edge finding, rank filters, thresholding, etc.\ngraph\n Graph-theoretic operations, e.g., shortest paths.\nio\n Reading, saving, and displaying images and video.\nmeasure\n Measurement of image properties, e.g., similarity and contours.\nmorphology\n Morphological operations, e.g., opening or skeletonization.\nnovice\n Simplified interface for teaching purposes.\nrestoration\n Restoration algorithms, e.g., deconvolution algorithms, denoising, etc.\nsegmentation\n Partitioning an image into multiple regions.\ntransform\n Geometric and other transforms, e.g., rotation or the Radon transform.\nutil\n Generic utilities.\nviewer\n A simple graphical user interface for visualizing results and exploring\n parameters.\n\nUtility Functions\n-----------------\nimg_as_float\n Convert an image to floating point format, with values in [0, 1].\n Is similar to `img_as_float64`, but will not convert lower-precision\n floating point arrays to `float64`.\nimg_as_float32\n Convert an image to single-precision (32-bit) floating point format,\n with values in [0, 1].\nimg_as_float64\n Convert an image to double-precision (64-bit) floating point format,\n with values in [0, 1].\nimg_as_uint\n Convert an image to unsigned integer format, with values in [0, 65535].\nimg_as_int\n Convert an image to signed integer format, with values in [-32768, 32767].\nimg_as_ubyte\n Convert an image to unsigned byte format, with values in [0, 255].\nimg_as_bool\n Convert an image to boolean format, with values either True or False.\ndtype_limits\n Return intensity limits, i.e. (min, max) tuple, of the image's dtype.\n\n\"\"\"\n\nimport imp\nimport functools\nimport warnings\nimport sys\n\n__version__ = '0.14.2'\n\n\ntry:\n imp.find_module('pytest')\nexcept ImportError:\n def _test(doctest=False, verbose=False):\n \"\"\"This would run all unit tests, but pytest couldn't be\n imported so the test suite can not run.\n \"\"\"\n raise ImportError(\"Could not load pytest. 
Unit tests not available.\")\n\nelse:\n def _test(doctest=False, verbose=False):\n \"\"\"Run all unit tests.\"\"\"\n import pytest\n import warnings\n args = ['--pyargs', 'skimage']\n if verbose:\n args.extend(['-v', '-s'])\n if doctest:\n args.extend(['--doctest-modules'])\n # Make sure warnings do not break the doc tests\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n success = pytest.main(args)\n else:\n success = pytest.main(args)\n # Return sys.exit code\n if success:\n return 0\n else:\n return 1\n\n\n# do not use `test` as function name as this leads to a recursion problem with\n# the nose test suite\ntest = _test\ntest_verbose = functools.partial(test, verbose=True)\ntest_verbose.__doc__ = test.__doc__\ndoctest = functools.partial(test, doctest=True)\ndoctest.__doc__ = doctest.__doc__\ndoctest_verbose = functools.partial(test, doctest=True, verbose=True)\ndoctest_verbose.__doc__ = doctest.__doc__\n\n\n# Logic for checking for improper install and importing while in the source\n# tree when package has not been installed inplace.\n# Code adapted from scikit-learn's __check_build module.\n_INPLACE_MSG = \"\"\"\nIt appears that you are importing a local scikit-image source tree. For\nthis, you need to have an inplace install. Maybe you are in the source\ndirectory and you need to try from another location.\"\"\"\n\n_STANDARD_MSG = \"\"\"\nYour install of scikit-image appears to be broken.\nTry re-installing the package following the instructions at:\nhttp://scikit-image.org/docs/stable/install.html \"\"\"\n\n\ndef _raise_build_error(e):\n # Raise a comprehensible error\n import os.path as osp\n local_dir = osp.split(__file__)[0]\n msg = _STANDARD_MSG\n if local_dir == \"skimage\":\n # Picking up the local install: this will work only if the\n # install is an 'inplace build'\n msg = _INPLACE_MSG\n raise ImportError(\"\"\"%s\nIt seems that scikit-image has not been built correctly.\n%s\"\"\" % (e, msg))\n\ntry:\n # This variable is injected in the __builtins__ by the build\n # process. It used to enable importing subpackages of skimage when\n # the binaries are not built\n __SKIMAGE_SETUP__\nexcept NameError:\n __SKIMAGE_SETUP__ = False\n\nif __SKIMAGE_SETUP__:\n sys.stderr.write('Partial import of skimage during the build process.\\n')\n # We are not importing the rest of the scikit during the build\n # process, as it may not be compiled yet\nelse:\n try:\n from ._shared import geometry\n del geometry\n except ImportError as e:\n _raise_build_error(e)\n # All skimage root imports go here\n from .util.dtype import (img_as_float32,\n img_as_float64,\n img_as_float,\n img_as_int,\n img_as_uint,\n img_as_ubyte,\n img_as_bool,\n dtype_limits)\n\n from .util.lookfor import lookfor\n from .data import data_dir\n\n\ndel warnings, functools, imp, sys\n",
"path": "skimage/__init__.py"
}
] | diff --git a/skimage/__init__.py b/skimage/__init__.py
index 52c8a3af409..d77fd21e9a0 100644
--- a/skimage/__init__.py
+++ b/skimage/__init__.py
@@ -135,6 +135,7 @@ def _test(doctest=False, verbose=False):
def _raise_build_error(e):
# Raise a comprehensible error
+ import os.path as osp
local_dir = osp.split(__file__)[0]
msg = _STANDARD_MSG
if local_dir == "skimage":
|
pytorch__vision-2933 | Change default value of eps in FrozenBatchNorm to match BatchNorm
## ❓ Questions and Help
Hello
A `Loss is nan` error occurs when I train Faster R-CNN with a resnext101 backbone.
My code is as follows
```python
backbone = resnet_fpn_backbone('resnext101_32x8d', pretrained=True)
model = FasterRCNN(backbone, num_classes)
in_features = model.roi_heads.box_predictor.cls_score.in_features
model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)
```
Error message:
```
Epoch: [0] [ 0/7208] eta: 1:27:42 lr: 0.000040 loss: 40613806080.0000 (40613806080.0000) loss_box_reg: 7979147264.0000 (7979147264.0000) loss_classifier: 11993160704.0000 (11993160704.0000) loss_objectness: 9486380032.0000 (9486380032.0000) loss_rpn_box_reg: 11155118080.0000 (11155118080.0000) time: 0.7301 data: 0.4106 max mem: 1241
Loss is nan, stopping training
```
When I change the backbone to resnet50 or resnet152, no error occurs.
### Please note that this issue tracker is not a help form and this issue will be closed.
We have a set of [listed resources available on the website](https://pytorch.org/resources). Our primary means of support is our discussion forum:
- [Discussion Forum](https://discuss.pytorch.org/)
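A hedged illustration of why the `eps` default matters here (the zero-valued `running_var` entry is hypothetical, but any frozen channel with zero variance behaves this way): `FrozenBatchNorm2d` computes `scale = w * (rv + eps).rsqrt()`, so with `eps=0.` a zero variance yields an infinite scale that propagates to NaN losses, while BatchNorm's default `eps=1e-5` keeps it finite.
```python
import torch

rv = torch.tensor([0.0, 0.25])   # running_var with a zero entry (hypothetical)
w = torch.ones(2)                # frozen affine weight

print(w * (rv + 0.0).rsqrt())    # tensor([inf, 2.]) -- an infinite scale ends in NaN losses
print(w * (rv + 1e-5).rsqrt())   # finite scale, matching torch.nn.BatchNorm2d's default eps
```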
| [
{
"content": "\"\"\"\nhelper class that supports empty tensors on some nn functions.\n\nIdeally, add support directly in PyTorch to empty tensors in\nthose functions.\n\nThis can be removed once https://github.com/pytorch/pytorch/issues/12013\nis implemented\n\"\"\"\n\nimport warnings\nimport torch\nfrom torch import Tensor, Size\nfrom torch.jit.annotations import List, Optional, Tuple\n\n\nclass Conv2d(torch.nn.Conv2d):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n warnings.warn(\n \"torchvision.ops.misc.Conv2d is deprecated and will be \"\n \"removed in future versions, use torch.nn.Conv2d instead.\", FutureWarning)\n\n\nclass ConvTranspose2d(torch.nn.ConvTranspose2d):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n warnings.warn(\n \"torchvision.ops.misc.ConvTranspose2d is deprecated and will be \"\n \"removed in future versions, use torch.nn.ConvTranspose2d instead.\", FutureWarning)\n\n\nclass BatchNorm2d(torch.nn.BatchNorm2d):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n warnings.warn(\n \"torchvision.ops.misc.BatchNorm2d is deprecated and will be \"\n \"removed in future versions, use torch.nn.BatchNorm2d instead.\", FutureWarning)\n\n\ninterpolate = torch.nn.functional.interpolate\n\n\n# This is not in nn\nclass FrozenBatchNorm2d(torch.nn.Module):\n \"\"\"\n BatchNorm2d where the batch statistics and the affine parameters\n are fixed\n \"\"\"\n\n def __init__(\n self,\n num_features: int,\n eps: float = 0.,\n n: Optional[int] = None,\n ):\n # n=None for backward-compatibility\n if n is not None:\n warnings.warn(\"`n` argument is deprecated and has been renamed `num_features`\",\n DeprecationWarning)\n num_features = n\n super(FrozenBatchNorm2d, self).__init__()\n self.eps = eps\n self.register_buffer(\"weight\", torch.ones(num_features))\n self.register_buffer(\"bias\", torch.zeros(num_features))\n self.register_buffer(\"running_mean\", torch.zeros(num_features))\n self.register_buffer(\"running_var\", torch.ones(num_features))\n\n def _load_from_state_dict(\n self,\n state_dict: dict,\n prefix: str,\n local_metadata: dict,\n strict: bool,\n missing_keys: List[str],\n unexpected_keys: List[str],\n error_msgs: List[str],\n ):\n num_batches_tracked_key = prefix + 'num_batches_tracked'\n if num_batches_tracked_key in state_dict:\n del state_dict[num_batches_tracked_key]\n\n super(FrozenBatchNorm2d, self)._load_from_state_dict(\n state_dict, prefix, local_metadata, strict,\n missing_keys, unexpected_keys, error_msgs)\n\n def forward(self, x: Tensor) -> Tensor:\n # move reshapes to the beginning\n # to make it fuser-friendly\n w = self.weight.reshape(1, -1, 1, 1)\n b = self.bias.reshape(1, -1, 1, 1)\n rv = self.running_var.reshape(1, -1, 1, 1)\n rm = self.running_mean.reshape(1, -1, 1, 1)\n scale = w * (rv + self.eps).rsqrt()\n bias = b - rm * scale\n return x * scale + bias\n\n def __repr__(self) -> str:\n return f\"{self.__class__.__name__}({self.weight.shape[0]}, eps={self.eps})\"\n",
"path": "torchvision/ops/misc.py"
}
] | [
{
"content": "\"\"\"\nhelper class that supports empty tensors on some nn functions.\n\nIdeally, add support directly in PyTorch to empty tensors in\nthose functions.\n\nThis can be removed once https://github.com/pytorch/pytorch/issues/12013\nis implemented\n\"\"\"\n\nimport warnings\nimport torch\nfrom torch import Tensor, Size\nfrom torch.jit.annotations import List, Optional, Tuple\n\n\nclass Conv2d(torch.nn.Conv2d):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n warnings.warn(\n \"torchvision.ops.misc.Conv2d is deprecated and will be \"\n \"removed in future versions, use torch.nn.Conv2d instead.\", FutureWarning)\n\n\nclass ConvTranspose2d(torch.nn.ConvTranspose2d):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n warnings.warn(\n \"torchvision.ops.misc.ConvTranspose2d is deprecated and will be \"\n \"removed in future versions, use torch.nn.ConvTranspose2d instead.\", FutureWarning)\n\n\nclass BatchNorm2d(torch.nn.BatchNorm2d):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n warnings.warn(\n \"torchvision.ops.misc.BatchNorm2d is deprecated and will be \"\n \"removed in future versions, use torch.nn.BatchNorm2d instead.\", FutureWarning)\n\n\ninterpolate = torch.nn.functional.interpolate\n\n\n# This is not in nn\nclass FrozenBatchNorm2d(torch.nn.Module):\n \"\"\"\n BatchNorm2d where the batch statistics and the affine parameters\n are fixed\n \"\"\"\n\n def __init__(\n self,\n num_features: int,\n eps: float = 1e-5,\n n: Optional[int] = None,\n ):\n # n=None for backward-compatibility\n if n is not None:\n warnings.warn(\"`n` argument is deprecated and has been renamed `num_features`\",\n DeprecationWarning)\n num_features = n\n super(FrozenBatchNorm2d, self).__init__()\n self.eps = eps\n self.register_buffer(\"weight\", torch.ones(num_features))\n self.register_buffer(\"bias\", torch.zeros(num_features))\n self.register_buffer(\"running_mean\", torch.zeros(num_features))\n self.register_buffer(\"running_var\", torch.ones(num_features))\n\n def _load_from_state_dict(\n self,\n state_dict: dict,\n prefix: str,\n local_metadata: dict,\n strict: bool,\n missing_keys: List[str],\n unexpected_keys: List[str],\n error_msgs: List[str],\n ):\n num_batches_tracked_key = prefix + 'num_batches_tracked'\n if num_batches_tracked_key in state_dict:\n del state_dict[num_batches_tracked_key]\n\n super(FrozenBatchNorm2d, self)._load_from_state_dict(\n state_dict, prefix, local_metadata, strict,\n missing_keys, unexpected_keys, error_msgs)\n\n def forward(self, x: Tensor) -> Tensor:\n # move reshapes to the beginning\n # to make it fuser-friendly\n w = self.weight.reshape(1, -1, 1, 1)\n b = self.bias.reshape(1, -1, 1, 1)\n rv = self.running_var.reshape(1, -1, 1, 1)\n rm = self.running_mean.reshape(1, -1, 1, 1)\n scale = w * (rv + self.eps).rsqrt()\n bias = b - rm * scale\n return x * scale + bias\n\n def __repr__(self) -> str:\n return f\"{self.__class__.__name__}({self.weight.shape[0]}, eps={self.eps})\"\n",
"path": "torchvision/ops/misc.py"
}
] | diff --git a/test/test_models.py b/test/test_models.py
index acff816852b..b37fb176a2b 100644
--- a/test/test_models.py
+++ b/test/test_models.py
@@ -6,9 +6,10 @@
import numpy as np
from torchvision import models
import unittest
-import traceback
import random
+from torchvision.ops.misc import FrozenBatchNorm2d
+
def set_rng_seed(seed):
torch.manual_seed(seed)
@@ -149,6 +150,10 @@ def _test_detection_model(self, name, dev):
if "retinanet" in name:
kwargs["score_thresh"] = 0.013
model = models.detection.__dict__[name](num_classes=50, pretrained_backbone=False, **kwargs)
+ if "keypointrcnn" in name or "retinanet" in name:
+ for module in model.modules():
+ if isinstance(module, FrozenBatchNorm2d):
+ module.eps = 0
model.eval().to(device=dev)
input_shape = (3, 300, 300)
# RNG always on CPU, to ensure x in cuda tests is bitwise identical to x in cpu tests
diff --git a/test/test_ops.py b/test/test_ops.py
index 7c13de4dedc..79294ed173e 100644
--- a/test/test_ops.py
+++ b/test/test_ops.py
@@ -623,10 +623,10 @@ def test_frozenbatchnorm2d_eps(self):
running_var=torch.rand(sample_size[1]),
num_batches_tracked=torch.tensor(100))
- # Check that default eps is zero for backward-compatibility
+ # Check that default eps is equal to the one of BN
fbn = ops.misc.FrozenBatchNorm2d(sample_size[1])
fbn.load_state_dict(state_dict, strict=False)
- bn = torch.nn.BatchNorm2d(sample_size[1], eps=0).eval()
+ bn = torch.nn.BatchNorm2d(sample_size[1]).eval()
bn.load_state_dict(state_dict)
# Difference is expected to fall in an acceptable range
self.assertTrue(torch.allclose(fbn(x), bn(x), atol=1e-6))
diff --git a/torchvision/ops/misc.py b/torchvision/ops/misc.py
index 3b52c0d8c4d..3e9f13c9daf 100644
--- a/torchvision/ops/misc.py
+++ b/torchvision/ops/misc.py
@@ -51,7 +51,7 @@ class FrozenBatchNorm2d(torch.nn.Module):
def __init__(
self,
num_features: int,
- eps: float = 0.,
+ eps: float = 1e-5,
n: Optional[int] = None,
):
# n=None for backward-compatibility
|
OCA__server-tools-464 | runbot 9.0 red due to letsencrypt?
Hi,
It seems the 9.0 branch is red on runbot due to the letsencrypt module?
```
Call of self.pool.get('letsencrypt').cron(cr, uid, *()) failed in Job 2
Traceback (most recent call last):
File "/srv/openerp/instances/openerp-oca-runbot/parts/odoo-extra/runbot/static/build/3148182-9-0-209efa/openerp/addons/base/ir/ir_cron.py", line 129, in _callback
getattr(model, method_name)(cr, uid, *args)
File "/srv/openerp/instances/openerp-oca-runbot/parts/odoo-extra/runbot/static/build/3148182-9-0-209efa/openerp/api.py", line 250, in wrapper
return old_api(self, *args, **kwargs)
File "/srv/openerp/instances/openerp-oca-runbot/parts/odoo-extra/runbot/static/build/3148182-9-0-209efa/openerp/api.py", line 354, in old_api
result = method(recs, *args, **kwargs)
File "/srv/openerp/instances/openerp-oca-runbot/parts/odoo-extra/runbot/static/build/3148182-9-0-209efa/openerp/addons/letsencrypt/models/letsencrypt.py", line 151, in cron
account_key, csr, acme_challenge, log=_logger, CA=DEFAULT_CA)
File "/srv/openerp/instances/openerp-oca-runbot/sandbox/local/lib/python2.7/site-packages/acme_tiny.py", line 104, in get_crt
raise ValueError("Error requesting challenges: {0} {1}".format(code, result))
ValueError: Error requesting challenges: 400 {
"type": "urn:acme:error:malformed",
"detail": "Error creating new authz :: Invalid character in DNS name",
"status": 400
}
```
@hbrunn
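
The 400 response from the ACME endpoint means the hostname passed to `acme_tiny.get_crt()` on the runbot build is not a valid DNS name. Below is a minimal, hypothetical sketch of a guard the cron could run before requesting challenges; the `is_valid_dns_name` helper and the surrounding `domain`/`_logger` names are assumptions for illustration, not part of the module:

```python
import re

# RFC 1123-style hostname check: dot-separated labels of letters, digits and
# hyphens, with no leading or trailing hyphen in any label.
_DNS_LABEL = r"(?!-)[A-Za-z0-9-]{1,63}(?<!-)"
_DNS_NAME = re.compile(r"^" + _DNS_LABEL + r"(\." + _DNS_LABEL + r")*$")


def is_valid_dns_name(domain):
    """Return True if ``domain`` looks like a name Let's Encrypt will accept."""
    return bool(domain) and len(domain) <= 253 and _DNS_NAME.match(domain) is not None


# Hypothetical use at the top of the cron, before acme_tiny.get_crt() is called:
#
#     if not is_valid_dns_name(domain):
#         _logger.warning(
#             "letsencrypt cron skipped: %r is not a valid DNS name", domain)
#         return
```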
| [
{
"content": "# -*- coding: utf-8 -*-\n# © 2016 Therp BV <http://therp.nl>\n# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).\n{\n \"name\": \"Let's encrypt\",\n \"version\": \"9.0.1.0.0\",\n \"author\": \"Therp BV,\"\n \"Tecnativa,\"\n \"Odoo Community Association (OCA)\",\n \"license\": \"AGPL-3\",\n \"category\": \"Hidden/Dependency\",\n \"summary\": \"Request SSL certificates from letsencrypt.org\",\n \"depends\": [\n 'base',\n ],\n \"data\": [\n \"data/ir_config_parameter.xml\",\n \"data/ir_cron.xml\",\n ],\n \"post_init_hook\": 'post_init_hook',\n \"installable\": True,\n \"external_dependencies\": {\n 'bin': [\n 'openssl',\n ],\n 'python': [\n 'acme_tiny',\n 'IPy',\n ],\n },\n}\n",
"path": "letsencrypt/__openerp__.py"
}
] | [
{
"content": "# -*- coding: utf-8 -*-\n# © 2016 Therp BV <http://therp.nl>\n# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).\n{\n \"name\": \"Let's encrypt\",\n \"version\": \"9.0.1.0.0\",\n \"author\": \"Therp BV,\"\n \"Tecnativa,\"\n \"Odoo Community Association (OCA)\",\n \"license\": \"AGPL-3\",\n \"category\": \"Hidden/Dependency\",\n \"summary\": \"Request SSL certificates from letsencrypt.org\",\n \"depends\": [\n 'base',\n ],\n \"data\": [\n \"data/ir_config_parameter.xml\",\n \"data/ir_cron.xml\",\n \"demo/ir_cron.xml\",\n ],\n \"post_init_hook\": 'post_init_hook',\n \"installable\": True,\n \"external_dependencies\": {\n 'bin': [\n 'openssl',\n ],\n 'python': [\n 'acme_tiny',\n 'IPy',\n ],\n },\n}\n",
"path": "letsencrypt/__openerp__.py"
}
] | diff --git a/auth_supplier/security/auth_supplier_security.xml b/auth_supplier/security/auth_supplier_security.xml
index 93293bee2f6..0108e381f29 100644
--- a/auth_supplier/security/auth_supplier_security.xml
+++ b/auth_supplier/security/auth_supplier_security.xml
@@ -4,7 +4,6 @@
<record id="group_auth_supplier" model="res.groups">
<field name="name">Supplier Portal</field>
<field name="category_id" ref="base.module_category_extra"/>
- <field name="is_portal" eval="True"/>
</record>
</odoo>
diff --git a/letsencrypt/__openerp__.py b/letsencrypt/__openerp__.py
index 01457b8073a..626b17e12b2 100644
--- a/letsencrypt/__openerp__.py
+++ b/letsencrypt/__openerp__.py
@@ -16,6 +16,7 @@
"data": [
"data/ir_config_parameter.xml",
"data/ir_cron.xml",
+ "demo/ir_cron.xml",
],
"post_init_hook": 'post_init_hook',
"installable": True,
diff --git a/letsencrypt/demo/ir_cron.xml b/letsencrypt/demo/ir_cron.xml
new file mode 100644
index 00000000000..e4451aa5946
--- /dev/null
+++ b/letsencrypt/demo/ir_cron.xml
@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<openerp>
+ <data>
+ <record id="cronjob" model="ir.cron">
+ <field name="active" eval="False" />
+ </record>
+ </data>
+</openerp>
|
scrapy__scrapy-4503 | Fix the hoverxref configuration
> You shouldn't override hoverxref_version and hoverxref_project since they are taken automatically from Read the Docs.
>
> If you want to avoid your CI failing because of this, you can define the environment variables as Read the Docs does:
>
>     READTHEDOCS_PROJECT=scrapy
>     READTHEDOCS_VERSION=''
>
> With the current configuration, all the versions built on Read the Docs will point to a different version on Read the Docs and this will conflict. For example, the current master version in Read the Docs defines hoverxref_version='2.0.0' but that version does not exist on Read the Docs and the tooltip does not know where to get the content from.
@humitos at https://github.com/scrapy/scrapy/pull/4480#discussion_r409026912
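
One way to follow that advice, if explicit values are still wanted in `docs/conf.py`, is to read them from the same environment variables Read the Docs sets rather than hardcoding them. A minimal sketch; the fallback values here are assumptions for local and CI builds, not something the extension requires:

```python
import os

# On Read the Docs these variables are always defined; the fallbacks only
# matter for local builds and CI jobs, which can export the same names.
hoverxref_project = os.environ.get("READTHEDOCS_PROJECT", "scrapy")
hoverxref_version = os.environ.get("READTHEDOCS_VERSION", "master")
```

Alternatively, both settings can simply be removed from `conf.py`, since sphinx-hoverxref takes them automatically from Read the Docs.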
| [
{
"content": "# -*- coding: utf-8 -*-\n#\n# Scrapy documentation build configuration file, created by\n# sphinx-quickstart on Mon Nov 24 12:02:52 2008.\n#\n# This file is execfile()d with the current directory set to its containing dir.\n#\n# The contents of this file are pickled, so don't put values in the namespace\n# that aren't pickleable (module imports are okay, they're removed automatically).\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport sys\nfrom datetime import datetime\nfrom os import path\n\n# If your extensions are in another directory, add it here. If the directory\n# is relative to the documentation root, use os.path.abspath to make it\n# absolute, like shown here.\nsys.path.append(path.join(path.dirname(__file__), \"_ext\"))\nsys.path.insert(0, path.dirname(path.dirname(__file__)))\n\n\n# General configuration\n# ---------------------\n\n# Add any Sphinx extension module names here, as strings. They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = [\n 'hoverxref.extension',\n 'notfound.extension',\n 'scrapydocs',\n 'sphinx.ext.autodoc',\n 'sphinx.ext.coverage',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.viewcode',\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = 'Scrapy'\ncopyright = '2008–{}, Scrapy developers'.format(datetime.now().year)\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\ntry:\n import scrapy\n version = '.'.join(map(str, scrapy.version_info[:2]))\n release = scrapy.__version__\nexcept ImportError:\n version = ''\n release = ''\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\nlanguage = 'en'\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of documents that shouldn't be included in the build.\n#unused_docs = []\n\nexclude_patterns = ['build']\n\n# List of directories, relative to source directory, that shouldn't be searched\n# for source files.\nexclude_trees = ['.build']\n\n# The reST default role (used for this markup: `text`) to use for all documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n\n# Options for HTML output\n# -----------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = 'sphinx_rtd_theme'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. 
For a list of options available for each theme, see the\n# documentation.\n#html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\n# Add path to the RTD explicitly to robustify builds (otherwise might\n# fail in a clean Debian build env)\nimport sphinx_rtd_theme\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\n\n# The style sheet to use for HTML and HTML Help pages. A file of that name\n# must exist either in Sphinx' static/ path, or in one of the custom paths\n# given in html_static_path.\n# html_style = 'scrapydoc.css'\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\nhtml_last_updated_fmt = '%b %d, %Y'\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_use_modindex = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, the reST sources are included in the HTML build as _sources/<name>.\nhtml_copy_source = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# If nonempty, this is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = ''\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'Scrapydoc'\n\n\n# Options for LaTeX output\n# ------------------------\n\n# The paper size ('letter' or 'a4').\n#latex_paper_size = 'letter'\n\n# The font size ('10pt', '11pt' or '12pt').\n#latex_font_size = '10pt'\n\n# Grouping the document tree into LaTeX files. 
List of tuples\n# (source start file, target name, title, author, document class [howto/manual]).\nlatex_documents = [\n ('index', 'Scrapy.tex', 'Scrapy Documentation',\n 'Scrapy developers', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# Additional stuff for the LaTeX preamble.\n#latex_preamble = ''\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_use_modindex = True\n\n\n# Options for the linkcheck builder\n# ---------------------------------\n\n# A list of regular expressions that match URIs that should not be checked when\n# doing a linkcheck build.\nlinkcheck_ignore = [\n 'http://localhost:\\d+', 'http://hg.scrapy.org',\n 'http://directory.google.com/'\n]\n\n\n# Options for the Coverage extension\n# ----------------------------------\ncoverage_ignore_pyobjects = [\n # Contract’s add_pre_hook and add_post_hook are not documented because\n # they should be transparent to contract developers, for whom pre_hook and\n # post_hook should be the actual concern.\n r'\\bContract\\.add_(pre|post)_hook$',\n\n # ContractsManager is an internal class, developers are not expected to\n # interact with it directly in any way.\n r'\\bContractsManager\\b$',\n\n # For default contracts we only want to document their general purpose in\n # their __init__ method, the methods they reimplement to achieve that purpose\n # should be irrelevant to developers using those contracts.\n r'\\w+Contract\\.(adjust_request_args|(pre|post)_process)$',\n\n # Methods of downloader middlewares are not documented, only the classes\n # themselves, since downloader middlewares are controlled through Scrapy\n # settings.\n r'^scrapy\\.downloadermiddlewares\\.\\w*?\\.(\\w*?Middleware|DownloaderStats)\\.',\n\n # Base classes of downloader middlewares are implementation details that\n # are not meant for users.\n r'^scrapy\\.downloadermiddlewares\\.\\w*?\\.Base\\w*?Middleware',\n\n # Private exception used by the command-line interface implementation.\n r'^scrapy\\.exceptions\\.UsageError',\n\n # Methods of BaseItemExporter subclasses are only documented in\n # BaseItemExporter.\n r'^scrapy\\.exporters\\.(?!BaseItemExporter\\b)\\w*?\\.',\n\n # Extension behavior is only modified through settings. 
Methods of\n # extension classes, as well as helper functions, are implementation\n # details that are not documented.\n r'^scrapy\\.extensions\\.[a-z]\\w*?\\.[A-Z]\\w*?\\.', # methods\n r'^scrapy\\.extensions\\.[a-z]\\w*?\\.[a-z]', # helper functions\n\n # Never documented before, and deprecated now.\n r'^scrapy\\.item\\.DictItem$',\n r'^scrapy\\.linkextractors\\.FilteringLinkExtractor$',\n\n # Implementation detail of LxmlLinkExtractor\n r'^scrapy\\.linkextractors\\.lxmlhtml\\.LxmlParserLinkExtractor',\n]\n\n\n# Options for the InterSphinx extension\n# -------------------------------------\n\nintersphinx_mapping = {\n 'coverage': ('https://coverage.readthedocs.io/en/stable', None),\n 'cssselect': ('https://cssselect.readthedocs.io/en/latest', None),\n 'pytest': ('https://docs.pytest.org/en/latest', None),\n 'python': ('https://docs.python.org/3', None),\n 'sphinx': ('https://www.sphinx-doc.org/en/master', None),\n 'tox': ('https://tox.readthedocs.io/en/latest', None),\n 'twisted': ('https://twistedmatrix.com/documents/current', None),\n 'twistedapi': ('https://twistedmatrix.com/documents/current/api', None),\n}\n\n\n# Options for sphinx-hoverxref options\n# ------------------------------------\n\nhoverxref_auto_ref = True\nhoverxref_project = \"scrapy\"\nhoverxref_version = release\nhoverxref_role_types = {\n \"class\": \"tooltip\",\n \"confval\": \"tooltip\",\n \"hoverxref\": \"tooltip\",\n \"mod\": \"tooltip\",\n \"ref\": \"tooltip\",\n}\n",
"path": "docs/conf.py"
}
] | [
{
"content": "# -*- coding: utf-8 -*-\n#\n# Scrapy documentation build configuration file, created by\n# sphinx-quickstart on Mon Nov 24 12:02:52 2008.\n#\n# This file is execfile()d with the current directory set to its containing dir.\n#\n# The contents of this file are pickled, so don't put values in the namespace\n# that aren't pickleable (module imports are okay, they're removed automatically).\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport sys\nfrom datetime import datetime\nfrom os import path\n\n# If your extensions are in another directory, add it here. If the directory\n# is relative to the documentation root, use os.path.abspath to make it\n# absolute, like shown here.\nsys.path.append(path.join(path.dirname(__file__), \"_ext\"))\nsys.path.insert(0, path.dirname(path.dirname(__file__)))\n\n\n# General configuration\n# ---------------------\n\n# Add any Sphinx extension module names here, as strings. They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = [\n 'hoverxref.extension',\n 'notfound.extension',\n 'scrapydocs',\n 'sphinx.ext.autodoc',\n 'sphinx.ext.coverage',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.viewcode',\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = 'Scrapy'\ncopyright = '2008–{}, Scrapy developers'.format(datetime.now().year)\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\ntry:\n import scrapy\n version = '.'.join(map(str, scrapy.version_info[:2]))\n release = scrapy.__version__\nexcept ImportError:\n version = ''\n release = ''\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\nlanguage = 'en'\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of documents that shouldn't be included in the build.\n#unused_docs = []\n\nexclude_patterns = ['build']\n\n# List of directories, relative to source directory, that shouldn't be searched\n# for source files.\nexclude_trees = ['.build']\n\n# The reST default role (used for this markup: `text`) to use for all documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n\n# Options for HTML output\n# -----------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = 'sphinx_rtd_theme'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. 
For a list of options available for each theme, see the\n# documentation.\n#html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\n# Add path to the RTD explicitly to robustify builds (otherwise might\n# fail in a clean Debian build env)\nimport sphinx_rtd_theme\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\n\n# The style sheet to use for HTML and HTML Help pages. A file of that name\n# must exist either in Sphinx' static/ path, or in one of the custom paths\n# given in html_static_path.\n# html_style = 'scrapydoc.css'\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\nhtml_last_updated_fmt = '%b %d, %Y'\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_use_modindex = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, the reST sources are included in the HTML build as _sources/<name>.\nhtml_copy_source = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# If nonempty, this is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = ''\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'Scrapydoc'\n\n\n# Options for LaTeX output\n# ------------------------\n\n# The paper size ('letter' or 'a4').\n#latex_paper_size = 'letter'\n\n# The font size ('10pt', '11pt' or '12pt').\n#latex_font_size = '10pt'\n\n# Grouping the document tree into LaTeX files. 
List of tuples\n# (source start file, target name, title, author, document class [howto/manual]).\nlatex_documents = [\n ('index', 'Scrapy.tex', 'Scrapy Documentation',\n 'Scrapy developers', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# Additional stuff for the LaTeX preamble.\n#latex_preamble = ''\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_use_modindex = True\n\n\n# Options for the linkcheck builder\n# ---------------------------------\n\n# A list of regular expressions that match URIs that should not be checked when\n# doing a linkcheck build.\nlinkcheck_ignore = [\n 'http://localhost:\\d+', 'http://hg.scrapy.org',\n 'http://directory.google.com/'\n]\n\n\n# Options for the Coverage extension\n# ----------------------------------\ncoverage_ignore_pyobjects = [\n # Contract’s add_pre_hook and add_post_hook are not documented because\n # they should be transparent to contract developers, for whom pre_hook and\n # post_hook should be the actual concern.\n r'\\bContract\\.add_(pre|post)_hook$',\n\n # ContractsManager is an internal class, developers are not expected to\n # interact with it directly in any way.\n r'\\bContractsManager\\b$',\n\n # For default contracts we only want to document their general purpose in\n # their __init__ method, the methods they reimplement to achieve that purpose\n # should be irrelevant to developers using those contracts.\n r'\\w+Contract\\.(adjust_request_args|(pre|post)_process)$',\n\n # Methods of downloader middlewares are not documented, only the classes\n # themselves, since downloader middlewares are controlled through Scrapy\n # settings.\n r'^scrapy\\.downloadermiddlewares\\.\\w*?\\.(\\w*?Middleware|DownloaderStats)\\.',\n\n # Base classes of downloader middlewares are implementation details that\n # are not meant for users.\n r'^scrapy\\.downloadermiddlewares\\.\\w*?\\.Base\\w*?Middleware',\n\n # Private exception used by the command-line interface implementation.\n r'^scrapy\\.exceptions\\.UsageError',\n\n # Methods of BaseItemExporter subclasses are only documented in\n # BaseItemExporter.\n r'^scrapy\\.exporters\\.(?!BaseItemExporter\\b)\\w*?\\.',\n\n # Extension behavior is only modified through settings. 
Methods of\n # extension classes, as well as helper functions, are implementation\n # details that are not documented.\n r'^scrapy\\.extensions\\.[a-z]\\w*?\\.[A-Z]\\w*?\\.', # methods\n r'^scrapy\\.extensions\\.[a-z]\\w*?\\.[a-z]', # helper functions\n\n # Never documented before, and deprecated now.\n r'^scrapy\\.item\\.DictItem$',\n r'^scrapy\\.linkextractors\\.FilteringLinkExtractor$',\n\n # Implementation detail of LxmlLinkExtractor\n r'^scrapy\\.linkextractors\\.lxmlhtml\\.LxmlParserLinkExtractor',\n]\n\n\n# Options for the InterSphinx extension\n# -------------------------------------\n\nintersphinx_mapping = {\n 'coverage': ('https://coverage.readthedocs.io/en/stable', None),\n 'cssselect': ('https://cssselect.readthedocs.io/en/latest', None),\n 'pytest': ('https://docs.pytest.org/en/latest', None),\n 'python': ('https://docs.python.org/3', None),\n 'sphinx': ('https://www.sphinx-doc.org/en/master', None),\n 'tox': ('https://tox.readthedocs.io/en/latest', None),\n 'twisted': ('https://twistedmatrix.com/documents/current', None),\n 'twistedapi': ('https://twistedmatrix.com/documents/current/api', None),\n}\n\n\n# Options for sphinx-hoverxref options\n# ------------------------------------\n\nhoverxref_auto_ref = True\nhoverxref_role_types = {\n \"class\": \"tooltip\",\n \"confval\": \"tooltip\",\n \"hoverxref\": \"tooltip\",\n \"mod\": \"tooltip\",\n \"ref\": \"tooltip\",\n}\n",
"path": "docs/conf.py"
}
] | diff --git a/docs/conf.py b/docs/conf.py
index 4414ef6371a..813417bae17 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -295,8 +295,6 @@
# ------------------------------------
hoverxref_auto_ref = True
-hoverxref_project = "scrapy"
-hoverxref_version = release
hoverxref_role_types = {
"class": "tooltip",
"confval": "tooltip",
diff --git a/tox.ini b/tox.ini
index b1babc7fd63..cd118c921d0 100644
--- a/tox.ini
+++ b/tox.ini
@@ -74,11 +74,15 @@ deps =
changedir = docs
deps =
-rdocs/requirements.txt
+setenv =
+ READTHEDOCS_PROJECT=scrapy
+ READTHEDOCS_VERSION=master
[testenv:docs]
basepython = python3
changedir = {[docs]changedir}
deps = {[docs]deps}
+setenv = {[docs]setenv}
commands =
sphinx-build -W -b html . {envtmpdir}/html
@@ -86,6 +90,7 @@ commands =
basepython = python3
changedir = {[docs]changedir}
deps = {[docs]deps}
+setenv = {[docs]setenv}
commands =
sphinx-build -b coverage . {envtmpdir}/coverage
@@ -93,6 +98,7 @@ commands =
basepython = python3
changedir = {[docs]changedir}
deps = {[docs]deps}
+setenv = {[docs]setenv}
commands =
sphinx-build -W -b linkcheck . {envtmpdir}/linkcheck
|
pallets__click-2187 | click.echo is improperly typed
I'm getting a repeat of #2174: although click.secho has been fixed, pyright continues to complain about the type annotation for click.echo.
#2175 only fixes the problem for click.secho; I think the same fix should be applied to click.echo.
(Running with 1c588834)
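
A sketch of the corresponding change, mirroring what #2175 did for `secho`: parameterize the `t.IO` annotation of the `file` argument so pyright no longer flags the bare generic. Only the signature is shown here; the body is unchanged:

```python
import typing as t


def echo(
    message: t.Optional[t.Any] = None,
    file: t.Optional[t.IO[t.Any]] = None,  # was t.Optional[t.IO]
    nl: bool = True,
    err: bool = False,
    color: t.Optional[bool] = None,
) -> None:
    ...
```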
| [
{
"content": "import os\nimport sys\nimport typing as t\nfrom functools import update_wrapper\nfrom types import ModuleType\n\nfrom ._compat import _default_text_stderr\nfrom ._compat import _default_text_stdout\nfrom ._compat import _find_binary_writer\nfrom ._compat import auto_wrap_for_ansi\nfrom ._compat import binary_streams\nfrom ._compat import get_filesystem_encoding\nfrom ._compat import open_stream\nfrom ._compat import should_strip_ansi\nfrom ._compat import strip_ansi\nfrom ._compat import text_streams\nfrom ._compat import WIN\nfrom .globals import resolve_color_default\n\nif t.TYPE_CHECKING:\n import typing_extensions as te\n\nF = t.TypeVar(\"F\", bound=t.Callable[..., t.Any])\n\n\ndef _posixify(name: str) -> str:\n return \"-\".join(name.split()).lower()\n\n\ndef safecall(func: F) -> F:\n \"\"\"Wraps a function so that it swallows exceptions.\"\"\"\n\n def wrapper(*args, **kwargs): # type: ignore\n try:\n return func(*args, **kwargs)\n except Exception:\n pass\n\n return update_wrapper(t.cast(F, wrapper), func)\n\n\ndef make_str(value: t.Any) -> str:\n \"\"\"Converts a value into a valid string.\"\"\"\n if isinstance(value, bytes):\n try:\n return value.decode(get_filesystem_encoding())\n except UnicodeError:\n return value.decode(\"utf-8\", \"replace\")\n return str(value)\n\n\ndef make_default_short_help(help: str, max_length: int = 45) -> str:\n \"\"\"Returns a condensed version of help string.\"\"\"\n # Consider only the first paragraph.\n paragraph_end = help.find(\"\\n\\n\")\n\n if paragraph_end != -1:\n help = help[:paragraph_end]\n\n # Collapse newlines, tabs, and spaces.\n words = help.split()\n\n if not words:\n return \"\"\n\n # The first paragraph started with a \"no rewrap\" marker, ignore it.\n if words[0] == \"\\b\":\n words = words[1:]\n\n total_length = 0\n last_index = len(words) - 1\n\n for i, word in enumerate(words):\n total_length += len(word) + (i > 0)\n\n if total_length > max_length: # too long, truncate\n break\n\n if word[-1] == \".\": # sentence end, truncate without \"...\"\n return \" \".join(words[: i + 1])\n\n if total_length == max_length and i != last_index:\n break # not at sentence end, truncate with \"...\"\n else:\n return \" \".join(words) # no truncation needed\n\n # Account for the length of the suffix.\n total_length += len(\"...\")\n\n # remove words until the length is short enough\n while i > 0:\n total_length -= len(words[i]) + (i > 0)\n\n if total_length <= max_length:\n break\n\n i -= 1\n\n return \" \".join(words[:i]) + \"...\"\n\n\nclass LazyFile:\n \"\"\"A lazy file works like a regular file but it does not fully open\n the file but it does perform some basic checks early to see if the\n filename parameter does make sense. 
This is useful for safely opening\n files for writing.\n \"\"\"\n\n def __init__(\n self,\n filename: str,\n mode: str = \"r\",\n encoding: t.Optional[str] = None,\n errors: t.Optional[str] = \"strict\",\n atomic: bool = False,\n ):\n self.name = filename\n self.mode = mode\n self.encoding = encoding\n self.errors = errors\n self.atomic = atomic\n self._f: t.Optional[t.IO]\n\n if filename == \"-\":\n self._f, self.should_close = open_stream(filename, mode, encoding, errors)\n else:\n if \"r\" in mode:\n # Open and close the file in case we're opening it for\n # reading so that we can catch at least some errors in\n # some cases early.\n open(filename, mode).close()\n self._f = None\n self.should_close = True\n\n def __getattr__(self, name: str) -> t.Any:\n return getattr(self.open(), name)\n\n def __repr__(self) -> str:\n if self._f is not None:\n return repr(self._f)\n return f\"<unopened file '{self.name}' {self.mode}>\"\n\n def open(self) -> t.IO:\n \"\"\"Opens the file if it's not yet open. This call might fail with\n a :exc:`FileError`. Not handling this error will produce an error\n that Click shows.\n \"\"\"\n if self._f is not None:\n return self._f\n try:\n rv, self.should_close = open_stream(\n self.name, self.mode, self.encoding, self.errors, atomic=self.atomic\n )\n except OSError as e: # noqa: E402\n from .exceptions import FileError\n\n raise FileError(self.name, hint=e.strerror) from e\n self._f = rv\n return rv\n\n def close(self) -> None:\n \"\"\"Closes the underlying file, no matter what.\"\"\"\n if self._f is not None:\n self._f.close()\n\n def close_intelligently(self) -> None:\n \"\"\"This function only closes the file if it was opened by the lazy\n file wrapper. For instance this will never close stdin.\n \"\"\"\n if self.should_close:\n self.close()\n\n def __enter__(self) -> \"LazyFile\":\n return self\n\n def __exit__(self, exc_type, exc_value, tb): # type: ignore\n self.close_intelligently()\n\n def __iter__(self) -> t.Iterator[t.AnyStr]:\n self.open()\n return iter(self._f) # type: ignore\n\n\nclass KeepOpenFile:\n def __init__(self, file: t.IO) -> None:\n self._file = file\n\n def __getattr__(self, name: str) -> t.Any:\n return getattr(self._file, name)\n\n def __enter__(self) -> \"KeepOpenFile\":\n return self\n\n def __exit__(self, exc_type, exc_value, tb): # type: ignore\n pass\n\n def __repr__(self) -> str:\n return repr(self._file)\n\n def __iter__(self) -> t.Iterator[t.AnyStr]:\n return iter(self._file)\n\n\ndef echo(\n message: t.Optional[t.Any] = None,\n file: t.Optional[t.IO] = None,\n nl: bool = True,\n err: bool = False,\n color: t.Optional[bool] = None,\n) -> None:\n \"\"\"Print a message and newline to stdout or a file. This should be\n used instead of :func:`print` because it provides better support\n for different data, files, and environments.\n\n Compared to :func:`print`, this does the following:\n\n - Ensures that the output encoding is not misconfigured on Linux.\n - Supports Unicode in the Windows console.\n - Supports writing to binary outputs, and supports writing bytes\n to text outputs.\n - Supports colors and styles on Windows.\n - Removes ANSI color and style codes if the output does not look\n like an interactive terminal.\n - Always flushes the output.\n\n :param message: The string or bytes to output. Other objects are\n converted to strings.\n :param file: The file to write to. Defaults to ``stdout``.\n :param err: Write to ``stderr`` instead of ``stdout``.\n :param nl: Print a newline after the message. 
Enabled by default.\n :param color: Force showing or hiding colors and other styles. By\n default Click will remove color if the output does not look like\n an interactive terminal.\n\n .. versionchanged:: 6.0\n Support Unicode output on the Windows console. Click does not\n modify ``sys.stdout``, so ``sys.stdout.write()`` and ``print()``\n will still not support Unicode.\n\n .. versionchanged:: 4.0\n Added the ``color`` parameter.\n\n .. versionadded:: 3.0\n Added the ``err`` parameter.\n\n .. versionchanged:: 2.0\n Support colors on Windows if colorama is installed.\n \"\"\"\n if file is None:\n if err:\n file = _default_text_stderr()\n else:\n file = _default_text_stdout()\n\n # Convert non bytes/text into the native string type.\n if message is not None and not isinstance(message, (str, bytes, bytearray)):\n out: t.Optional[t.Union[str, bytes]] = str(message)\n else:\n out = message\n\n if nl:\n out = out or \"\"\n if isinstance(out, str):\n out += \"\\n\"\n else:\n out += b\"\\n\"\n\n if not out:\n file.flush()\n return\n\n # If there is a message and the value looks like bytes, we manually\n # need to find the binary stream and write the message in there.\n # This is done separately so that most stream types will work as you\n # would expect. Eg: you can write to StringIO for other cases.\n if isinstance(out, (bytes, bytearray)):\n binary_file = _find_binary_writer(file)\n\n if binary_file is not None:\n file.flush()\n binary_file.write(out)\n binary_file.flush()\n return\n\n # ANSI style code support. For no message or bytes, nothing happens.\n # When outputting to a file instead of a terminal, strip codes.\n else:\n color = resolve_color_default(color)\n\n if should_strip_ansi(file, color):\n out = strip_ansi(out)\n elif WIN:\n if auto_wrap_for_ansi is not None:\n file = auto_wrap_for_ansi(file) # type: ignore\n elif not color:\n out = strip_ansi(out)\n\n file.write(out) # type: ignore\n file.flush()\n\n\ndef get_binary_stream(name: \"te.Literal['stdin', 'stdout', 'stderr']\") -> t.BinaryIO:\n \"\"\"Returns a system stream for byte processing.\n\n :param name: the name of the stream to open. Valid names are ``'stdin'``,\n ``'stdout'`` and ``'stderr'``\n \"\"\"\n opener = binary_streams.get(name)\n if opener is None:\n raise TypeError(f\"Unknown standard stream '{name}'\")\n return opener()\n\n\ndef get_text_stream(\n name: \"te.Literal['stdin', 'stdout', 'stderr']\",\n encoding: t.Optional[str] = None,\n errors: t.Optional[str] = \"strict\",\n) -> t.TextIO:\n \"\"\"Returns a system stream for text processing. This usually returns\n a wrapped stream around a binary stream returned from\n :func:`get_binary_stream` but it also can take shortcuts for already\n correctly configured streams.\n\n :param name: the name of the stream to open. Valid names are ``'stdin'``,\n ``'stdout'`` and ``'stderr'``\n :param encoding: overrides the detected default encoding.\n :param errors: overrides the default error mode.\n \"\"\"\n opener = text_streams.get(name)\n if opener is None:\n raise TypeError(f\"Unknown standard stream '{name}'\")\n return opener(encoding, errors)\n\n\ndef open_file(\n filename: str,\n mode: str = \"r\",\n encoding: t.Optional[str] = None,\n errors: t.Optional[str] = \"strict\",\n lazy: bool = False,\n atomic: bool = False,\n) -> t.IO:\n \"\"\"Open a file, with extra behavior to handle ``'-'`` to indicate\n a standard stream, lazy open on write, and atomic write. 
Similar to\n the behavior of the :class:`~click.File` param type.\n\n If ``'-'`` is given to open ``stdout`` or ``stdin``, the stream is\n wrapped so that using it in a context manager will not close it.\n This makes it possible to use the function without accidentally\n closing a standard stream:\n\n .. code-block:: python\n\n with open_file(filename) as f:\n ...\n\n :param filename: The name of the file to open, or ``'-'`` for\n ``stdin``/``stdout``.\n :param mode: The mode in which to open the file.\n :param encoding: The encoding to decode or encode a file opened in\n text mode.\n :param errors: The error handling mode.\n :param lazy: Wait to open the file until it is accessed. For read\n mode, the file is temporarily opened to raise access errors\n early, then closed until it is read again.\n :param atomic: Write to a temporary file and replace the given file\n on close.\n\n .. versionadded:: 3.0\n \"\"\"\n if lazy:\n return t.cast(t.IO, LazyFile(filename, mode, encoding, errors, atomic=atomic))\n\n f, should_close = open_stream(filename, mode, encoding, errors, atomic=atomic)\n\n if not should_close:\n f = t.cast(t.IO, KeepOpenFile(f))\n\n return f\n\n\ndef get_os_args() -> t.Sequence[str]:\n \"\"\"Returns the argument part of ``sys.argv``, removing the first\n value which is the name of the script.\n\n .. deprecated:: 8.0\n Will be removed in Click 8.1. Access ``sys.argv[1:]`` directly\n instead.\n \"\"\"\n import warnings\n\n warnings.warn(\n \"'get_os_args' is deprecated and will be removed in Click 8.1.\"\n \" Access 'sys.argv[1:]' directly instead.\",\n DeprecationWarning,\n stacklevel=2,\n )\n return sys.argv[1:]\n\n\ndef format_filename(\n filename: t.Union[str, bytes, os.PathLike], shorten: bool = False\n) -> str:\n \"\"\"Formats a filename for user display. The main purpose of this\n function is to ensure that the filename can be displayed at all. This\n will decode the filename to unicode if necessary in a way that it will\n not fail. Optionally, it can shorten the filename to not include the\n full path to the filename.\n\n :param filename: formats a filename for UI display. This will also convert\n the filename into unicode without failing.\n :param shorten: this optionally shortens the filename to strip of the\n path that leads up to it.\n \"\"\"\n if shorten:\n filename = os.path.basename(filename)\n\n return os.fsdecode(filename)\n\n\ndef get_app_dir(app_name: str, roaming: bool = True, force_posix: bool = False) -> str:\n r\"\"\"Returns the config folder for the application. The default behavior\n is to return whatever is most appropriate for the operating system.\n\n To give you an idea, for an app called ``\"Foo Bar\"``, something like\n the following folders could be returned:\n\n Mac OS X:\n ``~/Library/Application Support/Foo Bar``\n Mac OS X (POSIX):\n ``~/.foo-bar``\n Unix:\n ``~/.config/foo-bar``\n Unix (POSIX):\n ``~/.foo-bar``\n Windows (roaming):\n ``C:\\Users\\<user>\\AppData\\Roaming\\Foo Bar``\n Windows (not roaming):\n ``C:\\Users\\<user>\\AppData\\Local\\Foo Bar``\n\n .. versionadded:: 2.0\n\n :param app_name: the application name. 
This should be properly capitalized\n and can contain whitespace.\n :param roaming: controls if the folder should be roaming or not on Windows.\n Has no affect otherwise.\n :param force_posix: if this is set to `True` then on any POSIX system the\n folder will be stored in the home folder with a leading\n dot instead of the XDG config home or darwin's\n application support folder.\n \"\"\"\n if WIN:\n key = \"APPDATA\" if roaming else \"LOCALAPPDATA\"\n folder = os.environ.get(key)\n if folder is None:\n folder = os.path.expanduser(\"~\")\n return os.path.join(folder, app_name)\n if force_posix:\n return os.path.join(os.path.expanduser(f\"~/.{_posixify(app_name)}\"))\n if sys.platform == \"darwin\":\n return os.path.join(\n os.path.expanduser(\"~/Library/Application Support\"), app_name\n )\n return os.path.join(\n os.environ.get(\"XDG_CONFIG_HOME\", os.path.expanduser(\"~/.config\")),\n _posixify(app_name),\n )\n\n\nclass PacifyFlushWrapper:\n \"\"\"This wrapper is used to catch and suppress BrokenPipeErrors resulting\n from ``.flush()`` being called on broken pipe during the shutdown/final-GC\n of the Python interpreter. Notably ``.flush()`` is always called on\n ``sys.stdout`` and ``sys.stderr``. So as to have minimal impact on any\n other cleanup code, and the case where the underlying file is not a broken\n pipe, all calls and attributes are proxied.\n \"\"\"\n\n def __init__(self, wrapped: t.IO) -> None:\n self.wrapped = wrapped\n\n def flush(self) -> None:\n try:\n self.wrapped.flush()\n except OSError as e:\n import errno\n\n if e.errno != errno.EPIPE:\n raise\n\n def __getattr__(self, attr: str) -> t.Any:\n return getattr(self.wrapped, attr)\n\n\ndef _detect_program_name(\n path: t.Optional[str] = None, _main: ModuleType = sys.modules[\"__main__\"]\n) -> str:\n \"\"\"Determine the command used to run the program, for use in help\n text. If a file or entry point was executed, the file name is\n returned. If ``python -m`` was used to execute a module or package,\n ``python -m name`` is returned.\n\n This doesn't try to be too precise, the goal is to give a concise\n name for help text. Files are only shown as their name without the\n path. ``python`` is only shown for modules, and the full path to\n ``sys.executable`` is not shown.\n\n :param path: The Python file being executed. Python puts this in\n ``sys.argv[0]``, which is used by default.\n :param _main: The ``__main__`` module. This should only be passed\n during internal testing.\n\n .. versionadded:: 8.0\n Based on command args detection in the Werkzeug reloader.\n\n :meta private:\n \"\"\"\n if not path:\n path = sys.argv[0]\n\n # The value of __package__ indicates how Python was called. It may\n # not exist if a setuptools script is installed as an egg. 
It may be\n # set incorrectly for entry points created with pip on Windows.\n if getattr(_main, \"__package__\", None) is None or (\n os.name == \"nt\"\n and _main.__package__ == \"\"\n and not os.path.exists(path)\n and os.path.exists(f\"{path}.exe\")\n ):\n # Executed a file, like \"python app.py\".\n return os.path.basename(path)\n\n # Executed a module, like \"python -m example\".\n # Rewritten by Python from \"-m script\" to \"/path/to/script.py\".\n # Need to look at main module to determine how it was executed.\n py_module = t.cast(str, _main.__package__)\n name = os.path.splitext(os.path.basename(path))[0]\n\n # A submodule like \"example.cli\".\n if name != \"__main__\":\n py_module = f\"{py_module}.{name}\"\n\n return f\"python -m {py_module.lstrip('.')}\"\n\n\ndef _expand_args(\n args: t.Iterable[str],\n *,\n user: bool = True,\n env: bool = True,\n glob_recursive: bool = True,\n) -> t.List[str]:\n \"\"\"Simulate Unix shell expansion with Python functions.\n\n See :func:`glob.glob`, :func:`os.path.expanduser`, and\n :func:`os.path.expandvars`.\n\n This intended for use on Windows, where the shell does not do any\n expansion. It may not exactly match what a Unix shell would do.\n\n :param args: List of command line arguments to expand.\n :param user: Expand user home directory.\n :param env: Expand environment variables.\n :param glob_recursive: ``**`` matches directories recursively.\n\n .. versionadded:: 8.0\n\n :meta private:\n \"\"\"\n from glob import glob\n\n out = []\n\n for arg in args:\n if user:\n arg = os.path.expanduser(arg)\n\n if env:\n arg = os.path.expandvars(arg)\n\n matches = glob(arg, recursive=glob_recursive)\n\n if not matches:\n out.append(arg)\n else:\n out.extend(matches)\n\n return out\n",
"path": "src/click/utils.py"
}
] | [
{
"content": "import os\nimport sys\nimport typing as t\nfrom functools import update_wrapper\nfrom types import ModuleType\n\nfrom ._compat import _default_text_stderr\nfrom ._compat import _default_text_stdout\nfrom ._compat import _find_binary_writer\nfrom ._compat import auto_wrap_for_ansi\nfrom ._compat import binary_streams\nfrom ._compat import get_filesystem_encoding\nfrom ._compat import open_stream\nfrom ._compat import should_strip_ansi\nfrom ._compat import strip_ansi\nfrom ._compat import text_streams\nfrom ._compat import WIN\nfrom .globals import resolve_color_default\n\nif t.TYPE_CHECKING:\n import typing_extensions as te\n\nF = t.TypeVar(\"F\", bound=t.Callable[..., t.Any])\n\n\ndef _posixify(name: str) -> str:\n return \"-\".join(name.split()).lower()\n\n\ndef safecall(func: F) -> F:\n \"\"\"Wraps a function so that it swallows exceptions.\"\"\"\n\n def wrapper(*args, **kwargs): # type: ignore\n try:\n return func(*args, **kwargs)\n except Exception:\n pass\n\n return update_wrapper(t.cast(F, wrapper), func)\n\n\ndef make_str(value: t.Any) -> str:\n \"\"\"Converts a value into a valid string.\"\"\"\n if isinstance(value, bytes):\n try:\n return value.decode(get_filesystem_encoding())\n except UnicodeError:\n return value.decode(\"utf-8\", \"replace\")\n return str(value)\n\n\ndef make_default_short_help(help: str, max_length: int = 45) -> str:\n \"\"\"Returns a condensed version of help string.\"\"\"\n # Consider only the first paragraph.\n paragraph_end = help.find(\"\\n\\n\")\n\n if paragraph_end != -1:\n help = help[:paragraph_end]\n\n # Collapse newlines, tabs, and spaces.\n words = help.split()\n\n if not words:\n return \"\"\n\n # The first paragraph started with a \"no rewrap\" marker, ignore it.\n if words[0] == \"\\b\":\n words = words[1:]\n\n total_length = 0\n last_index = len(words) - 1\n\n for i, word in enumerate(words):\n total_length += len(word) + (i > 0)\n\n if total_length > max_length: # too long, truncate\n break\n\n if word[-1] == \".\": # sentence end, truncate without \"...\"\n return \" \".join(words[: i + 1])\n\n if total_length == max_length and i != last_index:\n break # not at sentence end, truncate with \"...\"\n else:\n return \" \".join(words) # no truncation needed\n\n # Account for the length of the suffix.\n total_length += len(\"...\")\n\n # remove words until the length is short enough\n while i > 0:\n total_length -= len(words[i]) + (i > 0)\n\n if total_length <= max_length:\n break\n\n i -= 1\n\n return \" \".join(words[:i]) + \"...\"\n\n\nclass LazyFile:\n \"\"\"A lazy file works like a regular file but it does not fully open\n the file but it does perform some basic checks early to see if the\n filename parameter does make sense. 
This is useful for safely opening\n files for writing.\n \"\"\"\n\n def __init__(\n self,\n filename: str,\n mode: str = \"r\",\n encoding: t.Optional[str] = None,\n errors: t.Optional[str] = \"strict\",\n atomic: bool = False,\n ):\n self.name = filename\n self.mode = mode\n self.encoding = encoding\n self.errors = errors\n self.atomic = atomic\n self._f: t.Optional[t.IO]\n\n if filename == \"-\":\n self._f, self.should_close = open_stream(filename, mode, encoding, errors)\n else:\n if \"r\" in mode:\n # Open and close the file in case we're opening it for\n # reading so that we can catch at least some errors in\n # some cases early.\n open(filename, mode).close()\n self._f = None\n self.should_close = True\n\n def __getattr__(self, name: str) -> t.Any:\n return getattr(self.open(), name)\n\n def __repr__(self) -> str:\n if self._f is not None:\n return repr(self._f)\n return f\"<unopened file '{self.name}' {self.mode}>\"\n\n def open(self) -> t.IO:\n \"\"\"Opens the file if it's not yet open. This call might fail with\n a :exc:`FileError`. Not handling this error will produce an error\n that Click shows.\n \"\"\"\n if self._f is not None:\n return self._f\n try:\n rv, self.should_close = open_stream(\n self.name, self.mode, self.encoding, self.errors, atomic=self.atomic\n )\n except OSError as e: # noqa: E402\n from .exceptions import FileError\n\n raise FileError(self.name, hint=e.strerror) from e\n self._f = rv\n return rv\n\n def close(self) -> None:\n \"\"\"Closes the underlying file, no matter what.\"\"\"\n if self._f is not None:\n self._f.close()\n\n def close_intelligently(self) -> None:\n \"\"\"This function only closes the file if it was opened by the lazy\n file wrapper. For instance this will never close stdin.\n \"\"\"\n if self.should_close:\n self.close()\n\n def __enter__(self) -> \"LazyFile\":\n return self\n\n def __exit__(self, exc_type, exc_value, tb): # type: ignore\n self.close_intelligently()\n\n def __iter__(self) -> t.Iterator[t.AnyStr]:\n self.open()\n return iter(self._f) # type: ignore\n\n\nclass KeepOpenFile:\n def __init__(self, file: t.IO) -> None:\n self._file = file\n\n def __getattr__(self, name: str) -> t.Any:\n return getattr(self._file, name)\n\n def __enter__(self) -> \"KeepOpenFile\":\n return self\n\n def __exit__(self, exc_type, exc_value, tb): # type: ignore\n pass\n\n def __repr__(self) -> str:\n return repr(self._file)\n\n def __iter__(self) -> t.Iterator[t.AnyStr]:\n return iter(self._file)\n\n\ndef echo(\n message: t.Optional[t.Any] = None,\n file: t.Optional[t.IO[t.Any]] = None,\n nl: bool = True,\n err: bool = False,\n color: t.Optional[bool] = None,\n) -> None:\n \"\"\"Print a message and newline to stdout or a file. This should be\n used instead of :func:`print` because it provides better support\n for different data, files, and environments.\n\n Compared to :func:`print`, this does the following:\n\n - Ensures that the output encoding is not misconfigured on Linux.\n - Supports Unicode in the Windows console.\n - Supports writing to binary outputs, and supports writing bytes\n to text outputs.\n - Supports colors and styles on Windows.\n - Removes ANSI color and style codes if the output does not look\n like an interactive terminal.\n - Always flushes the output.\n\n :param message: The string or bytes to output. Other objects are\n converted to strings.\n :param file: The file to write to. Defaults to ``stdout``.\n :param err: Write to ``stderr`` instead of ``stdout``.\n :param nl: Print a newline after the message. 
Enabled by default.\n :param color: Force showing or hiding colors and other styles. By\n default Click will remove color if the output does not look like\n an interactive terminal.\n\n .. versionchanged:: 6.0\n Support Unicode output on the Windows console. Click does not\n modify ``sys.stdout``, so ``sys.stdout.write()`` and ``print()``\n will still not support Unicode.\n\n .. versionchanged:: 4.0\n Added the ``color`` parameter.\n\n .. versionadded:: 3.0\n Added the ``err`` parameter.\n\n .. versionchanged:: 2.0\n Support colors on Windows if colorama is installed.\n \"\"\"\n if file is None:\n if err:\n file = _default_text_stderr()\n else:\n file = _default_text_stdout()\n\n # Convert non bytes/text into the native string type.\n if message is not None and not isinstance(message, (str, bytes, bytearray)):\n out: t.Optional[t.Union[str, bytes]] = str(message)\n else:\n out = message\n\n if nl:\n out = out or \"\"\n if isinstance(out, str):\n out += \"\\n\"\n else:\n out += b\"\\n\"\n\n if not out:\n file.flush()\n return\n\n # If there is a message and the value looks like bytes, we manually\n # need to find the binary stream and write the message in there.\n # This is done separately so that most stream types will work as you\n # would expect. Eg: you can write to StringIO for other cases.\n if isinstance(out, (bytes, bytearray)):\n binary_file = _find_binary_writer(file)\n\n if binary_file is not None:\n file.flush()\n binary_file.write(out)\n binary_file.flush()\n return\n\n # ANSI style code support. For no message or bytes, nothing happens.\n # When outputting to a file instead of a terminal, strip codes.\n else:\n color = resolve_color_default(color)\n\n if should_strip_ansi(file, color):\n out = strip_ansi(out)\n elif WIN:\n if auto_wrap_for_ansi is not None:\n file = auto_wrap_for_ansi(file) # type: ignore\n elif not color:\n out = strip_ansi(out)\n\n file.write(out) # type: ignore\n file.flush()\n\n\ndef get_binary_stream(name: \"te.Literal['stdin', 'stdout', 'stderr']\") -> t.BinaryIO:\n \"\"\"Returns a system stream for byte processing.\n\n :param name: the name of the stream to open. Valid names are ``'stdin'``,\n ``'stdout'`` and ``'stderr'``\n \"\"\"\n opener = binary_streams.get(name)\n if opener is None:\n raise TypeError(f\"Unknown standard stream '{name}'\")\n return opener()\n\n\ndef get_text_stream(\n name: \"te.Literal['stdin', 'stdout', 'stderr']\",\n encoding: t.Optional[str] = None,\n errors: t.Optional[str] = \"strict\",\n) -> t.TextIO:\n \"\"\"Returns a system stream for text processing. This usually returns\n a wrapped stream around a binary stream returned from\n :func:`get_binary_stream` but it also can take shortcuts for already\n correctly configured streams.\n\n :param name: the name of the stream to open. Valid names are ``'stdin'``,\n ``'stdout'`` and ``'stderr'``\n :param encoding: overrides the detected default encoding.\n :param errors: overrides the default error mode.\n \"\"\"\n opener = text_streams.get(name)\n if opener is None:\n raise TypeError(f\"Unknown standard stream '{name}'\")\n return opener(encoding, errors)\n\n\ndef open_file(\n filename: str,\n mode: str = \"r\",\n encoding: t.Optional[str] = None,\n errors: t.Optional[str] = \"strict\",\n lazy: bool = False,\n atomic: bool = False,\n) -> t.IO:\n \"\"\"Open a file, with extra behavior to handle ``'-'`` to indicate\n a standard stream, lazy open on write, and atomic write. 
Similar to\n the behavior of the :class:`~click.File` param type.\n\n If ``'-'`` is given to open ``stdout`` or ``stdin``, the stream is\n wrapped so that using it in a context manager will not close it.\n This makes it possible to use the function without accidentally\n closing a standard stream:\n\n .. code-block:: python\n\n with open_file(filename) as f:\n ...\n\n :param filename: The name of the file to open, or ``'-'`` for\n ``stdin``/``stdout``.\n :param mode: The mode in which to open the file.\n :param encoding: The encoding to decode or encode a file opened in\n text mode.\n :param errors: The error handling mode.\n :param lazy: Wait to open the file until it is accessed. For read\n mode, the file is temporarily opened to raise access errors\n early, then closed until it is read again.\n :param atomic: Write to a temporary file and replace the given file\n on close.\n\n .. versionadded:: 3.0\n \"\"\"\n if lazy:\n return t.cast(t.IO, LazyFile(filename, mode, encoding, errors, atomic=atomic))\n\n f, should_close = open_stream(filename, mode, encoding, errors, atomic=atomic)\n\n if not should_close:\n f = t.cast(t.IO, KeepOpenFile(f))\n\n return f\n\n\ndef get_os_args() -> t.Sequence[str]:\n \"\"\"Returns the argument part of ``sys.argv``, removing the first\n value which is the name of the script.\n\n .. deprecated:: 8.0\n Will be removed in Click 8.1. Access ``sys.argv[1:]`` directly\n instead.\n \"\"\"\n import warnings\n\n warnings.warn(\n \"'get_os_args' is deprecated and will be removed in Click 8.1.\"\n \" Access 'sys.argv[1:]' directly instead.\",\n DeprecationWarning,\n stacklevel=2,\n )\n return sys.argv[1:]\n\n\ndef format_filename(\n filename: t.Union[str, bytes, os.PathLike], shorten: bool = False\n) -> str:\n \"\"\"Formats a filename for user display. The main purpose of this\n function is to ensure that the filename can be displayed at all. This\n will decode the filename to unicode if necessary in a way that it will\n not fail. Optionally, it can shorten the filename to not include the\n full path to the filename.\n\n :param filename: formats a filename for UI display. This will also convert\n the filename into unicode without failing.\n :param shorten: this optionally shortens the filename to strip of the\n path that leads up to it.\n \"\"\"\n if shorten:\n filename = os.path.basename(filename)\n\n return os.fsdecode(filename)\n\n\ndef get_app_dir(app_name: str, roaming: bool = True, force_posix: bool = False) -> str:\n r\"\"\"Returns the config folder for the application. The default behavior\n is to return whatever is most appropriate for the operating system.\n\n To give you an idea, for an app called ``\"Foo Bar\"``, something like\n the following folders could be returned:\n\n Mac OS X:\n ``~/Library/Application Support/Foo Bar``\n Mac OS X (POSIX):\n ``~/.foo-bar``\n Unix:\n ``~/.config/foo-bar``\n Unix (POSIX):\n ``~/.foo-bar``\n Windows (roaming):\n ``C:\\Users\\<user>\\AppData\\Roaming\\Foo Bar``\n Windows (not roaming):\n ``C:\\Users\\<user>\\AppData\\Local\\Foo Bar``\n\n .. versionadded:: 2.0\n\n :param app_name: the application name. 
This should be properly capitalized\n and can contain whitespace.\n :param roaming: controls if the folder should be roaming or not on Windows.\n Has no affect otherwise.\n :param force_posix: if this is set to `True` then on any POSIX system the\n folder will be stored in the home folder with a leading\n dot instead of the XDG config home or darwin's\n application support folder.\n \"\"\"\n if WIN:\n key = \"APPDATA\" if roaming else \"LOCALAPPDATA\"\n folder = os.environ.get(key)\n if folder is None:\n folder = os.path.expanduser(\"~\")\n return os.path.join(folder, app_name)\n if force_posix:\n return os.path.join(os.path.expanduser(f\"~/.{_posixify(app_name)}\"))\n if sys.platform == \"darwin\":\n return os.path.join(\n os.path.expanduser(\"~/Library/Application Support\"), app_name\n )\n return os.path.join(\n os.environ.get(\"XDG_CONFIG_HOME\", os.path.expanduser(\"~/.config\")),\n _posixify(app_name),\n )\n\n\nclass PacifyFlushWrapper:\n \"\"\"This wrapper is used to catch and suppress BrokenPipeErrors resulting\n from ``.flush()`` being called on broken pipe during the shutdown/final-GC\n of the Python interpreter. Notably ``.flush()`` is always called on\n ``sys.stdout`` and ``sys.stderr``. So as to have minimal impact on any\n other cleanup code, and the case where the underlying file is not a broken\n pipe, all calls and attributes are proxied.\n \"\"\"\n\n def __init__(self, wrapped: t.IO) -> None:\n self.wrapped = wrapped\n\n def flush(self) -> None:\n try:\n self.wrapped.flush()\n except OSError as e:\n import errno\n\n if e.errno != errno.EPIPE:\n raise\n\n def __getattr__(self, attr: str) -> t.Any:\n return getattr(self.wrapped, attr)\n\n\ndef _detect_program_name(\n path: t.Optional[str] = None, _main: ModuleType = sys.modules[\"__main__\"]\n) -> str:\n \"\"\"Determine the command used to run the program, for use in help\n text. If a file or entry point was executed, the file name is\n returned. If ``python -m`` was used to execute a module or package,\n ``python -m name`` is returned.\n\n This doesn't try to be too precise, the goal is to give a concise\n name for help text. Files are only shown as their name without the\n path. ``python`` is only shown for modules, and the full path to\n ``sys.executable`` is not shown.\n\n :param path: The Python file being executed. Python puts this in\n ``sys.argv[0]``, which is used by default.\n :param _main: The ``__main__`` module. This should only be passed\n during internal testing.\n\n .. versionadded:: 8.0\n Based on command args detection in the Werkzeug reloader.\n\n :meta private:\n \"\"\"\n if not path:\n path = sys.argv[0]\n\n # The value of __package__ indicates how Python was called. It may\n # not exist if a setuptools script is installed as an egg. 
It may be\n # set incorrectly for entry points created with pip on Windows.\n if getattr(_main, \"__package__\", None) is None or (\n os.name == \"nt\"\n and _main.__package__ == \"\"\n and not os.path.exists(path)\n and os.path.exists(f\"{path}.exe\")\n ):\n # Executed a file, like \"python app.py\".\n return os.path.basename(path)\n\n # Executed a module, like \"python -m example\".\n # Rewritten by Python from \"-m script\" to \"/path/to/script.py\".\n # Need to look at main module to determine how it was executed.\n py_module = t.cast(str, _main.__package__)\n name = os.path.splitext(os.path.basename(path))[0]\n\n # A submodule like \"example.cli\".\n if name != \"__main__\":\n py_module = f\"{py_module}.{name}\"\n\n return f\"python -m {py_module.lstrip('.')}\"\n\n\ndef _expand_args(\n args: t.Iterable[str],\n *,\n user: bool = True,\n env: bool = True,\n glob_recursive: bool = True,\n) -> t.List[str]:\n \"\"\"Simulate Unix shell expansion with Python functions.\n\n See :func:`glob.glob`, :func:`os.path.expanduser`, and\n :func:`os.path.expandvars`.\n\n This intended for use on Windows, where the shell does not do any\n expansion. It may not exactly match what a Unix shell would do.\n\n :param args: List of command line arguments to expand.\n :param user: Expand user home directory.\n :param env: Expand environment variables.\n :param glob_recursive: ``**`` matches directories recursively.\n\n .. versionadded:: 8.0\n\n :meta private:\n \"\"\"\n from glob import glob\n\n out = []\n\n for arg in args:\n if user:\n arg = os.path.expanduser(arg)\n\n if env:\n arg = os.path.expandvars(arg)\n\n matches = glob(arg, recursive=glob_recursive)\n\n if not matches:\n out.append(arg)\n else:\n out.extend(matches)\n\n return out\n",
"path": "src/click/utils.py"
}
] | diff --git a/CHANGES.rst b/CHANGES.rst
index 6c9a79327..d02c3e952 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -14,7 +14,8 @@ Unreleased
- Fix a typo in the Bash completion script that affected file and
directory completion. If this script was generated by a previous
version, it should be regenerated. :issue:`2163`
-- Fix typing for ``secho`` file argument. :issue:`2174`
+- Fix typing for ``echo`` and ``secho`` file argument.
+ :issue:`2174, 2185`
Version 8.0.3
diff --git a/src/click/utils.py b/src/click/utils.py
index 051cf7009..8dd3a00c7 100644
--- a/src/click/utils.py
+++ b/src/click/utils.py
@@ -203,7 +203,7 @@ def __iter__(self) -> t.Iterator[t.AnyStr]:
def echo(
message: t.Optional[t.Any] = None,
- file: t.Optional[t.IO] = None,
+ file: t.Optional[t.IO[t.Any]] = None,
nl: bool = True,
err: bool = False,
color: t.Optional[bool] = None,
|
alltheplaces__alltheplaces-4514 | Domains missing from New Look websites
The new_look_gb.py spider is returning websites that are missing the domain name. This is because that's how the website appears in the schema.org block on the pages being scraped, e.g. we have `"url":"/uk/store/Beccles-Beccles-GB-1775"`.
The scheme and domain `https://www.newlook.com` needs to be prepended to each of the returned URLs.
This is the same issue as in #4302 but for a different spider.
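A minimal sketch of one way to do this in the spider, assuming the shared `StructuredDataSpider` base class calls an `inspect_item(item, response)` hook for each parsed item (the hook name is borrowed from other spiders in this repo and should be double-checked):

```python
from scrapy.spiders import SitemapSpider

from locations.structured_data_spider import StructuredDataSpider


class NewLookGB(SitemapSpider, StructuredDataSpider):
    name = "new_look_gb"

    def inspect_item(self, item, response):
        # response.urljoin resolves a relative URL such as
        # "/uk/store/Beccles-Beccles-GB-1775" against the page URL,
        # producing "https://www.newlook.com/uk/store/Beccles-Beccles-GB-1775".
        item["website"] = response.urljoin(item["website"])
        yield item
```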
| [
{
"content": "from scrapy.spiders import SitemapSpider\n\nfrom locations.structured_data_spider import StructuredDataSpider\n\n\nclass NewLookGB(SitemapSpider, StructuredDataSpider):\n name = \"new_look_gb\"\n item_attributes = {\"brand\": \"New Look\", \"brand_wikidata\": \"Q12063852\"}\n sitemap_urls = [\"https://www.newlook.com/uk/sitemap/maps/sitemap_uk_pos_en_1.xml\"]\n sitemap_rules = [(r\"https:\\/\\/www\\.newlook\\.com\\/uk\\/store\\/[-\\w]+-(\\d+)$\", \"parse_sd\")]\n wanted_types = [\"Store\"]\n download_delay = 1\n\n def sitemap_filter(self, entries):\n for entry in entries:\n if \"closed\" not in entry[\"loc\"].lower():\n yield entry\n",
"path": "locations/spiders/new_look_gb.py"
}
] | [
{
"content": "from scrapy.spiders import SitemapSpider\n\nfrom locations.structured_data_spider import StructuredDataSpider\n\n\nclass NewLookGB(SitemapSpider, StructuredDataSpider):\n name = \"new_look_gb\"\n item_attributes = {\"brand\": \"New Look\", \"brand_wikidata\": \"Q12063852\"}\n sitemap_urls = [\"https://www.newlook.com/uk/sitemap/maps/sitemap_uk_pos_en_1.xml\"]\n sitemap_rules = [(r\"https:\\/\\/www\\.newlook\\.com\\/uk\\/store\\/[-\\w]+-(\\d+)$\", \"parse_sd\")]\n wanted_types = [\"Store\"]\n download_delay = 1\n\n def sitemap_filter(self, entries):\n for entry in entries:\n if \"closed\" not in entry[\"loc\"].lower():\n yield entry\n\n def inspect_item(self, item, response):\n item[\"website\"] = response.urljoin(item[\"website\"])\n yield item\n",
"path": "locations/spiders/new_look_gb.py"
}
] | diff --git a/locations/spiders/new_look_gb.py b/locations/spiders/new_look_gb.py
index 18e75cd91eb..7dc6f8af6c8 100644
--- a/locations/spiders/new_look_gb.py
+++ b/locations/spiders/new_look_gb.py
@@ -15,3 +15,7 @@ def sitemap_filter(self, entries):
for entry in entries:
if "closed" not in entry["loc"].lower():
yield entry
+
+ def inspect_item(self, item, response):
+ item["website"] = response.urljoin(item["website"])
+ yield item
|
ibis-project__ibis-4637 | Problem with formatting union expressions when using `value_counts`
I'm working on a subclass of the MySQL backend and using unions. When attempting to do a `value_counts` on a union, I get an attribute error. Here is a simple test using our backend (this DataFrame upload might not work in the actual MySQL, but should be fairly simple to replicate).
```
df = pd.DataFrame([[0, 1], [2, 3]], columns=['a', 'b'])
tbl = conn.create_table('test_union', df, force=True)
u = ibis.union(tbl, tbl)
u.a.value_counts()
```
Here is the tail end of the exception.
```
...
~/.pyenv/versions/3.9.4/lib/python3.9/site-packages/ibis/expr/format.py in _fmt_value_expr(expr, aliases)
555 Forwards the call on to the specific operation dispatch rule.
556 """
--> 557 return fmt_value(expr.op(), aliases=aliases)
558
559
~/.pyenv/versions/3.9.4/lib/python3.9/functools.py in wrapper(*args, **kw)
875 '1 positional argument')
876
--> 877 return dispatch(args[0].__class__)(*args, **kw)
878
879 funcname = getattr(func, '__name__', 'singledispatch function')
~/.pyenv/versions/3.9.4/lib/python3.9/site-packages/ibis/expr/format.py in _fmt_value_table_node(op, aliases, **_)
669 if not hasattr(op, 'table'):
670 import pdb; pdb.set_trace()
--> 671 return f"{aliases[op.table.op()]}"
672
673
AttributeError: 'Union' object has no attribute 'table'
```
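The failing rule (`_fmt_value_table_node`) assumes every table-valued operation has a `.table` attribute, but set operations such as `Union` carry `left`/`right` children instead. A minimal sketch of the kind of change that avoids the lookup, assuming the `aliases` mapping in `ibis/expr/format.py` is keyed by the table operation itself:

```python
# Fragment of ibis/expr/format.py (not self-contained): format a table used
# in a value expression, e.g. `table.count()`, without touching `op.table`.
@fmt_value.register
def _fmt_value_table_node(op: ops.TableNode, *, aliases: Aliases, **_: Any) -> str:
    # Look up the alias assigned to this table op directly; ops like Union
    # expose `left`/`right` rather than a single `table` attribute.
    return f"{aliases[op]}"
```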
| [
{
"content": "from __future__ import annotations\n\nimport collections\nimport functools\nimport textwrap\nimport types\nfrom typing import Any, Callable, Deque, Iterable, Mapping, Tuple\n\nimport rich.pretty\n\nimport ibis\nimport ibis.common.graph as graph\nimport ibis.expr.datatypes as dt\nimport ibis.expr.operations as ops\nimport ibis.expr.schema as sch\nimport ibis.expr.types as ir\nimport ibis.expr.window as win\nimport ibis.util as util\n\nAliases = Mapping[ops.TableNode, int]\nDeps = Deque[Tuple[int, ops.TableNode]]\n\n\nclass Alias:\n __slots__ = (\"value\",)\n\n def __init__(self, value: int) -> None:\n self.value = value\n\n def __str__(self) -> str:\n return f\"r{self.value}\"\n\n\ndef fmt(expr: ir.Expr) -> str:\n \"\"\"Format `expr`.\n\n Main entry point for the `Expr.__repr__` implementation.\n\n Returns\n -------\n str\n Formatted expression\n \"\"\"\n *deps, root = graph.toposort(expr.op()).keys()\n deps = collections.deque(\n (Alias(alias), dep)\n for alias, dep in enumerate(\n dep for dep in deps if isinstance(dep, ops.TableNode)\n )\n )\n\n aliases = {dep: alias for alias, dep in deps}\n pieces = []\n\n while deps:\n alias, node = deps.popleft()\n formatted = fmt_table_op(node, aliases=aliases, deps=deps)\n pieces.append(f\"{alias} := {formatted}\")\n\n name = expr.get_name() if expr.has_name() else None\n pieces.append(fmt_root(root, name=name, aliases=aliases, deps=deps))\n depth = ibis.options.repr.depth or 0\n if depth and depth < len(pieces):\n return fmt_truncated(pieces, depth=depth)\n return \"\\n\\n\".join(pieces)\n\n\ndef fmt_truncated(\n pieces: Iterable[str],\n *,\n depth: int,\n sep: str = \"\\n\\n\",\n ellipsis: str = util.VERTICAL_ELLIPSIS,\n) -> str:\n if depth == 1:\n return pieces[-1]\n\n first_n = depth // 2\n last_m = depth - first_n\n return sep.join([*pieces[:first_n], ellipsis, *pieces[-last_m:]])\n\n\ndef selection_maxlen(nodes: Iterable[ops.Node]) -> int:\n \"\"\"Compute the length of the longest name of input expressions.\n\n Parameters\n ----------\n expressions\n Expressions whose name to compute the maximum length of\n\n Returns\n -------\n int\n Max length\n \"\"\"\n try:\n return max(len(node.name) for node in nodes if isinstance(node, ops.Named))\n except ValueError:\n return 0\n\n\[email protected]\ndef fmt_root(op: ops.Node, *, aliases: Aliases, **_: Any) -> str:\n \"\"\"Fallback formatting implementation.\"\"\"\n raw_parts = fmt_fields(\n op,\n dict.fromkeys(op.argnames, fmt_value),\n aliases=aliases,\n )\n return f\"{op.__class__.__name__}\\n{raw_parts}\"\n\n\n@fmt_root.register\ndef _fmt_root_table_node(op: ops.TableNode, **kwargs: Any) -> str:\n return fmt_table_op(op, **kwargs)\n\n\n@fmt_root.register\ndef _fmt_root_value_op(op: ops.Value, *, name: str, aliases: Aliases, **_: Any) -> str:\n value = fmt_value(op, aliases=aliases)\n prefix = f\"{name}: \" if name is not None else \"\"\n return f\"{prefix}{value}{type_info(op.to_expr().type())}\"\n\n\n@fmt_root.register(ops.SortKey)\ndef _fmt_root_sort_key(op: ops.SortKey, *, aliases: Aliases, **_: Any) -> str:\n return fmt_value(op, aliases=aliases)\n\n\[email protected]\ndef fmt_table_op(op: ops.TableNode, **_: Any) -> str:\n assert False, f\"`fmt_table_op` not implemented for operation: {type(op)}\"\n\n\n@fmt_table_op.register\ndef _fmt_table_op_physical_table(op: ops.PhysicalTable, **_: Any) -> str:\n top = f\"{op.__class__.__name__}: {op.name}\"\n formatted_schema = fmt_schema(op.schema)\n return f\"{top}\\n{formatted_schema}\"\n\n\ndef fmt_schema(schema: sch.Schema) -> str:\n 
\"\"\"Format `schema`.\n\n Parameters\n ----------\n schema\n Ibis schema to format\n\n Returns\n -------\n str\n Formatted schema\n \"\"\"\n names = schema.names\n maxlen = max(map(len, names))\n cols = [f\"{name:<{maxlen}} {typ}\" for name, typ in schema.items()]\n depth = ibis.options.repr.table_columns\n if depth is not None and depth < len(cols):\n first_column_name = names[0]\n raw = fmt_truncated(\n cols,\n depth=depth,\n sep=\"\\n\",\n ellipsis=util.VERTICAL_ELLIPSIS.center(len(first_column_name)),\n )\n else:\n raw = \"\\n\".join(cols)\n\n return util.indent(raw, spaces=2)\n\n\n@fmt_table_op.register\ndef _fmt_table_op_sql_query_result(op: ops.SQLQueryResult, **_: Any) -> str:\n short_query = textwrap.shorten(\n op.query,\n ibis.options.repr.query_text_length,\n placeholder=f\" {util.HORIZONTAL_ELLIPSIS}\",\n )\n query = f\"query: {short_query!r}\"\n top = op.__class__.__name__\n formatted_schema = fmt_schema(op.schema)\n schema_field = util.indent(f\"schema:\\n{formatted_schema}\", spaces=2)\n return f\"{top}\\n{util.indent(query, spaces=2)}\\n{schema_field}\"\n\n\n@fmt_table_op.register\ndef _fmt_table_op_view(op: ops.View, *, aliases: Aliases, **_: Any) -> str:\n top = op.__class__.__name__\n formatted_schema = fmt_schema(op.schema)\n schema_field = util.indent(f\"schema:\\n{formatted_schema}\", spaces=2)\n return f\"{top}[{aliases[op.child]}]: {op.name}\\n{schema_field}\"\n\n\n@fmt_table_op.register\ndef _fmt_table_op_sql_view(\n op: ops.SQLStringView,\n *,\n aliases: Aliases,\n **_: Any,\n) -> str:\n short_query = textwrap.shorten(\n op.query,\n ibis.options.repr.query_text_length,\n placeholder=f\" {util.HORIZONTAL_ELLIPSIS}\",\n )\n query = f\"query: {short_query!r}\"\n top = op.__class__.__name__\n formatted_schema = fmt_schema(op.schema)\n schema_field = util.indent(f\"schema:\\n{formatted_schema}\", spaces=2)\n components = [\n f\"{top}[{aliases[op.child]}]: {op.name}\",\n util.indent(query, spaces=2),\n schema_field,\n ]\n return \"\\n\".join(components)\n\n\[email protected]\ndef fmt_join(op: ops.Join, *, aliases: Aliases) -> tuple[str, str]:\n assert False, f\"join type {type(op)} not implemented\"\n\n\n@fmt_join.register(ops.Join)\ndef _fmt_join(op: ops.Join, *, aliases: Aliases) -> tuple[str, str]:\n # format the operator and its relation inputs\n left = aliases[op.left]\n right = aliases[op.right]\n top = f\"{op.__class__.__name__}[{left}, {right}]\"\n\n # format the join predicates\n # if only one, put it directly after the join on thes same line\n # if more than one put each on a separate line\n preds = op.predicates\n formatted_preds = [fmt_value(pred, aliases=aliases) for pred in preds]\n has_one_pred = len(preds) == 1\n sep = \" \" if has_one_pred else \"\\n\"\n joined_predicates = util.indent(\n \"\\n\".join(formatted_preds),\n spaces=2 * (not has_one_pred),\n )\n trailing_sep = \"\\n\" + \"\\n\" * (not has_one_pred)\n return f\"{top}{sep}{joined_predicates}\", trailing_sep\n\n\n@fmt_join.register(ops.AsOfJoin)\ndef _fmt_asof_join(op: ops.AsOfJoin, *, aliases: Aliases) -> tuple[str, str]:\n left = aliases[op.left]\n right = aliases[op.right]\n top = f\"{op.__class__.__name__}[{left}, {right}]\"\n raw_parts = fmt_fields(\n op,\n dict(predicates=fmt_value, by=fmt_value, tolerance=fmt_value),\n aliases=aliases,\n )\n return f\"{top}\\n{raw_parts}\", \"\\n\\n\"\n\n\n@fmt_table_op.register\ndef _fmt_table_op_join(\n op: ops.Join,\n *,\n aliases: Aliases,\n deps: Deps,\n **_: Any,\n) -> str:\n # first, format the current join operation\n result, join_sep = 
fmt_join(op, aliases=aliases)\n formatted_joins = [result, join_sep]\n\n # process until the first non-Join dependency is popped in other words\n # process all runs of joins\n alias, current = None, None\n if deps:\n alias, current = deps.popleft()\n\n while isinstance(current, ops.Join):\n # copy the alias so that mutations to the value aren't shared\n # format the `current` join\n formatted_join, join_sep = fmt_join(current, aliases=aliases)\n formatted_joins.append(f\"{alias} := {formatted_join}\")\n formatted_joins.append(join_sep)\n\n if not deps:\n break\n\n alias, current = deps.popleft()\n\n if current is not None and not isinstance(current, ops.Join):\n # the last node popped from `deps` isn't a join which means we\n # still need to process it, so we put it at the front of the queue\n deps.appendleft((alias, current))\n\n # we don't want the last trailing separator so remove it from the end\n formatted_joins.pop()\n return \"\".join(formatted_joins)\n\n\n@fmt_table_op.register\ndef _(op: ops.CrossJoin, *, aliases: Aliases, **_: Any) -> str:\n left = aliases[op.left]\n right = aliases[op.right]\n return f\"{op.__class__.__name__}[{left}, {right}]\"\n\n\ndef _fmt_set_op(\n op: ops.SetOp,\n *,\n aliases: Aliases,\n distinct: bool | None = None,\n) -> str:\n args = [str(aliases[op.left]), str(aliases[op.right])]\n if distinct is not None:\n args.append(f\"distinct={distinct}\")\n return f\"{op.__class__.__name__}[{', '.join(args)}]\"\n\n\n@fmt_table_op.register\ndef _fmt_table_op_set_op(op: ops.SetOp, *, aliases: Aliases, **_: Any) -> str:\n return _fmt_set_op(op, aliases=aliases)\n\n\n@fmt_table_op.register\ndef _fmt_table_op_union(op: ops.Union, *, aliases: Aliases, **_: Any) -> str:\n return _fmt_set_op(op, aliases=aliases, distinct=op.distinct)\n\n\n@fmt_table_op.register(ops.SelfReference)\n@fmt_table_op.register(ops.Distinct)\ndef _fmt_table_op_self_reference_distinct(\n op: ops.Distinct | ops.SelfReference,\n *,\n aliases: Aliases,\n **_: Any,\n) -> str:\n return f\"{op.__class__.__name__}[{aliases[op.table]}]\"\n\n\n@fmt_table_op.register\ndef _fmt_table_op_fillna(op: ops.FillNa, *, aliases: Aliases, **_: Any) -> str:\n top = f\"{op.__class__.__name__}[{aliases[op.table]}]\"\n raw_parts = fmt_fields(op, dict(replacements=fmt_value), aliases=aliases)\n return f\"{top}\\n{raw_parts}\"\n\n\n@fmt_table_op.register\ndef _fmt_table_op_dropna(op: ops.DropNa, *, aliases: Aliases, **_: Any) -> str:\n top = f\"{op.__class__.__name__}[{aliases[op.table]}]\"\n how = f\"how: {op.how!r}\"\n raw_parts = fmt_fields(op, dict(subset=fmt_value), aliases=aliases)\n return f\"{top}\\n{util.indent(how, spaces=2)}\\n{raw_parts}\"\n\n\ndef fmt_fields(\n op: ops.TableNode,\n fields: Mapping[str, Callable[[Any, Aliases], str]],\n *,\n aliases: Aliases,\n) -> str:\n parts = []\n\n for field, formatter in fields.items():\n if exprs := [\n expr for expr in util.promote_list(getattr(op, field)) if expr is not None\n ]:\n field_fmt = [formatter(expr, aliases=aliases) for expr in exprs]\n\n parts.append(f\"{field}:\")\n parts.append(util.indent(\"\\n\".join(field_fmt), spaces=2))\n\n return util.indent(\"\\n\".join(parts), spaces=2)\n\n\n@fmt_table_op.register\ndef _fmt_table_op_selection(op: ops.Selection, *, aliases: Aliases, **_: Any) -> str:\n top = f\"{op.__class__.__name__}[{aliases[op.table]}]\"\n raw_parts = fmt_fields(\n op,\n dict(\n selections=functools.partial(\n fmt_selection_column,\n maxlen=selection_maxlen(op.selections),\n ),\n predicates=fmt_value,\n sort_keys=fmt_value,\n ),\n 
aliases=aliases,\n )\n return f\"{top}\\n{raw_parts}\"\n\n\n@fmt_table_op.register\ndef _fmt_table_op_aggregation(\n op: ops.Aggregation, *, aliases: Aliases, **_: Any\n) -> str:\n top = f\"{op.__class__.__name__}[{aliases[op.table]}]\"\n raw_parts = fmt_fields(\n op,\n dict(\n metrics=functools.partial(\n fmt_selection_column,\n maxlen=selection_maxlen(op.metrics),\n ),\n by=functools.partial(\n fmt_selection_column,\n maxlen=selection_maxlen(op.by),\n ),\n having=fmt_value,\n predicates=fmt_value,\n sort_keys=fmt_value,\n ),\n aliases=aliases,\n )\n return f\"{top}\\n{raw_parts}\"\n\n\n@fmt_table_op.register\ndef _fmt_table_op_limit(op: ops.Limit, *, aliases: Aliases, **_: Any) -> str:\n params = [str(aliases[op.table]), f\"n={op.n:d}\"]\n if offset := op.offset:\n params.append(f\"offset={offset:d}\")\n return f\"{op.__class__.__name__}[{', '.join(params)}]\"\n\n\n@fmt_table_op.register\ndef _fmt_table_op_in_memory_table(op: ops.InMemoryTable, **_: Any) -> str:\n # arbitrary limit, but some value is needed to avoid a huge repr\n max_length = 10\n pretty_data = rich.pretty.pretty_repr(op.data, max_length=max_length)\n return \"\\n\".join(\n [\n op.__class__.__name__,\n util.indent(\"data:\", spaces=2),\n util.indent(pretty_data, spaces=4),\n ]\n )\n\n\[email protected]\ndef fmt_selection_column(value_expr: object, **_: Any) -> str:\n assert False, (\n \"expression type not implemented for \"\n f\"fmt_selection_column: {type(value_expr)}\"\n )\n\n\ndef type_info(datatype: dt.DataType) -> str:\n \"\"\"Format `datatype` for display next to a column.\"\"\"\n return f\" # {datatype}\" * ibis.options.repr.show_types\n\n\n@fmt_selection_column.register\ndef _fmt_selection_column_sequence(node: ops.NodeList, **kwargs):\n return \"\\n\".join(fmt_selection_column(value, **kwargs) for value in node.values)\n\n\n@fmt_selection_column.register\ndef _fmt_selection_column_value_expr(\n node: ops.Value, *, aliases: Aliases, maxlen: int = 0\n) -> str:\n name = f\"{node.name}:\"\n # the additional 1 is for the colon\n aligned_name = f\"{name:<{maxlen + 1}}\"\n value = fmt_value(node, aliases=aliases)\n dtype = type_info(node.output_dtype)\n return f\"{aligned_name} {value}{dtype}\"\n\n\n@fmt_selection_column.register\ndef _fmt_selection_column_table_expr(\n node: ops.TableNode, *, aliases: Aliases, **_: Any\n) -> str:\n return str(aliases[node])\n\n\n_BIN_OP_CHARS = {\n # comparison operations\n ops.Equals: \"==\",\n ops.IdenticalTo: \"===\",\n ops.NotEquals: \"!=\",\n ops.Less: \"<\",\n ops.LessEqual: \"<=\",\n ops.Greater: \">\",\n ops.GreaterEqual: \">=\",\n # arithmetic\n ops.Add: \"+\",\n ops.Subtract: \"-\",\n ops.Multiply: \"*\",\n ops.Divide: \"/\",\n ops.FloorDivide: \"//\",\n ops.Modulus: \"%\",\n ops.Power: \"**\",\n # temporal operations\n ops.DateAdd: \"+\",\n ops.DateSub: \"-\",\n ops.DateDiff: \"-\",\n ops.TimeAdd: \"+\",\n ops.TimeSub: \"-\",\n ops.TimeDiff: \"-\",\n ops.TimestampAdd: \"+\",\n ops.TimestampSub: \"-\",\n ops.TimestampDiff: \"-\",\n ops.IntervalAdd: \"+\",\n ops.IntervalSubtract: \"-\",\n ops.IntervalMultiply: \"*\",\n ops.IntervalFloorDivide: \"//\",\n # boolean operators\n ops.And: \"&\",\n ops.Or: \"|\",\n ops.Xor: \"^\",\n}\n\n\[email protected]\ndef fmt_value(obj, **_: Any) -> str:\n \"\"\"Format a value expression or operation.\n\n [`repr`][repr] the object if we don't have a specific formatting\n rule.\n \"\"\"\n return repr(obj)\n\n\n@fmt_value.register\ndef _fmt_value_function_type(func: types.FunctionType, **_: Any) -> str:\n return 
func.__name__\n\n\n@fmt_value.register\ndef _fmt_value_node(op: ops.Node, **_: Any) -> str:\n assert False, f\"`fmt_value` not implemented for operation: {type(op)}\"\n\n\n@fmt_value.register\ndef _fmt_value_sequence(op: ops.NodeList, **kwargs: Any) -> str:\n return \", \".join([fmt_value(value, **kwargs) for value in op])\n\n\n@fmt_value.register\ndef _fmt_value_expr(op: ops.Value, *, aliases: Aliases) -> str:\n \"\"\"Format a value expression.\n\n Forwards the call on to the specific operation dispatch rule.\n \"\"\"\n return fmt_value(op, aliases=aliases)\n\n\n@fmt_value.register\ndef _fmt_value_binary_op(op: ops.Binary, *, aliases: Aliases) -> str:\n left = fmt_value(op.left, aliases=aliases)\n right = fmt_value(op.right, aliases=aliases)\n try:\n op_char = _BIN_OP_CHARS[type(op)]\n except KeyError:\n return f\"{type(op).__name__}({left}, {right})\"\n else:\n return f\"{left} {op_char} {right}\"\n\n\n@fmt_value.register\ndef _fmt_value_negate(op: ops.Negate, *, aliases: Aliases) -> str:\n op_name = \"Not\" if isinstance(op.output_dtype, dt.Boolean) else \"Negate\"\n operand = fmt_value(op.arg, aliases=aliases)\n return f\"{op_name}({operand})\"\n\n\n@fmt_value.register\ndef _fmt_value_literal(op: ops.Literal, **_: Any) -> str:\n if isinstance(op.dtype, dt.Interval):\n return f\"{op.value} {op.dtype.unit}\"\n return repr(op.value)\n\n\n@fmt_value.register\ndef _fmt_value_datatype(datatype: dt.DataType, **_: Any) -> str:\n return str(datatype)\n\n\n@fmt_value.register\ndef _fmt_value_value_op(op: ops.Value, *, aliases: Aliases) -> str:\n args = []\n # loop over argument names and original expression\n for argname, orig_expr in zip(op.argnames, op.args):\n # promote argument to a list, so that we don't accidentially repr\n # entire subtrees when all we want is the formatted argument value\n if exprs := [expr for expr in util.promote_list(orig_expr) if expr is not None]:\n # format the individual argument values\n formatted_args = \", \".join(\n fmt_value(expr, aliases=aliases) for expr in exprs\n )\n # if the original argument was a non-string iterable, display it as\n # a list\n value = (\n f\"[{formatted_args}]\" if util.is_iterable(orig_expr) else formatted_args\n )\n # `arg` and `expr` are noisy, so we ignore printing them as a\n # special case\n if argname not in (\"arg\", \"expr\"):\n formatted = f\"{argname}={value}\"\n else:\n formatted = value\n args.append(formatted)\n\n return f\"{op.__class__.__name__}({', '.join(args)})\"\n\n\n@fmt_value.register\ndef _fmt_value_alias(op: ops.Alias, *, aliases: Aliases) -> str:\n return fmt_value(op.arg, aliases=aliases)\n\n\n@fmt_value.register\ndef _fmt_value_table_column(op: ops.TableColumn, *, aliases: Aliases) -> str:\n return f\"{aliases[op.table]}.{op.name}\"\n\n\n@fmt_value.register\ndef _fmt_value_scalar_parameter(op: ops.ScalarParameter, **_: Any) -> str:\n return f\"$({op.dtype})\"\n\n\n@fmt_value.register\ndef _fmt_value_sort_key(op: ops.SortKey, *, aliases: Aliases) -> str:\n expr = fmt_value(op.expr, aliases=aliases)\n prefix = \"asc\" if op.ascending else \"desc\"\n return f\"{prefix} {expr}\"\n\n\n@fmt_value.register\ndef _fmt_value_physical_table(op: ops.PhysicalTable, **_: Any) -> str:\n \"\"\"Format a table as value.\n\n This function is called when a table is used in a value expression.\n An example is `table.count()`.\n \"\"\"\n return op.name\n\n\n@fmt_value.register\ndef _fmt_value_table_node(op: ops.TableNode, *, aliases: Aliases, **_: Any) -> str:\n \"\"\"Format a table as value.\n\n This function is called when a 
table is used in a value expression.\n An example is `table.count()`.\n \"\"\"\n return f\"{aliases[op.table]}\"\n\n\n@fmt_value.register\ndef _fmt_value_string_sql_like(op: ops.StringSQLLike, *, aliases: Aliases) -> str:\n expr = fmt_value(op.arg, aliases=aliases)\n pattern = fmt_value(op.pattern, aliases=aliases)\n prefix = \"I\" * isinstance(op, ops.StringSQLILike)\n return f\"{expr} {prefix}LIKE {pattern}\"\n\n\n@fmt_value.register\ndef _fmt_value_window(win: win.Window, *, aliases: Aliases) -> str:\n args = []\n for field, value in (\n (\"_group_by\", win._group_by),\n (\"_order_by\", win._order_by),\n (\"preceding\", win.preceding),\n (\"following\", win.following),\n (\"max_lookback\", win.max_lookback),\n (\"how\", win.how),\n ):\n disp_field = field.lstrip(\"_\")\n if value is not None:\n if isinstance(value, tuple):\n # don't show empty sequences\n if not value:\n continue\n elements = \", \".join(\n fmt_value(\n arg.op() if isinstance(arg, ir.Expr) else arg,\n aliases=aliases,\n )\n for arg in value\n )\n formatted = f\"[{elements}]\"\n else:\n formatted = fmt_value(value, aliases=aliases)\n args.append(f\"{disp_field}={formatted}\")\n return f\"{win.__class__.__name__}({', '.join(args)})\"\n",
"path": "ibis/expr/format.py"
}
] | [
{
"content": "from __future__ import annotations\n\nimport collections\nimport functools\nimport textwrap\nimport types\nfrom typing import Any, Callable, Deque, Iterable, Mapping, Tuple\n\nimport rich.pretty\n\nimport ibis\nimport ibis.common.graph as graph\nimport ibis.expr.datatypes as dt\nimport ibis.expr.operations as ops\nimport ibis.expr.schema as sch\nimport ibis.expr.types as ir\nimport ibis.expr.window as win\nimport ibis.util as util\n\nAliases = Mapping[ops.TableNode, int]\nDeps = Deque[Tuple[int, ops.TableNode]]\n\n\nclass Alias:\n __slots__ = (\"value\",)\n\n def __init__(self, value: int) -> None:\n self.value = value\n\n def __str__(self) -> str:\n return f\"r{self.value}\"\n\n\ndef fmt(expr: ir.Expr) -> str:\n \"\"\"Format `expr`.\n\n Main entry point for the `Expr.__repr__` implementation.\n\n Returns\n -------\n str\n Formatted expression\n \"\"\"\n *deps, root = graph.toposort(expr.op()).keys()\n deps = collections.deque(\n (Alias(alias), dep)\n for alias, dep in enumerate(\n dep for dep in deps if isinstance(dep, ops.TableNode)\n )\n )\n\n aliases = {dep: alias for alias, dep in deps}\n pieces = []\n\n while deps:\n alias, node = deps.popleft()\n formatted = fmt_table_op(node, aliases=aliases, deps=deps)\n pieces.append(f\"{alias} := {formatted}\")\n\n name = expr.get_name() if expr.has_name() else None\n pieces.append(fmt_root(root, name=name, aliases=aliases, deps=deps))\n depth = ibis.options.repr.depth or 0\n if depth and depth < len(pieces):\n return fmt_truncated(pieces, depth=depth)\n return \"\\n\\n\".join(pieces)\n\n\ndef fmt_truncated(\n pieces: Iterable[str],\n *,\n depth: int,\n sep: str = \"\\n\\n\",\n ellipsis: str = util.VERTICAL_ELLIPSIS,\n) -> str:\n if depth == 1:\n return pieces[-1]\n\n first_n = depth // 2\n last_m = depth - first_n\n return sep.join([*pieces[:first_n], ellipsis, *pieces[-last_m:]])\n\n\ndef selection_maxlen(nodes: Iterable[ops.Node]) -> int:\n \"\"\"Compute the length of the longest name of input expressions.\n\n Parameters\n ----------\n expressions\n Expressions whose name to compute the maximum length of\n\n Returns\n -------\n int\n Max length\n \"\"\"\n try:\n return max(len(node.name) for node in nodes if isinstance(node, ops.Named))\n except ValueError:\n return 0\n\n\[email protected]\ndef fmt_root(op: ops.Node, *, aliases: Aliases, **_: Any) -> str:\n \"\"\"Fallback formatting implementation.\"\"\"\n raw_parts = fmt_fields(\n op,\n dict.fromkeys(op.argnames, fmt_value),\n aliases=aliases,\n )\n return f\"{op.__class__.__name__}\\n{raw_parts}\"\n\n\n@fmt_root.register\ndef _fmt_root_table_node(op: ops.TableNode, **kwargs: Any) -> str:\n return fmt_table_op(op, **kwargs)\n\n\n@fmt_root.register\ndef _fmt_root_value_op(op: ops.Value, *, name: str, aliases: Aliases, **_: Any) -> str:\n value = fmt_value(op, aliases=aliases)\n prefix = f\"{name}: \" if name is not None else \"\"\n return f\"{prefix}{value}{type_info(op.to_expr().type())}\"\n\n\n@fmt_root.register(ops.SortKey)\ndef _fmt_root_sort_key(op: ops.SortKey, *, aliases: Aliases, **_: Any) -> str:\n return fmt_value(op, aliases=aliases)\n\n\[email protected]\ndef fmt_table_op(op: ops.TableNode, **_: Any) -> str:\n assert False, f\"`fmt_table_op` not implemented for operation: {type(op)}\"\n\n\n@fmt_table_op.register\ndef _fmt_table_op_physical_table(op: ops.PhysicalTable, **_: Any) -> str:\n top = f\"{op.__class__.__name__}: {op.name}\"\n formatted_schema = fmt_schema(op.schema)\n return f\"{top}\\n{formatted_schema}\"\n\n\ndef fmt_schema(schema: sch.Schema) -> str:\n 
\"\"\"Format `schema`.\n\n Parameters\n ----------\n schema\n Ibis schema to format\n\n Returns\n -------\n str\n Formatted schema\n \"\"\"\n names = schema.names\n maxlen = max(map(len, names))\n cols = [f\"{name:<{maxlen}} {typ}\" for name, typ in schema.items()]\n depth = ibis.options.repr.table_columns\n if depth is not None and depth < len(cols):\n first_column_name = names[0]\n raw = fmt_truncated(\n cols,\n depth=depth,\n sep=\"\\n\",\n ellipsis=util.VERTICAL_ELLIPSIS.center(len(first_column_name)),\n )\n else:\n raw = \"\\n\".join(cols)\n\n return util.indent(raw, spaces=2)\n\n\n@fmt_table_op.register\ndef _fmt_table_op_sql_query_result(op: ops.SQLQueryResult, **_: Any) -> str:\n short_query = textwrap.shorten(\n op.query,\n ibis.options.repr.query_text_length,\n placeholder=f\" {util.HORIZONTAL_ELLIPSIS}\",\n )\n query = f\"query: {short_query!r}\"\n top = op.__class__.__name__\n formatted_schema = fmt_schema(op.schema)\n schema_field = util.indent(f\"schema:\\n{formatted_schema}\", spaces=2)\n return f\"{top}\\n{util.indent(query, spaces=2)}\\n{schema_field}\"\n\n\n@fmt_table_op.register\ndef _fmt_table_op_view(op: ops.View, *, aliases: Aliases, **_: Any) -> str:\n top = op.__class__.__name__\n formatted_schema = fmt_schema(op.schema)\n schema_field = util.indent(f\"schema:\\n{formatted_schema}\", spaces=2)\n return f\"{top}[{aliases[op.child]}]: {op.name}\\n{schema_field}\"\n\n\n@fmt_table_op.register\ndef _fmt_table_op_sql_view(\n op: ops.SQLStringView,\n *,\n aliases: Aliases,\n **_: Any,\n) -> str:\n short_query = textwrap.shorten(\n op.query,\n ibis.options.repr.query_text_length,\n placeholder=f\" {util.HORIZONTAL_ELLIPSIS}\",\n )\n query = f\"query: {short_query!r}\"\n top = op.__class__.__name__\n formatted_schema = fmt_schema(op.schema)\n schema_field = util.indent(f\"schema:\\n{formatted_schema}\", spaces=2)\n components = [\n f\"{top}[{aliases[op.child]}]: {op.name}\",\n util.indent(query, spaces=2),\n schema_field,\n ]\n return \"\\n\".join(components)\n\n\[email protected]\ndef fmt_join(op: ops.Join, *, aliases: Aliases) -> tuple[str, str]:\n assert False, f\"join type {type(op)} not implemented\"\n\n\n@fmt_join.register(ops.Join)\ndef _fmt_join(op: ops.Join, *, aliases: Aliases) -> tuple[str, str]:\n # format the operator and its relation inputs\n left = aliases[op.left]\n right = aliases[op.right]\n top = f\"{op.__class__.__name__}[{left}, {right}]\"\n\n # format the join predicates\n # if only one, put it directly after the join on thes same line\n # if more than one put each on a separate line\n preds = op.predicates\n formatted_preds = [fmt_value(pred, aliases=aliases) for pred in preds]\n has_one_pred = len(preds) == 1\n sep = \" \" if has_one_pred else \"\\n\"\n joined_predicates = util.indent(\n \"\\n\".join(formatted_preds),\n spaces=2 * (not has_one_pred),\n )\n trailing_sep = \"\\n\" + \"\\n\" * (not has_one_pred)\n return f\"{top}{sep}{joined_predicates}\", trailing_sep\n\n\n@fmt_join.register(ops.AsOfJoin)\ndef _fmt_asof_join(op: ops.AsOfJoin, *, aliases: Aliases) -> tuple[str, str]:\n left = aliases[op.left]\n right = aliases[op.right]\n top = f\"{op.__class__.__name__}[{left}, {right}]\"\n raw_parts = fmt_fields(\n op,\n dict(predicates=fmt_value, by=fmt_value, tolerance=fmt_value),\n aliases=aliases,\n )\n return f\"{top}\\n{raw_parts}\", \"\\n\\n\"\n\n\n@fmt_table_op.register\ndef _fmt_table_op_join(\n op: ops.Join,\n *,\n aliases: Aliases,\n deps: Deps,\n **_: Any,\n) -> str:\n # first, format the current join operation\n result, join_sep = 
fmt_join(op, aliases=aliases)\n formatted_joins = [result, join_sep]\n\n # process until the first non-Join dependency is popped in other words\n # process all runs of joins\n alias, current = None, None\n if deps:\n alias, current = deps.popleft()\n\n while isinstance(current, ops.Join):\n # copy the alias so that mutations to the value aren't shared\n # format the `current` join\n formatted_join, join_sep = fmt_join(current, aliases=aliases)\n formatted_joins.append(f\"{alias} := {formatted_join}\")\n formatted_joins.append(join_sep)\n\n if not deps:\n break\n\n alias, current = deps.popleft()\n\n if current is not None and not isinstance(current, ops.Join):\n # the last node popped from `deps` isn't a join which means we\n # still need to process it, so we put it at the front of the queue\n deps.appendleft((alias, current))\n\n # we don't want the last trailing separator so remove it from the end\n formatted_joins.pop()\n return \"\".join(formatted_joins)\n\n\n@fmt_table_op.register\ndef _(op: ops.CrossJoin, *, aliases: Aliases, **_: Any) -> str:\n left = aliases[op.left]\n right = aliases[op.right]\n return f\"{op.__class__.__name__}[{left}, {right}]\"\n\n\ndef _fmt_set_op(\n op: ops.SetOp,\n *,\n aliases: Aliases,\n distinct: bool | None = None,\n) -> str:\n args = [str(aliases[op.left]), str(aliases[op.right])]\n if distinct is not None:\n args.append(f\"distinct={distinct}\")\n return f\"{op.__class__.__name__}[{', '.join(args)}]\"\n\n\n@fmt_table_op.register\ndef _fmt_table_op_set_op(op: ops.SetOp, *, aliases: Aliases, **_: Any) -> str:\n return _fmt_set_op(op, aliases=aliases)\n\n\n@fmt_table_op.register\ndef _fmt_table_op_union(op: ops.Union, *, aliases: Aliases, **_: Any) -> str:\n return _fmt_set_op(op, aliases=aliases, distinct=op.distinct)\n\n\n@fmt_table_op.register(ops.SelfReference)\n@fmt_table_op.register(ops.Distinct)\ndef _fmt_table_op_self_reference_distinct(\n op: ops.Distinct | ops.SelfReference,\n *,\n aliases: Aliases,\n **_: Any,\n) -> str:\n return f\"{op.__class__.__name__}[{aliases[op.table]}]\"\n\n\n@fmt_table_op.register\ndef _fmt_table_op_fillna(op: ops.FillNa, *, aliases: Aliases, **_: Any) -> str:\n top = f\"{op.__class__.__name__}[{aliases[op.table]}]\"\n raw_parts = fmt_fields(op, dict(replacements=fmt_value), aliases=aliases)\n return f\"{top}\\n{raw_parts}\"\n\n\n@fmt_table_op.register\ndef _fmt_table_op_dropna(op: ops.DropNa, *, aliases: Aliases, **_: Any) -> str:\n top = f\"{op.__class__.__name__}[{aliases[op.table]}]\"\n how = f\"how: {op.how!r}\"\n raw_parts = fmt_fields(op, dict(subset=fmt_value), aliases=aliases)\n return f\"{top}\\n{util.indent(how, spaces=2)}\\n{raw_parts}\"\n\n\ndef fmt_fields(\n op: ops.TableNode,\n fields: Mapping[str, Callable[[Any, Aliases], str]],\n *,\n aliases: Aliases,\n) -> str:\n parts = []\n\n for field, formatter in fields.items():\n if exprs := [\n expr for expr in util.promote_list(getattr(op, field)) if expr is not None\n ]:\n field_fmt = [formatter(expr, aliases=aliases) for expr in exprs]\n\n parts.append(f\"{field}:\")\n parts.append(util.indent(\"\\n\".join(field_fmt), spaces=2))\n\n return util.indent(\"\\n\".join(parts), spaces=2)\n\n\n@fmt_table_op.register\ndef _fmt_table_op_selection(op: ops.Selection, *, aliases: Aliases, **_: Any) -> str:\n top = f\"{op.__class__.__name__}[{aliases[op.table]}]\"\n raw_parts = fmt_fields(\n op,\n dict(\n selections=functools.partial(\n fmt_selection_column,\n maxlen=selection_maxlen(op.selections),\n ),\n predicates=fmt_value,\n sort_keys=fmt_value,\n ),\n 
aliases=aliases,\n )\n return f\"{top}\\n{raw_parts}\"\n\n\n@fmt_table_op.register\ndef _fmt_table_op_aggregation(\n op: ops.Aggregation, *, aliases: Aliases, **_: Any\n) -> str:\n top = f\"{op.__class__.__name__}[{aliases[op.table]}]\"\n raw_parts = fmt_fields(\n op,\n dict(\n metrics=functools.partial(\n fmt_selection_column,\n maxlen=selection_maxlen(op.metrics),\n ),\n by=functools.partial(\n fmt_selection_column,\n maxlen=selection_maxlen(op.by),\n ),\n having=fmt_value,\n predicates=fmt_value,\n sort_keys=fmt_value,\n ),\n aliases=aliases,\n )\n return f\"{top}\\n{raw_parts}\"\n\n\n@fmt_table_op.register\ndef _fmt_table_op_limit(op: ops.Limit, *, aliases: Aliases, **_: Any) -> str:\n params = [str(aliases[op.table]), f\"n={op.n:d}\"]\n if offset := op.offset:\n params.append(f\"offset={offset:d}\")\n return f\"{op.__class__.__name__}[{', '.join(params)}]\"\n\n\n@fmt_table_op.register\ndef _fmt_table_op_in_memory_table(op: ops.InMemoryTable, **_: Any) -> str:\n # arbitrary limit, but some value is needed to avoid a huge repr\n max_length = 10\n pretty_data = rich.pretty.pretty_repr(op.data, max_length=max_length)\n return \"\\n\".join(\n [\n op.__class__.__name__,\n util.indent(\"data:\", spaces=2),\n util.indent(pretty_data, spaces=4),\n ]\n )\n\n\[email protected]\ndef fmt_selection_column(value_expr: object, **_: Any) -> str:\n assert False, (\n \"expression type not implemented for \"\n f\"fmt_selection_column: {type(value_expr)}\"\n )\n\n\ndef type_info(datatype: dt.DataType) -> str:\n \"\"\"Format `datatype` for display next to a column.\"\"\"\n return f\" # {datatype}\" * ibis.options.repr.show_types\n\n\n@fmt_selection_column.register\ndef _fmt_selection_column_sequence(node: ops.NodeList, **kwargs):\n return \"\\n\".join(fmt_selection_column(value, **kwargs) for value in node.values)\n\n\n@fmt_selection_column.register\ndef _fmt_selection_column_value_expr(\n node: ops.Value, *, aliases: Aliases, maxlen: int = 0\n) -> str:\n name = f\"{node.name}:\"\n # the additional 1 is for the colon\n aligned_name = f\"{name:<{maxlen + 1}}\"\n value = fmt_value(node, aliases=aliases)\n dtype = type_info(node.output_dtype)\n return f\"{aligned_name} {value}{dtype}\"\n\n\n@fmt_selection_column.register\ndef _fmt_selection_column_table_expr(\n node: ops.TableNode, *, aliases: Aliases, **_: Any\n) -> str:\n return str(aliases[node])\n\n\n_BIN_OP_CHARS = {\n # comparison operations\n ops.Equals: \"==\",\n ops.IdenticalTo: \"===\",\n ops.NotEquals: \"!=\",\n ops.Less: \"<\",\n ops.LessEqual: \"<=\",\n ops.Greater: \">\",\n ops.GreaterEqual: \">=\",\n # arithmetic\n ops.Add: \"+\",\n ops.Subtract: \"-\",\n ops.Multiply: \"*\",\n ops.Divide: \"/\",\n ops.FloorDivide: \"//\",\n ops.Modulus: \"%\",\n ops.Power: \"**\",\n # temporal operations\n ops.DateAdd: \"+\",\n ops.DateSub: \"-\",\n ops.DateDiff: \"-\",\n ops.TimeAdd: \"+\",\n ops.TimeSub: \"-\",\n ops.TimeDiff: \"-\",\n ops.TimestampAdd: \"+\",\n ops.TimestampSub: \"-\",\n ops.TimestampDiff: \"-\",\n ops.IntervalAdd: \"+\",\n ops.IntervalSubtract: \"-\",\n ops.IntervalMultiply: \"*\",\n ops.IntervalFloorDivide: \"//\",\n # boolean operators\n ops.And: \"&\",\n ops.Or: \"|\",\n ops.Xor: \"^\",\n}\n\n\[email protected]\ndef fmt_value(obj, **_: Any) -> str:\n \"\"\"Format a value expression or operation.\n\n [`repr`][repr] the object if we don't have a specific formatting\n rule.\n \"\"\"\n return repr(obj)\n\n\n@fmt_value.register\ndef _fmt_value_function_type(func: types.FunctionType, **_: Any) -> str:\n return 
func.__name__\n\n\n@fmt_value.register\ndef _fmt_value_node(op: ops.Node, **_: Any) -> str:\n assert False, f\"`fmt_value` not implemented for operation: {type(op)}\"\n\n\n@fmt_value.register\ndef _fmt_value_sequence(op: ops.NodeList, **kwargs: Any) -> str:\n return \", \".join([fmt_value(value, **kwargs) for value in op])\n\n\n@fmt_value.register\ndef _fmt_value_expr(op: ops.Value, *, aliases: Aliases) -> str:\n \"\"\"Format a value expression.\n\n Forwards the call on to the specific operation dispatch rule.\n \"\"\"\n return fmt_value(op, aliases=aliases)\n\n\n@fmt_value.register\ndef _fmt_value_binary_op(op: ops.Binary, *, aliases: Aliases) -> str:\n left = fmt_value(op.left, aliases=aliases)\n right = fmt_value(op.right, aliases=aliases)\n try:\n op_char = _BIN_OP_CHARS[type(op)]\n except KeyError:\n return f\"{type(op).__name__}({left}, {right})\"\n else:\n return f\"{left} {op_char} {right}\"\n\n\n@fmt_value.register\ndef _fmt_value_negate(op: ops.Negate, *, aliases: Aliases) -> str:\n op_name = \"Not\" if isinstance(op.output_dtype, dt.Boolean) else \"Negate\"\n operand = fmt_value(op.arg, aliases=aliases)\n return f\"{op_name}({operand})\"\n\n\n@fmt_value.register\ndef _fmt_value_literal(op: ops.Literal, **_: Any) -> str:\n if isinstance(op.dtype, dt.Interval):\n return f\"{op.value} {op.dtype.unit}\"\n return repr(op.value)\n\n\n@fmt_value.register\ndef _fmt_value_datatype(datatype: dt.DataType, **_: Any) -> str:\n return str(datatype)\n\n\n@fmt_value.register\ndef _fmt_value_value_op(op: ops.Value, *, aliases: Aliases) -> str:\n args = []\n # loop over argument names and original expression\n for argname, orig_expr in zip(op.argnames, op.args):\n # promote argument to a list, so that we don't accidentially repr\n # entire subtrees when all we want is the formatted argument value\n if exprs := [expr for expr in util.promote_list(orig_expr) if expr is not None]:\n # format the individual argument values\n formatted_args = \", \".join(\n fmt_value(expr, aliases=aliases) for expr in exprs\n )\n # if the original argument was a non-string iterable, display it as\n # a list\n value = (\n f\"[{formatted_args}]\" if util.is_iterable(orig_expr) else formatted_args\n )\n # `arg` and `expr` are noisy, so we ignore printing them as a\n # special case\n if argname not in (\"arg\", \"expr\"):\n formatted = f\"{argname}={value}\"\n else:\n formatted = value\n args.append(formatted)\n\n return f\"{op.__class__.__name__}({', '.join(args)})\"\n\n\n@fmt_value.register\ndef _fmt_value_alias(op: ops.Alias, *, aliases: Aliases) -> str:\n return fmt_value(op.arg, aliases=aliases)\n\n\n@fmt_value.register\ndef _fmt_value_table_column(op: ops.TableColumn, *, aliases: Aliases) -> str:\n return f\"{aliases[op.table]}.{op.name}\"\n\n\n@fmt_value.register\ndef _fmt_value_scalar_parameter(op: ops.ScalarParameter, **_: Any) -> str:\n return f\"$({op.dtype})\"\n\n\n@fmt_value.register\ndef _fmt_value_sort_key(op: ops.SortKey, *, aliases: Aliases) -> str:\n expr = fmt_value(op.expr, aliases=aliases)\n prefix = \"asc\" if op.ascending else \"desc\"\n return f\"{prefix} {expr}\"\n\n\n@fmt_value.register\ndef _fmt_value_physical_table(op: ops.PhysicalTable, **_: Any) -> str:\n \"\"\"Format a table as value.\n\n This function is called when a table is used in a value expression.\n An example is `table.count()`.\n \"\"\"\n return op.name\n\n\n@fmt_value.register\ndef _fmt_value_table_node(op: ops.TableNode, *, aliases: Aliases, **_: Any) -> str:\n \"\"\"Format a table as value.\n\n This function is called when a 
table is used in a value expression.\n An example is `table.count()`.\n \"\"\"\n return f\"{aliases[op]}\"\n\n\n@fmt_value.register\ndef _fmt_value_string_sql_like(op: ops.StringSQLLike, *, aliases: Aliases) -> str:\n expr = fmt_value(op.arg, aliases=aliases)\n pattern = fmt_value(op.pattern, aliases=aliases)\n prefix = \"I\" * isinstance(op, ops.StringSQLILike)\n return f\"{expr} {prefix}LIKE {pattern}\"\n\n\n@fmt_value.register\ndef _fmt_value_window(win: win.Window, *, aliases: Aliases) -> str:\n args = []\n for field, value in (\n (\"_group_by\", win._group_by),\n (\"_order_by\", win._order_by),\n (\"preceding\", win.preceding),\n (\"following\", win.following),\n (\"max_lookback\", win.max_lookback),\n (\"how\", win.how),\n ):\n disp_field = field.lstrip(\"_\")\n if value is not None:\n if isinstance(value, tuple):\n # don't show empty sequences\n if not value:\n continue\n elements = \", \".join(\n fmt_value(\n arg.op() if isinstance(arg, ir.Expr) else arg,\n aliases=aliases,\n )\n for arg in value\n )\n formatted = f\"[{elements}]\"\n else:\n formatted = fmt_value(value, aliases=aliases)\n args.append(f\"{disp_field}={formatted}\")\n return f\"{win.__class__.__name__}({', '.join(args)})\"\n",
"path": "ibis/expr/format.py"
}
] | diff --git a/ibis/expr/format.py b/ibis/expr/format.py
index 3c6065baf22a..1d3ae3e7075d 100644
--- a/ibis/expr/format.py
+++ b/ibis/expr/format.py
@@ -643,7 +643,7 @@ def _fmt_value_table_node(op: ops.TableNode, *, aliases: Aliases, **_: Any) -> s
This function is called when a table is used in a value expression.
An example is `table.count()`.
"""
- return f"{aliases[op.table]}"
+ return f"{aliases[op]}"
@fmt_value.register
diff --git a/ibis/tests/expr/test_format.py b/ibis/tests/expr/test_format.py
index cc5c47ebccf6..454d7facb7b0 100644
--- a/ibis/tests/expr/test_format.py
+++ b/ibis/tests/expr/test_format.py
@@ -275,6 +275,21 @@ def test_tables_have_format_value_rules(cls):
assert cls in ibis.expr.format.fmt_value.registry
+@pytest.mark.parametrize(
+ "f",
+ [
+ lambda t1, t2: t1.count(),
+ lambda t1, t2: t1.join(t2, t1.a == t2.a).count(),
+ lambda t1, t2: ibis.union(t1, t2).count(),
+ ],
+)
+def test_table_value_expr(f):
+ t1 = ibis.table([("a", "int"), ("b", "float")], name="t1")
+ t2 = ibis.table([("a", "int"), ("b", "float")], name="t2")
+ expr = f(t1, t2)
+ repr(expr) # smoketest
+
+
def test_window_no_group_by():
t = ibis.table(dict(a="int64", b="string"), name="t")
expr = t.a.mean().over(ibis.window(preceding=0))
|
scalableminds__webknossos-libs-312 | Convenience for wkcuber.api
To open/create a dataset with the cool new high-level API, the following code is required:
```python
from wkcuber.api.Dataset import WKDataset
from pathlib import Path
ds1 = WKDataset.create(Path("path") / "to" / "dataset1", scale=(128,128,128))
ds2 = WKDataset.open(Path("path") / "to" / "dataset2")
```
For one-off scripts, I think it could be a bit more convenient if we had an API like this:
```python
from wkcuber import WKDataset
ds1 = WKDataset.create("path/to/dataset1", scale=(128, 128, 128))
ds2 = WKDataset.open("path/to/dataset2")
```
Any thoughts? @rschwanhold @jstriebel @philippotto
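One low-cost way to get most of the way there would be re-exporting the class from the package root so the long import path disappears; a sketch of what `wkcuber/__init__.py` could look like while keeping the existing exports:

```python
# wkcuber/__init__.py -- sketch: expose the high-level API at package level.
from .api.Dataset import WKDataset
from .mag import Mag

from .cubing import cubing
from .downsampling import downsample_mags
from .compress import compress_mag
from .metadata import write_webknossos_metadata
```

Accepting plain `"path/to/dataset"` strings in addition to `Path` objects would additionally require `WKDataset.create`/`WKDataset.open` to wrap their argument in `pathlib.Path`, which is an assumption about their current signatures.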
| [
{
"content": "from .cubing import cubing\nfrom .downsampling import downsample_mags\nfrom .compress import compress_mag\nfrom .metadata import write_webknossos_metadata\n",
"path": "wkcuber/__init__.py"
}
] | [
{
"content": "from .api.Dataset import WKDataset\nfrom .cubing import cubing\nfrom .downsampling import downsample_mags\nfrom .compress import compress_mag\nfrom .mag import Mag\nfrom .metadata import write_webknossos_metadata\n",
"path": "wkcuber/__init__.py"
}
] | diff --git a/tests/test_reexport.py b/tests/test_reexport.py
new file mode 100644
index 000000000..124704668
--- /dev/null
+++ b/tests/test_reexport.py
@@ -0,0 +1,8 @@
+from wkcuber import Mag, WKDataset
+from wkcuber.mag import Mag as _Mag
+from wkcuber.api.Dataset import WKDataset as _WKDataset
+
+
+def test_reexport_classes() -> None:
+ assert Mag == _Mag, "Mag exports should be the same class"
+ assert WKDataset == _WKDataset, "WKDataset exports should be the same class"
diff --git a/wkcuber/__init__.py b/wkcuber/__init__.py
index e53f7a6ee..e1298544c 100644
--- a/wkcuber/__init__.py
+++ b/wkcuber/__init__.py
@@ -1,4 +1,6 @@
+from .api.Dataset import WKDataset
from .cubing import cubing
from .downsampling import downsample_mags
from .compress import compress_mag
+from .mag import Mag
from .metadata import write_webknossos_metadata
|
opsdroid__opsdroid-1241 | Exiting opsdroid with ctrl+c fails with exception
# Description
I am trying to build a Slack bot using Opsdroid (master branch). When pressing `ctrl+c` to exit opsdroid, the process does not stop and throws an error.
## Steps to Reproduce
1. Start opsdroid and wait for it to run
```
opsdroid start
```
2. Press `ctrl+c` to exit the process
## Expected Functionality
The opsdroid process should exit on pressing `ctrl+c`.
## Experienced Functionality
The opsdroid process fails to exit and an exception is thrown. The debug log is as follows:
```
INFO opsdroid.logging: ========================================
INFO opsdroid.logging: Started opsdroid v0.16.0+82.g4c55e97
INFO opsdroid: ========================================
INFO opsdroid: You can customise your opsdroid by modifying your configuration.yaml
INFO opsdroid: Read more at: http://opsdroid.readthedocs.io/#configuration
INFO opsdroid: Watch the Get Started Videos at: http://bit.ly/2fnC0Fh
INFO opsdroid: Install Opsdroid Desktop at:
https://github.com/opsdroid/opsdroid-desktop/releases
INFO opsdroid: ========================================
WARNING opsdroid.loader: No databases in configuration.This will cause skills which store things in memory to lose data when opsdroid is restarted.
INFO opsdroid.connector.slack: Connecting to Slack
INFO opsdroid.connector.slack: Connected successfully
INFO opsdroid.web: Started web server on http://0.0.0.0:8080
INFO opsdroid.core: Opsdroid is now running, press ctrl+c to exit.
^CINFO opsdroid.core: Received stop signal, exiting.
INFO opsdroid.core: Removing skills...
INFO opsdroid.core: Removed hello
INFO opsdroid.core: Removed seen
INFO opsdroid.core: Removed help
INFO opsdroid.core: Stopping connector slack...
ERROR: Unhandled exception in opsdroid, exiting...
Caught exception
{'message': 'Task exception was never retrieved', 'exception': TypeError("object NoneType can't be used in 'await' expression",), 'future': <Task finished coro=<OpsDroid.handle_signal() done, defined at /home/daniccan/c8/OpsDroid/c8-alertbot/env/lib/python3.6/site-packages/opsdroid/core.py:147> exception=TypeError("object NoneType can't be used in 'await' expression",)>}
WARNING slack.rtm.client: Websocket was closed.
```
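The `TypeError("object NoneType can't be used in 'await' expression")` raised from `OpsDroid.handle_signal()` suggests a connector's `disconnect()` is awaiting something that returns `None`; in the Slack connector that would be `self.slack_rtm.stop()`, which appears to be a plain synchronous call in recent `slackclient` releases. A minimal sketch of the kind of change that avoids the error, assuming `ConnectorSlack.disconnect` currently awaits it:

```python
# opsdroid/connector/slack/__init__.py -- sketch of disconnect() that does not
# await RTMClient.stop(), since stop() returns None rather than a coroutine.
async def disconnect(self):
    """Disconnect from Slack."""
    self.slack_rtm.stop()  # synchronous; awaiting its None result raises TypeError
    self.listening = False
```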
## Versions
- **Opsdroid version:** master branch in git
- **Python version:** 3.6.8
- **OS/Docker version:** Ubuntu 18.04 LTS
## Configuration File
The configuration file used is included below.
```yaml
welcome-message: true
connectors:
- name: slack
api-token: "<Bot OAuth Token>"
skills:
- name: hello
- name: seen
- name: help
```
## Additional Details
Any other details you wish to include such as screenshots, console messages, etc.
| [
{
"content": "\"\"\"A connector for Slack.\"\"\"\nimport logging\nimport re\nimport ssl\nimport certifi\n\nimport slack\nfrom emoji import demojize\n\nfrom opsdroid.connector import Connector, register_event\nfrom opsdroid.events import Message, Reaction\nfrom opsdroid.connector.slack.events import Blocks\n\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass ConnectorSlack(Connector):\n \"\"\"A connector for Slack.\"\"\"\n\n def __init__(self, config, opsdroid=None):\n \"\"\"Create the connector.\"\"\"\n super().__init__(config, opsdroid=opsdroid)\n _LOGGER.debug(_(\"Starting Slack connector\"))\n self.name = \"slack\"\n self.default_target = config.get(\"default-room\", \"#general\")\n self.icon_emoji = config.get(\"icon-emoji\", \":robot_face:\")\n self.token = config[\"api-token\"]\n self.timeout = config.get(\"connect-timeout\", 10)\n self.ssl_context = ssl.create_default_context(cafile=certifi.where())\n self.slack = slack.WebClient(\n token=self.token, run_async=True, ssl=self.ssl_context\n )\n self.slack_rtm = slack.RTMClient(\n token=self.token, run_async=True, ssl=self.ssl_context\n )\n self.websocket = None\n self.bot_name = config.get(\"bot-name\", \"opsdroid\")\n self.auth_info = None\n self.user_info = None\n self.bot_id = None\n self.known_users = {}\n self.keepalive = None\n self.reconnecting = False\n self.listening = True\n self._message_id = 0\n\n # Register callbacks\n slack.RTMClient.on(event=\"message\", callback=self.process_message)\n\n async def connect(self):\n \"\"\"Connect to the chat service.\"\"\"\n _LOGGER.info(_(\"Connecting to Slack\"))\n\n try:\n # The slack library recommends you call `self.slack_rtm.start()`` here but it\n # seems to mess with the event loop's signal handlers which breaks opsdroid.\n # Therefore we need to directly call the private `_connect_and_read` method\n # instead. 
This method also blocks so we need to dispatch it to the loop as a task.\n self.opsdroid.eventloop.create_task(self.slack_rtm._connect_and_read())\n\n self.auth_info = (await self.slack.api_call(\"auth.test\")).data\n self.user_info = (\n await self.slack.api_call(\n \"users.info\",\n http_verb=\"GET\",\n params={\"user\": self.auth_info[\"user_id\"]},\n )\n ).data\n self.bot_id = self.user_info[\"user\"][\"profile\"][\"bot_id\"]\n\n _LOGGER.debug(_(\"Connected as %s\"), self.bot_name)\n _LOGGER.debug(_(\"Using icon %s\"), self.icon_emoji)\n _LOGGER.debug(_(\"Default room is %s\"), self.default_target)\n _LOGGER.info(_(\"Connected successfully\"))\n except slack.errors.SlackApiError as error:\n _LOGGER.error(\n _(\n \"Unable to connect to Slack due to %s - \"\n \"The Slack Connector will not be available.\"\n ),\n error,\n )\n except Exception:\n await self.disconnect()\n raise\n\n async def disconnect(self):\n \"\"\"Disconnect from Slack.\"\"\"\n await self.slack_rtm.stop()\n self.listening = False\n\n async def listen(self):\n \"\"\"Listen for and parse new messages.\"\"\"\n\n async def process_message(self, **payload):\n \"\"\"Process a raw message and pass it to the parser.\"\"\"\n message = payload[\"data\"]\n\n # Ignore message edits\n if \"subtype\" in message and message[\"subtype\"] == \"message_changed\":\n return\n\n # Ignore own messages\n if (\n \"subtype\" in message\n and message[\"subtype\"] == \"bot_message\"\n and message[\"bot_id\"] == self.bot_id\n ):\n return\n\n # Lookup username\n _LOGGER.debug(_(\"Looking up sender username\"))\n try:\n user_info = await self.lookup_username(message[\"user\"])\n except ValueError:\n return\n\n # Replace usernames in the message\n _LOGGER.debug(_(\"Replacing userids in message with usernames\"))\n message[\"text\"] = await self.replace_usernames(message[\"text\"])\n\n await self.opsdroid.parse(\n Message(\n message[\"text\"],\n user_info[\"name\"],\n message[\"channel\"],\n self,\n raw_event=message,\n )\n )\n\n @register_event(Message)\n async def send_message(self, message):\n \"\"\"Respond with a message.\"\"\"\n _LOGGER.debug(\n _(\"Responding with: '%s' in room %s\"), message.text, message.target\n )\n await self.slack.api_call(\n \"chat.postMessage\",\n data={\n \"channel\": message.target,\n \"text\": message.text,\n \"as_user\": False,\n \"username\": self.bot_name,\n \"icon_emoji\": self.icon_emoji,\n },\n )\n\n @register_event(Blocks)\n async def send_blocks(self, blocks):\n \"\"\"Respond with structured blocks.\"\"\"\n _LOGGER.debug(\n _(\"Responding with interactive blocks in room %s\"), blocks.target\n )\n await self.slack.api_call(\n \"chat.postMessage\",\n data={\n \"channel\": blocks.target,\n \"username\": self.bot_name,\n \"blocks\": blocks.blocks,\n \"icon_emoji\": self.icon_emoji,\n },\n )\n\n @register_event(Reaction)\n async def send_reaction(self, reaction):\n \"\"\"React to a message.\"\"\"\n emoji = demojize(reaction.emoji).replace(\":\", \"\")\n _LOGGER.debug(_(\"Reacting with: %s\"), emoji)\n try:\n await self.slack.api_call(\n \"reactions.add\",\n data={\n \"name\": emoji,\n \"channel\": reaction.target,\n \"timestamp\": reaction.linked_event.raw_event[\"ts\"],\n },\n )\n except slack.errors.SlackApiError as error:\n if \"invalid_name\" in str(error):\n _LOGGER.warning(_(\"Slack does not support the emoji %s\"), emoji)\n else:\n raise\n\n async def lookup_username(self, userid):\n \"\"\"Lookup a username and cache it.\"\"\"\n if userid in self.known_users:\n user_info = self.known_users[userid]\n else:\n 
response = await self.slack.users_info(user=userid)\n user_info = response.data[\"user\"]\n if isinstance(user_info, dict):\n self.known_users[userid] = user_info\n else:\n raise ValueError(\"Returned user is not a dict.\")\n return user_info\n\n async def replace_usernames(self, message):\n \"\"\"Replace User ID with username in message text.\"\"\"\n userids = re.findall(r\"\\<\\@([A-Z0-9]+)(?:\\|.+)?\\>\", message)\n for userid in userids:\n user_info = await self.lookup_username(userid)\n message = message.replace(\n \"<@{userid}>\".format(userid=userid), user_info[\"name\"]\n )\n return message\n",
"path": "opsdroid/connector/slack/__init__.py"
}
] | [
{
"content": "\"\"\"A connector for Slack.\"\"\"\nimport logging\nimport re\nimport ssl\nimport certifi\n\nimport slack\nfrom emoji import demojize\n\nfrom opsdroid.connector import Connector, register_event\nfrom opsdroid.events import Message, Reaction\nfrom opsdroid.connector.slack.events import Blocks\n\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass ConnectorSlack(Connector):\n \"\"\"A connector for Slack.\"\"\"\n\n def __init__(self, config, opsdroid=None):\n \"\"\"Create the connector.\"\"\"\n super().__init__(config, opsdroid=opsdroid)\n _LOGGER.debug(_(\"Starting Slack connector\"))\n self.name = \"slack\"\n self.default_target = config.get(\"default-room\", \"#general\")\n self.icon_emoji = config.get(\"icon-emoji\", \":robot_face:\")\n self.token = config[\"api-token\"]\n self.timeout = config.get(\"connect-timeout\", 10)\n self.ssl_context = ssl.create_default_context(cafile=certifi.where())\n self.slack = slack.WebClient(\n token=self.token, run_async=True, ssl=self.ssl_context\n )\n self.slack_rtm = slack.RTMClient(\n token=self.token, run_async=True, ssl=self.ssl_context\n )\n self.websocket = None\n self.bot_name = config.get(\"bot-name\", \"opsdroid\")\n self.auth_info = None\n self.user_info = None\n self.bot_id = None\n self.known_users = {}\n self.keepalive = None\n self.reconnecting = False\n self.listening = True\n self._message_id = 0\n\n # Register callbacks\n slack.RTMClient.on(event=\"message\", callback=self.process_message)\n\n async def connect(self):\n \"\"\"Connect to the chat service.\"\"\"\n _LOGGER.info(_(\"Connecting to Slack\"))\n\n try:\n # The slack library recommends you call `self.slack_rtm.start()`` here but it\n # seems to mess with the event loop's signal handlers which breaks opsdroid.\n # Therefore we need to directly call the private `_connect_and_read` method\n # instead. 
This method also blocks so we need to dispatch it to the loop as a task.\n self.opsdroid.eventloop.create_task(self.slack_rtm._connect_and_read())\n\n self.auth_info = (await self.slack.api_call(\"auth.test\")).data\n self.user_info = (\n await self.slack.api_call(\n \"users.info\",\n http_verb=\"GET\",\n params={\"user\": self.auth_info[\"user_id\"]},\n )\n ).data\n self.bot_id = self.user_info[\"user\"][\"profile\"][\"bot_id\"]\n\n _LOGGER.debug(_(\"Connected as %s\"), self.bot_name)\n _LOGGER.debug(_(\"Using icon %s\"), self.icon_emoji)\n _LOGGER.debug(_(\"Default room is %s\"), self.default_target)\n _LOGGER.info(_(\"Connected successfully\"))\n except slack.errors.SlackApiError as error:\n _LOGGER.error(\n _(\n \"Unable to connect to Slack due to %s - \"\n \"The Slack Connector will not be available.\"\n ),\n error,\n )\n except Exception:\n await self.disconnect()\n raise\n\n async def disconnect(self):\n \"\"\"Disconnect from Slack.\"\"\"\n self.slack_rtm.stop()\n self.listening = False\n\n async def listen(self):\n \"\"\"Listen for and parse new messages.\"\"\"\n\n async def process_message(self, **payload):\n \"\"\"Process a raw message and pass it to the parser.\"\"\"\n message = payload[\"data\"]\n\n # Ignore message edits\n if \"subtype\" in message and message[\"subtype\"] == \"message_changed\":\n return\n\n # Ignore own messages\n if (\n \"subtype\" in message\n and message[\"subtype\"] == \"bot_message\"\n and message[\"bot_id\"] == self.bot_id\n ):\n return\n\n # Lookup username\n _LOGGER.debug(_(\"Looking up sender username\"))\n try:\n user_info = await self.lookup_username(message[\"user\"])\n except ValueError:\n return\n\n # Replace usernames in the message\n _LOGGER.debug(_(\"Replacing userids in message with usernames\"))\n message[\"text\"] = await self.replace_usernames(message[\"text\"])\n\n await self.opsdroid.parse(\n Message(\n message[\"text\"],\n user_info[\"name\"],\n message[\"channel\"],\n self,\n raw_event=message,\n )\n )\n\n @register_event(Message)\n async def send_message(self, message):\n \"\"\"Respond with a message.\"\"\"\n _LOGGER.debug(\n _(\"Responding with: '%s' in room %s\"), message.text, message.target\n )\n await self.slack.api_call(\n \"chat.postMessage\",\n data={\n \"channel\": message.target,\n \"text\": message.text,\n \"as_user\": False,\n \"username\": self.bot_name,\n \"icon_emoji\": self.icon_emoji,\n },\n )\n\n @register_event(Blocks)\n async def send_blocks(self, blocks):\n \"\"\"Respond with structured blocks.\"\"\"\n _LOGGER.debug(\n _(\"Responding with interactive blocks in room %s\"), blocks.target\n )\n await self.slack.api_call(\n \"chat.postMessage\",\n data={\n \"channel\": blocks.target,\n \"username\": self.bot_name,\n \"blocks\": blocks.blocks,\n \"icon_emoji\": self.icon_emoji,\n },\n )\n\n @register_event(Reaction)\n async def send_reaction(self, reaction):\n \"\"\"React to a message.\"\"\"\n emoji = demojize(reaction.emoji).replace(\":\", \"\")\n _LOGGER.debug(_(\"Reacting with: %s\"), emoji)\n try:\n await self.slack.api_call(\n \"reactions.add\",\n data={\n \"name\": emoji,\n \"channel\": reaction.target,\n \"timestamp\": reaction.linked_event.raw_event[\"ts\"],\n },\n )\n except slack.errors.SlackApiError as error:\n if \"invalid_name\" in str(error):\n _LOGGER.warning(_(\"Slack does not support the emoji %s\"), emoji)\n else:\n raise\n\n async def lookup_username(self, userid):\n \"\"\"Lookup a username and cache it.\"\"\"\n if userid in self.known_users:\n user_info = self.known_users[userid]\n else:\n 
response = await self.slack.users_info(user=userid)\n user_info = response.data[\"user\"]\n if isinstance(user_info, dict):\n self.known_users[userid] = user_info\n else:\n raise ValueError(\"Returned user is not a dict.\")\n return user_info\n\n async def replace_usernames(self, message):\n \"\"\"Replace User ID with username in message text.\"\"\"\n userids = re.findall(r\"\\<\\@([A-Z0-9]+)(?:\\|.+)?\\>\", message)\n for userid in userids:\n user_info = await self.lookup_username(userid)\n message = message.replace(\n \"<@{userid}>\".format(userid=userid), user_info[\"name\"]\n )\n return message\n",
"path": "opsdroid/connector/slack/__init__.py"
}
] | diff --git a/opsdroid/connector/slack/__init__.py b/opsdroid/connector/slack/__init__.py
index 833b32730..6cca1d43e 100644
--- a/opsdroid/connector/slack/__init__.py
+++ b/opsdroid/connector/slack/__init__.py
@@ -87,7 +87,7 @@ async def connect(self):
     async def disconnect(self):
         """Disconnect from Slack."""
-        await self.slack_rtm.stop()
+        self.slack_rtm.stop()
         self.listening = False
 
     async def listen(self):
|
TabbycatDebate__tabbycat-2348 | Crash when generating QF draw (WS)
**Running:** a1ca1a390866199e1884db12c215ddaa867a98dc
When generating the draw for the first elimination round in a WS tournament, I encountered this exception:
```python
[2023-07-09 12:01:47,564] ERROR django.request: Internal Server Error: /xxx-yyz/admin/draw/round/7/create/
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/django/core/handlers/exception.py", line 56, in inner
response = get_response(request)
File "/usr/local/lib/python3.9/site-packages/django/core/handlers/base.py", line 197, in _get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "/usr/local/lib/python3.9/site-packages/django/views/generic/base.py", line 103, in view
return self.dispatch(request, *args, **kwargs)
File "/usr/local/lib/python3.9/site-packages/django/contrib/auth/mixins.py", line 135, in dispatch
return super().dispatch(request, *args, **kwargs)
File "/tcd/tabbycat/tournaments/mixins.py", line 125, in dispatch
return super().dispatch(request, *args, **kwargs)
File "/usr/local/lib/python3.9/site-packages/django/views/generic/base.py", line 142, in dispatch
return handler(request, *args, **kwargs)
File "/tcd/tabbycat/draw/views.py", line 664, in post
manager.create()
File "/tcd/tabbycat/draw/manager.py", line 157, in create
drawer = DrawGenerator(self.teams_in_debate, generator_type, teams,
File "/tcd/tabbycat/draw/generator/__init__.py", line 93, in DrawGenerator
return klass(teams, results, rrseq, **kwargs)
File "/tcd/tabbycat/draw/generator/common.py", line 182, in __init__
super().__init__(teams, results, rrseq, **kwargs)
File "/tcd/tabbycat/draw/generator/common.py", line 73, in __init__
raise ValueError("Unrecognised options: " + ", ".join(unrecognised))
ValueError: Unrecognised options: avoid_conflicts
```
I quickly patched around it like so and we manually confirmed the draw was correct:
```diff
diff --git a/tabbycat/draw/generator/common.py b/tabbycat/draw/generator/common.py
index 2a61de6ea..3d7167aa1 100644
--- a/tabbycat/draw/generator/common.py
+++ b/tabbycat/draw/generator/common.py
@@ -68,9 +68,10 @@ class BaseDrawGenerator:
         # Compute the full dictionary of default options
         self.options = self.BASE_DEFAULT_OPTIONS.copy()
         self.options.update(self.DEFAULT_OPTIONS)
+        print(self.__class__)
         unrecognised = [key for key in kwargs if key not in self.options]
-        if unrecognised:
-            raise ValueError("Unrecognised options: " + ", ".join(unrecognised))
+#        if unrecognised:
+#            raise ValueError("Unrecognised options: " + ", ".join(unrecognised))
         self.options.update(kwargs)
 
     def generate(self):
```
Of course, this is not a fix for the problem, just avoiding the symptoms. **I intend to find the cause of this issue and fix it in the following days**, but I'm dropping an issue here so I don't forget.
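For context, here is a minimal, self-contained sketch (simplified stand-in classes, not the real Tabbycat code) of why the option check rejects `avoid_conflicts`, and why declaring the option among the generator's defaults — which is what the accompanying patch later does for `BasePairDrawGenerator` — makes the same call succeed:
```python
class SketchDrawGenerator:
    # Trimmed-down stand-ins for the real BASE_DEFAULT_OPTIONS / DEFAULT_OPTIONS.
    BASE_DEFAULT_OPTIONS = {"side_penalty": 0}
    DEFAULT_OPTIONS = {}

    def __init__(self, **kwargs):
        # Same validation pattern as BaseDrawGenerator.__init__: merge the
        # defaults, then reject any keyword argument that is not a known option.
        self.options = self.BASE_DEFAULT_OPTIONS.copy()
        self.options.update(self.DEFAULT_OPTIONS)
        unrecognised = [key for key in kwargs if key not in self.options]
        if unrecognised:
            raise ValueError("Unrecognised options: " + ", ".join(unrecognised))
        self.options.update(kwargs)


try:
    SketchDrawGenerator(avoid_conflicts="off")  # caller passes an undeclared option
except ValueError as exc:
    print(exc)  # -> Unrecognised options: avoid_conflicts

# Declaring the option among the defaults (as the accompanying patch does for
# BasePairDrawGenerator) makes the same call succeed.
SketchDrawGenerator.BASE_DEFAULT_OPTIONS["avoid_conflicts"] = "off"
print(SketchDrawGenerator(avoid_conflicts="off").options)
```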
| [
{
"content": "import logging\n\nfrom django.utils.translation import gettext as _\n\nlogger = logging.getLogger(__name__)\n\n\nclass BaseDrawError(Exception):\n pass\n\n\nclass DrawUserError(BaseDrawError):\n \"\"\"DrawUserError is raised by any DrawGenerator class when a problem that\n would appear to be user error prevents a draw from being produced.\n DrawUserErrors are caught by the view class and shown to the user as an\n error message.\n\n Because DrawUserErrors expected and rectifier, the strings that go into them\n should be internationalised (marked for translation).\"\"\"\n pass\n\n\nclass DrawFatalError(BaseDrawError):\n \"\"\"DrawAlgorithmError is raised by any DrawGenerator class when a problem\n that is an error condition that should never (ever) happen prevents a draw\n from being produced. DrawAlgorithmError are also caught by the view class\n and shown to the user as an error message. However, because they should\n never happen, their messages are not internationalised, since that just\n creates unnecessary work for translators.\"\"\"\n pass\n\n\nclass BaseDrawGenerator:\n \"\"\"Base class for generators for all draw types, for both two-team and BP.\n \"\"\"\n\n # Subclasses must define BASE_DEFAULT_OPTIONS\n\n requires_even_teams = True\n requires_prev_results = False\n requires_rrseq = False\n\n def __init__(self, teams, results=None, rrseq=None, **kwargs):\n self.teams = teams\n self.team_flags = dict()\n self.results = results\n self.rrseq = rrseq\n\n if self.requires_even_teams:\n if not len(self.teams) % self.TEAMS_PER_DEBATE == 0:\n raise DrawUserError(_(\"The number of teams presented for the draw was not \"\n \"a multiple of %(num)d.\") % {'num': self.TEAMS_PER_DEBATE})\n if not self.teams:\n raise DrawUserError(_(\"There were no teams for the draw.\"))\n\n if results is None and self.requires_prev_results:\n raise TypeError(\"'results' is required for draw of type {0:s}\".format(\n self.__class__.__name__))\n\n if results is not None and not self.requires_prev_results:\n logger.warning(\"'results' not required for draw of type %s, will probably be ignored\",\n self.__class__.__name__)\n\n if rrseq is None and self.requires_rrseq:\n raise TypeError(\"'rrseq' (round robin sequence) is required for draw of type {0:s}\".format(\n self.__class__.__name__))\n\n # Compute the full dictionary of default options\n self.options = self.BASE_DEFAULT_OPTIONS.copy()\n self.options.update(self.DEFAULT_OPTIONS)\n unrecognised = [key for key in kwargs if key not in self.options]\n if unrecognised:\n raise ValueError(\"Unrecognised options: \" + \", \".join(unrecognised))\n self.options.update(kwargs)\n\n def generate(self):\n \"\"\"Abstract method.\"\"\"\n raise NotImplementedError\n\n def get_option_function(self, option_name, option_dict):\n option = self.options[option_name]\n if callable(option):\n return option\n try:\n return getattr(self, option_dict[option])\n except KeyError:\n raise ValueError(\"Invalid option for {1}: {0}\".format(option, option_name))\n\n def add_team_flag(self, team, flag):\n \"\"\"Attaches a flag to a team.\n Child classes may use this when flags should follow teams, but\n eventually be attached to pairings.\"\"\"\n flags = self.team_flags.setdefault(team, list())\n flags.append(flag)\n\n def annotate_team_flags(self, pairings):\n \"\"\"Applies the team flags to the pairings given.\n Child classes that use team flags should call this method as the last\n thing before the draw is returned.\"\"\"\n for pairing in pairings:\n for team in pairing.teams:\n 
if team in self.team_flags:\n pairing.add_team_flags(team, self.team_flags[team])\n\n @classmethod\n def available_options(cls):\n keys = set(cls.BASE_DEFAULT_OPTIONS.keys())\n keys |= set(cls.DEFAULT_OPTIONS.keys())\n return sorted(list(keys))\n\n def check_teams_for_attribute(self, name, choices=None, checkfunc=None):\n \"\"\"Checks that all teams have the specified attribute, and raises a\n DrawFatalError if they don't. This should be called during the\n constructor. Note: Whether to run this check will sometimes be\n conditional on options supplied to the DrawGenerator. 'name' is the name\n of the attribute. 'choices', if specified, is a list of allowed values\n for the attribute.\n \"\"\"\n has_attribute = [hasattr(x, name) for x in self.teams]\n if not all(has_attribute):\n offending_teams = has_attribute.count(False)\n raise DrawFatalError(\"{0} out of {1} teams don't have a '{name}' attribute.\".format(\n offending_teams, len(self.teams), name=name))\n\n if choices:\n attribute_value_valid = [getattr(x, name) in choices for x in self.teams]\n elif checkfunc:\n attribute_value_valid = [checkfunc(getattr(x, name)) for x in self.teams]\n else:\n return\n\n if not all(attribute_value_valid):\n offending_teams = attribute_value_valid.count(False)\n message = \"{0} out of {1} teams have an invalid '{name}' attribute.\".format(offending_teams, len(self.teams), name=name)\n if choices:\n message += \" Valid choices: \" + \", \".join(map(repr, choices))\n raise DrawFatalError(message)\n\n\nclass BasePairDrawGenerator(BaseDrawGenerator):\n \"\"\"Base class for generators for all draw types.\n Options:\n \"side_allocations\" - Side allocation method, one of:\n \"balance\" - the team that has affirmed less in prior rounds affirms,\n or randomly if both teams have affirmed the same number of times.\n If used, team objects must have an `side_history` attribute.\n \"preallocated\" - teams were pre-allocated sides. If used, teams must\n have an 'allocated_side' attribute.\n \"none\" - leave sides as they were when the pairings were drawn.\n (This is almost never desirable.)\n \"random\" - allocate randomly.\n \"avoid_history\" - if True, draw tries to avoid pairing teams that have\n seen each other before, and tries harder if they've seen each other\n multiple times.\n \"history_penalty\" -\n \"avoid_institution\" - if True, draw tries to avoid pairing teams that\n are from the same institution.\n \"side_penalty\" - A penalty to apply when optimizing with side balance\n \"\"\"\n\n BASE_DEFAULT_OPTIONS = {\n \"side_allocations\" : \"balance\",\n \"avoid_history\" : True,\n \"avoid_institution\" : True,\n \"history_penalty\" : 1e3,\n \"institution_penalty\" : 1,\n \"side_penalty\" : 0,\n \"pullup_debates_penalty\": 0,\n \"pairing_penalty\" : 0,\n }\n\n TEAMS_PER_DEBATE = 2\n\n requires_even_teams = True\n requires_prev_results = False\n requires_rrseq = False\n\n # All subclasses must define this with any options that may exist.\n DEFAULT_OPTIONS = {}\n\n def __init__(self, teams, results=None, rrseq=None, **kwargs):\n super().__init__(teams, results, rrseq, **kwargs)\n\n # Check for required team attributes. 
Subclasses might do more.\n if self.options[\"avoid_history\"]:\n self.check_teams_for_attribute(\"seen\", checkfunc=callable)\n if self.options[\"avoid_institution\"]:\n self.check_teams_for_attribute(\"institution\")\n\n def allocate_sides(self, pairings):\n if self.options[\"side_allocations\"] == \"balance\":\n for pairing in pairings:\n pairing.balance_sides()\n elif self.options[\"side_allocations\"] == \"random\":\n for pairing in pairings:\n pairing.shuffle_sides()\n elif self.options[\"side_allocations\"] not in [\"none\", \"preallocated\"]:\n raise ValueError(\"side_allocations setting not recognized: {0!r}\".format(self.options[\"side_allocations\"]))\n\n\nclass BaseBPDrawGenerator(BaseDrawGenerator):\n BASE_DEFAULT_OPTIONS = {}\n TEAMS_PER_DEBATE = 4\n\n\nclass EliminationDrawMixin:\n \"\"\"Mixin for elimination draws.\"\"\"\n\n def generate(self):\n pairings = self.make_pairings()\n self.shuffle_sides(pairings)\n return pairings\n\n def shuffle_sides(self, pairings):\n for pairing in pairings:\n pairing.shuffle_sides()\n\n def make_pairings(self):\n raise NotImplementedError\n\n\nclass ManualDrawGenerator(BaseDrawGenerator):\n \"\"\"Returns an empty draw.\n Since this doesn't really do anything, it works for both two-team and BP.\n \"\"\"\n DEFAULT_OPTIONS = {}\n BASE_DEFAULT_OPTIONS = {}\n requires_even_teams = False\n requires_prev_results = False\n\n def generate(self):\n return []\n",
"path": "tabbycat/draw/generator/common.py"
}
] | [
{
"content": "import logging\n\nfrom django.utils.translation import gettext as _\n\nlogger = logging.getLogger(__name__)\n\n\nclass BaseDrawError(Exception):\n pass\n\n\nclass DrawUserError(BaseDrawError):\n \"\"\"DrawUserError is raised by any DrawGenerator class when a problem that\n would appear to be user error prevents a draw from being produced.\n DrawUserErrors are caught by the view class and shown to the user as an\n error message.\n\n Because DrawUserErrors expected and rectifier, the strings that go into them\n should be internationalised (marked for translation).\"\"\"\n pass\n\n\nclass DrawFatalError(BaseDrawError):\n \"\"\"DrawAlgorithmError is raised by any DrawGenerator class when a problem\n that is an error condition that should never (ever) happen prevents a draw\n from being produced. DrawAlgorithmError are also caught by the view class\n and shown to the user as an error message. However, because they should\n never happen, their messages are not internationalised, since that just\n creates unnecessary work for translators.\"\"\"\n pass\n\n\nclass BaseDrawGenerator:\n \"\"\"Base class for generators for all draw types, for both two-team and BP.\n \"\"\"\n\n # Subclasses must define BASE_DEFAULT_OPTIONS\n\n requires_even_teams = True\n requires_prev_results = False\n requires_rrseq = False\n\n def __init__(self, teams, results=None, rrseq=None, **kwargs):\n self.teams = teams\n self.team_flags = dict()\n self.results = results\n self.rrseq = rrseq\n\n if self.requires_even_teams:\n if not len(self.teams) % self.TEAMS_PER_DEBATE == 0:\n raise DrawUserError(_(\"The number of teams presented for the draw was not \"\n \"a multiple of %(num)d.\") % {'num': self.TEAMS_PER_DEBATE})\n if not self.teams:\n raise DrawUserError(_(\"There were no teams for the draw.\"))\n\n if results is None and self.requires_prev_results:\n raise TypeError(\"'results' is required for draw of type {0:s}\".format(\n self.__class__.__name__))\n\n if results is not None and not self.requires_prev_results:\n logger.warning(\"'results' not required for draw of type %s, will probably be ignored\",\n self.__class__.__name__)\n\n if rrseq is None and self.requires_rrseq:\n raise TypeError(\"'rrseq' (round robin sequence) is required for draw of type {0:s}\".format(\n self.__class__.__name__))\n\n # Compute the full dictionary of default options\n self.options = self.BASE_DEFAULT_OPTIONS.copy()\n self.options.update(self.DEFAULT_OPTIONS)\n unrecognised = [key for key in kwargs if key not in self.options]\n if unrecognised:\n raise ValueError(\"Unrecognised options: \" + \", \".join(unrecognised))\n self.options.update(kwargs)\n\n def generate(self):\n \"\"\"Abstract method.\"\"\"\n raise NotImplementedError\n\n def get_option_function(self, option_name, option_dict):\n option = self.options[option_name]\n if callable(option):\n return option\n try:\n return getattr(self, option_dict[option])\n except KeyError:\n raise ValueError(\"Invalid option for {1}: {0}\".format(option, option_name))\n\n def add_team_flag(self, team, flag):\n \"\"\"Attaches a flag to a team.\n Child classes may use this when flags should follow teams, but\n eventually be attached to pairings.\"\"\"\n flags = self.team_flags.setdefault(team, list())\n flags.append(flag)\n\n def annotate_team_flags(self, pairings):\n \"\"\"Applies the team flags to the pairings given.\n Child classes that use team flags should call this method as the last\n thing before the draw is returned.\"\"\"\n for pairing in pairings:\n for team in pairing.teams:\n 
if team in self.team_flags:\n pairing.add_team_flags(team, self.team_flags[team])\n\n @classmethod\n def available_options(cls):\n keys = set(cls.BASE_DEFAULT_OPTIONS.keys())\n keys |= set(cls.DEFAULT_OPTIONS.keys())\n return sorted(list(keys))\n\n def check_teams_for_attribute(self, name, choices=None, checkfunc=None):\n \"\"\"Checks that all teams have the specified attribute, and raises a\n DrawFatalError if they don't. This should be called during the\n constructor. Note: Whether to run this check will sometimes be\n conditional on options supplied to the DrawGenerator. 'name' is the name\n of the attribute. 'choices', if specified, is a list of allowed values\n for the attribute.\n \"\"\"\n has_attribute = [hasattr(x, name) for x in self.teams]\n if not all(has_attribute):\n offending_teams = has_attribute.count(False)\n raise DrawFatalError(\"{0} out of {1} teams don't have a '{name}' attribute.\".format(\n offending_teams, len(self.teams), name=name))\n\n if choices:\n attribute_value_valid = [getattr(x, name) in choices for x in self.teams]\n elif checkfunc:\n attribute_value_valid = [checkfunc(getattr(x, name)) for x in self.teams]\n else:\n return\n\n if not all(attribute_value_valid):\n offending_teams = attribute_value_valid.count(False)\n message = \"{0} out of {1} teams have an invalid '{name}' attribute.\".format(offending_teams, len(self.teams), name=name)\n if choices:\n message += \" Valid choices: \" + \", \".join(map(repr, choices))\n raise DrawFatalError(message)\n\n\nclass BasePairDrawGenerator(BaseDrawGenerator):\n \"\"\"Base class for generators for all draw types.\n Options:\n \"side_allocations\" - Side allocation method, one of:\n \"balance\" - the team that has affirmed less in prior rounds affirms,\n or randomly if both teams have affirmed the same number of times.\n If used, team objects must have an `side_history` attribute.\n \"preallocated\" - teams were pre-allocated sides. If used, teams must\n have an 'allocated_side' attribute.\n \"none\" - leave sides as they were when the pairings were drawn.\n (This is almost never desirable.)\n \"random\" - allocate randomly.\n \"avoid_history\" - if True, draw tries to avoid pairing teams that have\n seen each other before, and tries harder if they've seen each other\n multiple times.\n \"history_penalty\" -\n \"avoid_institution\" - if True, draw tries to avoid pairing teams that\n are from the same institution.\n \"side_penalty\" - A penalty to apply when optimizing with side balance\n \"\"\"\n\n BASE_DEFAULT_OPTIONS = {\n \"side_allocations\" : \"balance\",\n \"avoid_history\" : True,\n \"avoid_institution\" : True,\n \"history_penalty\" : 1e3,\n \"institution_penalty\" : 1,\n \"side_penalty\" : 0,\n \"pullup_debates_penalty\": 0,\n \"pairing_penalty\" : 0,\n \"avoid_conflicts\" : \"off\",\n }\n\n TEAMS_PER_DEBATE = 2\n\n requires_even_teams = True\n requires_prev_results = False\n requires_rrseq = False\n\n # All subclasses must define this with any options that may exist.\n DEFAULT_OPTIONS = {}\n\n def __init__(self, teams, results=None, rrseq=None, **kwargs):\n super().__init__(teams, results, rrseq, **kwargs)\n\n # Check for required team attributes. 
Subclasses might do more.\n if self.options[\"avoid_history\"]:\n self.check_teams_for_attribute(\"seen\", checkfunc=callable)\n if self.options[\"avoid_institution\"]:\n self.check_teams_for_attribute(\"institution\")\n\n def allocate_sides(self, pairings):\n if self.options[\"side_allocations\"] == \"balance\":\n for pairing in pairings:\n pairing.balance_sides()\n elif self.options[\"side_allocations\"] == \"random\":\n for pairing in pairings:\n pairing.shuffle_sides()\n elif self.options[\"side_allocations\"] not in [\"none\", \"preallocated\"]:\n raise ValueError(\"side_allocations setting not recognized: {0!r}\".format(self.options[\"side_allocations\"]))\n\n\nclass BaseBPDrawGenerator(BaseDrawGenerator):\n BASE_DEFAULT_OPTIONS = {}\n TEAMS_PER_DEBATE = 4\n\n\nclass EliminationDrawMixin:\n \"\"\"Mixin for elimination draws.\"\"\"\n\n def generate(self):\n pairings = self.make_pairings()\n self.shuffle_sides(pairings)\n return pairings\n\n def shuffle_sides(self, pairings):\n for pairing in pairings:\n pairing.shuffle_sides()\n\n def make_pairings(self):\n raise NotImplementedError\n\n\nclass ManualDrawGenerator(BaseDrawGenerator):\n \"\"\"Returns an empty draw.\n Since this doesn't really do anything, it works for both two-team and BP.\n \"\"\"\n DEFAULT_OPTIONS = {}\n BASE_DEFAULT_OPTIONS = {}\n requires_even_teams = False\n requires_prev_results = False\n\n def generate(self):\n return []\n",
"path": "tabbycat/draw/generator/common.py"
}
] | diff --git a/tabbycat/draw/generator/common.py b/tabbycat/draw/generator/common.py
index 2a61de6ea3c..6b31bf373ee 100644
--- a/tabbycat/draw/generator/common.py
+++ b/tabbycat/draw/generator/common.py
@@ -167,6 +167,7 @@ class BasePairDrawGenerator(BaseDrawGenerator):
"side_penalty" : 0,
"pullup_debates_penalty": 0,
"pairing_penalty" : 0,
+ "avoid_conflicts" : "off",
}
TEAMS_PER_DEBATE = 2
|
openai__gym-2162 | close the env when finished
https://github.com/openai/gym/blob/345c65973fc7160d8be374745a60c36869d8accc/gym/envs/box2d/lunar_lander.py#L449
Shall we add `env.close()` before returning here?
I've seen the error below if it's not closed.
`ImportError: sys.meta_path is None, Python is likely shutting down`.
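For illustration, a minimal rollout sketch using the pre-0.26 Gym API from that era (the loop body is illustrative, not the exact `demo_heuristic_lander` code); the point is simply that `env.close()` runs before the interpreter starts shutting down, which releases the viewer and avoids the import error above:
```python
import gym

env = gym.make("LunarLander-v2")
env.reset()
done = False
while not done:
    env.render()
    _, _, done, _ = env.step(env.action_space.sample())
env.close()  # release the viewer explicitly instead of relying on interpreter teardown
```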
| [
{
"content": "\"\"\"\nRocket trajectory optimization is a classic topic in Optimal Control.\n\nAccording to Pontryagin's maximum principle it's optimal to fire engine full throttle or\nturn it off. That's the reason this environment is OK to have discreet actions (engine on or off).\n\nThe landing pad is always at coordinates (0,0). The coordinates are the first two numbers in the state vector.\nReward for moving from the top of the screen to the landing pad and zero speed is about 100..140 points.\nIf the lander moves away from the landing pad it loses reward. The episode finishes if the lander crashes or\ncomes to rest, receiving an additional -100 or +100 points. Each leg with ground contact is +10 points.\nFiring the main engine is -0.3 points each frame. Firing the side engine is -0.03 points each frame.\nSolved is 200 points.\n\nLanding outside the landing pad is possible. Fuel is infinite, so an agent can learn to fly and then land\non its first attempt. Please see the source code for details.\n\nTo see a heuristic landing, run:\n\npython gym/envs/box2d/lunar_lander.py\n\nTo play yourself, run:\n\npython examples/agents/keyboard_agent.py LunarLander-v2\n\nCreated by Oleg Klimov. Licensed on the same terms as the rest of OpenAI Gym.\n\"\"\"\n\n\nimport sys, math\nimport numpy as np\n\nimport Box2D\nfrom Box2D.b2 import (edgeShape, circleShape, fixtureDef, polygonShape, revoluteJointDef, contactListener)\n\nimport gym\nfrom gym import spaces\nfrom gym.utils import seeding, EzPickle\n\nFPS = 50\nSCALE = 30.0 # affects how fast-paced the game is, forces should be adjusted as well\n\nMAIN_ENGINE_POWER = 13.0\nSIDE_ENGINE_POWER = 0.6\n\nINITIAL_RANDOM = 1000.0 # Set 1500 to make game harder\n\nLANDER_POLY =[\n (-14, +17), (-17, 0), (-17 ,-10),\n (+17, -10), (+17, 0), (+14, +17)\n ]\nLEG_AWAY = 20\nLEG_DOWN = 18\nLEG_W, LEG_H = 2, 8\nLEG_SPRING_TORQUE = 40\n\nSIDE_ENGINE_HEIGHT = 14.0\nSIDE_ENGINE_AWAY = 12.0\n\nVIEWPORT_W = 600\nVIEWPORT_H = 400\n\n\nclass ContactDetector(contactListener):\n def __init__(self, env):\n contactListener.__init__(self)\n self.env = env\n\n def BeginContact(self, contact):\n if self.env.lander == contact.fixtureA.body or self.env.lander == contact.fixtureB.body:\n self.env.game_over = True\n for i in range(2):\n if self.env.legs[i] in [contact.fixtureA.body, contact.fixtureB.body]:\n self.env.legs[i].ground_contact = True\n\n def EndContact(self, contact):\n for i in range(2):\n if self.env.legs[i] in [contact.fixtureA.body, contact.fixtureB.body]:\n self.env.legs[i].ground_contact = False\n\n\nclass LunarLander(gym.Env, EzPickle):\n metadata = {\n 'render.modes': ['human', 'rgb_array'],\n 'video.frames_per_second' : FPS\n }\n\n continuous = False\n\n def __init__(self):\n EzPickle.__init__(self)\n self.seed()\n self.viewer = None\n\n self.world = Box2D.b2World()\n self.moon = None\n self.lander = None\n self.particles = []\n\n self.prev_reward = None\n\n # useful range is -1 .. +1, but spikes can be higher\n self.observation_space = spaces.Box(-np.inf, np.inf, shape=(8,), dtype=np.float32)\n\n if self.continuous:\n # Action is two floats [main engine, left-right engines].\n # Main engine: -1..0 off, 0..+1 throttle from 50% to 100% power. 
Engine can't work with less than 50% power.\n # Left-right: -1.0..-0.5 fire left engine, +0.5..+1.0 fire right engine, -0.5..0.5 off\n self.action_space = spaces.Box(-1, +1, (2,), dtype=np.float32)\n else:\n # Nop, fire left engine, main engine, right engine\n self.action_space = spaces.Discrete(4)\n\n self.reset()\n\n def seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n\n def _destroy(self):\n if not self.moon: return\n self.world.contactListener = None\n self._clean_particles(True)\n self.world.DestroyBody(self.moon)\n self.moon = None\n self.world.DestroyBody(self.lander)\n self.lander = None\n self.world.DestroyBody(self.legs[0])\n self.world.DestroyBody(self.legs[1])\n\n def reset(self):\n self._destroy()\n self.world.contactListener_keepref = ContactDetector(self)\n self.world.contactListener = self.world.contactListener_keepref\n self.game_over = False\n self.prev_shaping = None\n\n W = VIEWPORT_W/SCALE\n H = VIEWPORT_H/SCALE\n\n # terrain\n CHUNKS = 11\n height = self.np_random.uniform(0, H/2, size=(CHUNKS+1,))\n chunk_x = [W/(CHUNKS-1)*i for i in range(CHUNKS)]\n self.helipad_x1 = chunk_x[CHUNKS//2-1]\n self.helipad_x2 = chunk_x[CHUNKS//2+1]\n self.helipad_y = H/4\n height[CHUNKS//2-2] = self.helipad_y\n height[CHUNKS//2-1] = self.helipad_y\n height[CHUNKS//2+0] = self.helipad_y\n height[CHUNKS//2+1] = self.helipad_y\n height[CHUNKS//2+2] = self.helipad_y\n smooth_y = [0.33*(height[i-1] + height[i+0] + height[i+1]) for i in range(CHUNKS)]\n\n self.moon = self.world.CreateStaticBody(shapes=edgeShape(vertices=[(0, 0), (W, 0)]))\n self.sky_polys = []\n for i in range(CHUNKS-1):\n p1 = (chunk_x[i], smooth_y[i])\n p2 = (chunk_x[i+1], smooth_y[i+1])\n self.moon.CreateEdgeFixture(\n vertices=[p1,p2],\n density=0,\n friction=0.1)\n self.sky_polys.append([p1, p2, (p2[0], H), (p1[0], H)])\n\n self.moon.color1 = (0.0, 0.0, 0.0)\n self.moon.color2 = (0.0, 0.0, 0.0)\n\n initial_y = VIEWPORT_H/SCALE\n self.lander = self.world.CreateDynamicBody(\n position=(VIEWPORT_W/SCALE/2, initial_y),\n angle=0.0,\n fixtures = fixtureDef(\n shape=polygonShape(vertices=[(x/SCALE, y/SCALE) for x, y in LANDER_POLY]),\n density=5.0,\n friction=0.1,\n categoryBits=0x0010,\n maskBits=0x001, # collide only with ground\n restitution=0.0) # 0.99 bouncy\n )\n self.lander.color1 = (0.5, 0.4, 0.9)\n self.lander.color2 = (0.3, 0.3, 0.5)\n self.lander.ApplyForceToCenter( (\n self.np_random.uniform(-INITIAL_RANDOM, INITIAL_RANDOM),\n self.np_random.uniform(-INITIAL_RANDOM, INITIAL_RANDOM)\n ), True)\n\n self.legs = []\n for i in [-1, +1]:\n leg = self.world.CreateDynamicBody(\n position=(VIEWPORT_W/SCALE/2 - i*LEG_AWAY/SCALE, initial_y),\n angle=(i * 0.05),\n fixtures=fixtureDef(\n shape=polygonShape(box=(LEG_W/SCALE, LEG_H/SCALE)),\n density=1.0,\n restitution=0.0,\n categoryBits=0x0020,\n maskBits=0x001)\n )\n leg.ground_contact = False\n leg.color1 = (0.5, 0.4, 0.9)\n leg.color2 = (0.3, 0.3, 0.5)\n rjd = revoluteJointDef(\n bodyA=self.lander,\n bodyB=leg,\n localAnchorA=(0, 0),\n localAnchorB=(i * LEG_AWAY/SCALE, LEG_DOWN/SCALE),\n enableMotor=True,\n enableLimit=True,\n maxMotorTorque=LEG_SPRING_TORQUE,\n motorSpeed=+0.3 * i # low enough not to jump back into the sky\n )\n if i == -1:\n rjd.lowerAngle = +0.9 - 0.5 # The most esoteric numbers here, angled legs have freedom to travel within\n rjd.upperAngle = +0.9\n else:\n rjd.lowerAngle = -0.9\n rjd.upperAngle = -0.9 + 0.5\n leg.joint = self.world.CreateJoint(rjd)\n self.legs.append(leg)\n\n self.drawlist = [self.lander] + 
self.legs\n\n return self.step(np.array([0, 0]) if self.continuous else 0)[0]\n\n def _create_particle(self, mass, x, y, ttl):\n p = self.world.CreateDynamicBody(\n position = (x, y),\n angle=0.0,\n fixtures = fixtureDef(\n shape=circleShape(radius=2/SCALE, pos=(0, 0)),\n density=mass,\n friction=0.1,\n categoryBits=0x0100,\n maskBits=0x001, # collide only with ground\n restitution=0.3)\n )\n p.ttl = ttl\n self.particles.append(p)\n self._clean_particles(False)\n return p\n\n def _clean_particles(self, all):\n while self.particles and (all or self.particles[0].ttl < 0):\n self.world.DestroyBody(self.particles.pop(0))\n\n def step(self, action):\n if self.continuous:\n action = np.clip(action, -1, +1).astype(np.float32)\n else:\n assert self.action_space.contains(action), \"%r (%s) invalid \" % (action, type(action))\n\n # Engines\n tip = (math.sin(self.lander.angle), math.cos(self.lander.angle))\n side = (-tip[1], tip[0])\n dispersion = [self.np_random.uniform(-1.0, +1.0) / SCALE for _ in range(2)]\n\n m_power = 0.0\n if (self.continuous and action[0] > 0.0) or (not self.continuous and action == 2):\n # Main engine\n if self.continuous:\n m_power = (np.clip(action[0], 0.0,1.0) + 1.0)*0.5 # 0.5..1.0\n assert m_power >= 0.5 and m_power <= 1.0\n else:\n m_power = 1.0\n ox = (tip[0] * (4/SCALE + 2 * dispersion[0]) +\n side[0] * dispersion[1]) # 4 is move a bit downwards, +-2 for randomness\n oy = -tip[1] * (4/SCALE + 2 * dispersion[0]) - side[1] * dispersion[1]\n impulse_pos = (self.lander.position[0] + ox, self.lander.position[1] + oy)\n p = self._create_particle(3.5, # 3.5 is here to make particle speed adequate\n impulse_pos[0],\n impulse_pos[1],\n m_power) # particles are just a decoration\n p.ApplyLinearImpulse((ox * MAIN_ENGINE_POWER * m_power, oy * MAIN_ENGINE_POWER * m_power),\n impulse_pos,\n True)\n self.lander.ApplyLinearImpulse((-ox * MAIN_ENGINE_POWER * m_power, -oy * MAIN_ENGINE_POWER * m_power),\n impulse_pos,\n True)\n\n s_power = 0.0\n if (self.continuous and np.abs(action[1]) > 0.5) or (not self.continuous and action in [1, 3]):\n # Orientation engines\n if self.continuous:\n direction = np.sign(action[1])\n s_power = np.clip(np.abs(action[1]), 0.5, 1.0)\n assert s_power >= 0.5 and s_power <= 1.0\n else:\n direction = action-2\n s_power = 1.0\n ox = tip[0] * dispersion[0] + side[0] * (3 * dispersion[1] + direction * SIDE_ENGINE_AWAY/SCALE)\n oy = -tip[1] * dispersion[0] - side[1] * (3 * dispersion[1] + direction * SIDE_ENGINE_AWAY/SCALE)\n impulse_pos = (self.lander.position[0] + ox - tip[0] * 17/SCALE,\n self.lander.position[1] + oy + tip[1] * SIDE_ENGINE_HEIGHT/SCALE)\n p = self._create_particle(0.7, impulse_pos[0], impulse_pos[1], s_power)\n p.ApplyLinearImpulse((ox * SIDE_ENGINE_POWER * s_power, oy * SIDE_ENGINE_POWER * s_power),\n impulse_pos\n , True)\n self.lander.ApplyLinearImpulse((-ox * SIDE_ENGINE_POWER * s_power, -oy * SIDE_ENGINE_POWER * s_power),\n impulse_pos,\n True)\n\n self.world.Step(1.0/FPS, 6*30, 2*30)\n\n pos = self.lander.position\n vel = self.lander.linearVelocity\n state = [\n (pos.x - VIEWPORT_W/SCALE/2) / (VIEWPORT_W/SCALE/2),\n (pos.y - (self.helipad_y+LEG_DOWN/SCALE)) / (VIEWPORT_H/SCALE/2),\n vel.x*(VIEWPORT_W/SCALE/2)/FPS,\n vel.y*(VIEWPORT_H/SCALE/2)/FPS,\n self.lander.angle,\n 20.0*self.lander.angularVelocity/FPS,\n 1.0 if self.legs[0].ground_contact else 0.0,\n 1.0 if self.legs[1].ground_contact else 0.0\n ]\n assert len(state) == 8\n\n reward = 0\n shaping = \\\n - 100*np.sqrt(state[0]*state[0] + state[1]*state[1]) \\\n - 
100*np.sqrt(state[2]*state[2] + state[3]*state[3]) \\\n - 100*abs(state[4]) + 10*state[6] + 10*state[7] # And ten points for legs contact, the idea is if you\n # lose contact again after landing, you get negative reward\n if self.prev_shaping is not None:\n reward = shaping - self.prev_shaping\n self.prev_shaping = shaping\n\n reward -= m_power*0.30 # less fuel spent is better, about -30 for heuristic landing\n reward -= s_power*0.03\n\n done = False\n if self.game_over or abs(state[0]) >= 1.0:\n done = True\n reward = -100\n if not self.lander.awake:\n done = True\n reward = +100\n return np.array(state, dtype=np.float32), reward, done, {}\n\n def render(self, mode='human'):\n from gym.envs.classic_control import rendering\n if self.viewer is None:\n self.viewer = rendering.Viewer(VIEWPORT_W, VIEWPORT_H)\n self.viewer.set_bounds(0, VIEWPORT_W/SCALE, 0, VIEWPORT_H/SCALE)\n\n for obj in self.particles:\n obj.ttl -= 0.15\n obj.color1 = (max(0.2, 0.2+obj.ttl), max(0.2, 0.5*obj.ttl), max(0.2, 0.5*obj.ttl))\n obj.color2 = (max(0.2, 0.2+obj.ttl), max(0.2, 0.5*obj.ttl), max(0.2, 0.5*obj.ttl))\n\n self._clean_particles(False)\n\n for p in self.sky_polys:\n self.viewer.draw_polygon(p, color=(0, 0, 0))\n\n for obj in self.particles + self.drawlist:\n for f in obj.fixtures:\n trans = f.body.transform\n if type(f.shape) is circleShape:\n t = rendering.Transform(translation=trans*f.shape.pos)\n self.viewer.draw_circle(f.shape.radius, 20, color=obj.color1).add_attr(t)\n self.viewer.draw_circle(f.shape.radius, 20, color=obj.color2, filled=False, linewidth=2).add_attr(t)\n else:\n path = [trans*v for v in f.shape.vertices]\n self.viewer.draw_polygon(path, color=obj.color1)\n path.append(path[0])\n self.viewer.draw_polyline(path, color=obj.color2, linewidth=2)\n\n for x in [self.helipad_x1, self.helipad_x2]:\n flagy1 = self.helipad_y\n flagy2 = flagy1 + 50/SCALE\n self.viewer.draw_polyline([(x, flagy1), (x, flagy2)], color=(1, 1, 1))\n self.viewer.draw_polygon([(x, flagy2), (x, flagy2-10/SCALE), (x + 25/SCALE, flagy2 - 5/SCALE)],\n color=(0.8, 0.8, 0))\n\n return self.viewer.render(return_rgb_array=mode == 'rgb_array')\n\n def close(self):\n if self.viewer is not None:\n self.viewer.close()\n self.viewer = None\n\n\nclass LunarLanderContinuous(LunarLander):\n continuous = True\n\ndef heuristic(env, s):\n \"\"\"\n The heuristic for\n 1. Testing\n 2. Demonstration rollout.\n\n Args:\n env: The environment\n s (list): The state. 
Attributes:\n s[0] is the horizontal coordinate\n s[1] is the vertical coordinate\n s[2] is the horizontal speed\n s[3] is the vertical speed\n s[4] is the angle\n s[5] is the angular speed\n s[6] 1 if first leg has contact, else 0\n s[7] 1 if second leg has contact, else 0\n returns:\n a: The heuristic to be fed into the step function defined above to determine the next step and reward.\n \"\"\"\n\n angle_targ = s[0]*0.5 + s[2]*1.0 # angle should point towards center\n if angle_targ > 0.4: angle_targ = 0.4 # more than 0.4 radians (22 degrees) is bad\n if angle_targ < -0.4: angle_targ = -0.4\n hover_targ = 0.55*np.abs(s[0]) # target y should be proportional to horizontal offset\n\n angle_todo = (angle_targ - s[4]) * 0.5 - (s[5])*1.0\n hover_todo = (hover_targ - s[1])*0.5 - (s[3])*0.5\n\n if s[6] or s[7]: # legs have contact\n angle_todo = 0\n hover_todo = -(s[3])*0.5 # override to reduce fall speed, that's all we need after contact\n\n if env.continuous:\n a = np.array([hover_todo*20 - 1, -angle_todo*20])\n a = np.clip(a, -1, +1)\n else:\n a = 0\n if hover_todo > np.abs(angle_todo) and hover_todo > 0.05: a = 2\n elif angle_todo < -0.05: a = 3\n elif angle_todo > +0.05: a = 1\n return a\n\ndef demo_heuristic_lander(env, seed=None, render=False):\n env.seed(seed)\n total_reward = 0\n steps = 0\n s = env.reset()\n while True:\n a = heuristic(env, s)\n s, r, done, info = env.step(a)\n total_reward += r\n\n if render:\n still_open = env.render()\n if still_open == False: break\n\n if steps % 20 == 0 or done:\n print(\"observations:\", \" \".join([\"{:+0.2f}\".format(x) for x in s]))\n print(\"step {} total_reward {:+0.2f}\".format(steps, total_reward))\n steps += 1\n if done: break\n return total_reward\n\n\nif __name__ == '__main__':\n demo_heuristic_lander(LunarLander(), render=True)\n",
"path": "gym/envs/box2d/lunar_lander.py"
}
] | [
{
"content": "\"\"\"\nRocket trajectory optimization is a classic topic in Optimal Control.\n\nAccording to Pontryagin's maximum principle it's optimal to fire engine full throttle or\nturn it off. That's the reason this environment is OK to have discreet actions (engine on or off).\n\nThe landing pad is always at coordinates (0,0). The coordinates are the first two numbers in the state vector.\nReward for moving from the top of the screen to the landing pad and zero speed is about 100..140 points.\nIf the lander moves away from the landing pad it loses reward. The episode finishes if the lander crashes or\ncomes to rest, receiving an additional -100 or +100 points. Each leg with ground contact is +10 points.\nFiring the main engine is -0.3 points each frame. Firing the side engine is -0.03 points each frame.\nSolved is 200 points.\n\nLanding outside the landing pad is possible. Fuel is infinite, so an agent can learn to fly and then land\non its first attempt. Please see the source code for details.\n\nTo see a heuristic landing, run:\n\npython gym/envs/box2d/lunar_lander.py\n\nTo play yourself, run:\n\npython examples/agents/keyboard_agent.py LunarLander-v2\n\nCreated by Oleg Klimov. Licensed on the same terms as the rest of OpenAI Gym.\n\"\"\"\n\n\nimport sys, math\nimport numpy as np\n\nimport Box2D\nfrom Box2D.b2 import (edgeShape, circleShape, fixtureDef, polygonShape, revoluteJointDef, contactListener)\n\nimport gym\nfrom gym import spaces\nfrom gym.utils import seeding, EzPickle\n\nFPS = 50\nSCALE = 30.0 # affects how fast-paced the game is, forces should be adjusted as well\n\nMAIN_ENGINE_POWER = 13.0\nSIDE_ENGINE_POWER = 0.6\n\nINITIAL_RANDOM = 1000.0 # Set 1500 to make game harder\n\nLANDER_POLY =[\n (-14, +17), (-17, 0), (-17 ,-10),\n (+17, -10), (+17, 0), (+14, +17)\n ]\nLEG_AWAY = 20\nLEG_DOWN = 18\nLEG_W, LEG_H = 2, 8\nLEG_SPRING_TORQUE = 40\n\nSIDE_ENGINE_HEIGHT = 14.0\nSIDE_ENGINE_AWAY = 12.0\n\nVIEWPORT_W = 600\nVIEWPORT_H = 400\n\n\nclass ContactDetector(contactListener):\n def __init__(self, env):\n contactListener.__init__(self)\n self.env = env\n\n def BeginContact(self, contact):\n if self.env.lander == contact.fixtureA.body or self.env.lander == contact.fixtureB.body:\n self.env.game_over = True\n for i in range(2):\n if self.env.legs[i] in [contact.fixtureA.body, contact.fixtureB.body]:\n self.env.legs[i].ground_contact = True\n\n def EndContact(self, contact):\n for i in range(2):\n if self.env.legs[i] in [contact.fixtureA.body, contact.fixtureB.body]:\n self.env.legs[i].ground_contact = False\n\n\nclass LunarLander(gym.Env, EzPickle):\n metadata = {\n 'render.modes': ['human', 'rgb_array'],\n 'video.frames_per_second' : FPS\n }\n\n continuous = False\n\n def __init__(self):\n EzPickle.__init__(self)\n self.seed()\n self.viewer = None\n\n self.world = Box2D.b2World()\n self.moon = None\n self.lander = None\n self.particles = []\n\n self.prev_reward = None\n\n # useful range is -1 .. +1, but spikes can be higher\n self.observation_space = spaces.Box(-np.inf, np.inf, shape=(8,), dtype=np.float32)\n\n if self.continuous:\n # Action is two floats [main engine, left-right engines].\n # Main engine: -1..0 off, 0..+1 throttle from 50% to 100% power. 
Engine can't work with less than 50% power.\n # Left-right: -1.0..-0.5 fire left engine, +0.5..+1.0 fire right engine, -0.5..0.5 off\n self.action_space = spaces.Box(-1, +1, (2,), dtype=np.float32)\n else:\n # Nop, fire left engine, main engine, right engine\n self.action_space = spaces.Discrete(4)\n\n self.reset()\n\n def seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n\n def _destroy(self):\n if not self.moon: return\n self.world.contactListener = None\n self._clean_particles(True)\n self.world.DestroyBody(self.moon)\n self.moon = None\n self.world.DestroyBody(self.lander)\n self.lander = None\n self.world.DestroyBody(self.legs[0])\n self.world.DestroyBody(self.legs[1])\n\n def reset(self):\n self._destroy()\n self.world.contactListener_keepref = ContactDetector(self)\n self.world.contactListener = self.world.contactListener_keepref\n self.game_over = False\n self.prev_shaping = None\n\n W = VIEWPORT_W/SCALE\n H = VIEWPORT_H/SCALE\n\n # terrain\n CHUNKS = 11\n height = self.np_random.uniform(0, H/2, size=(CHUNKS+1,))\n chunk_x = [W/(CHUNKS-1)*i for i in range(CHUNKS)]\n self.helipad_x1 = chunk_x[CHUNKS//2-1]\n self.helipad_x2 = chunk_x[CHUNKS//2+1]\n self.helipad_y = H/4\n height[CHUNKS//2-2] = self.helipad_y\n height[CHUNKS//2-1] = self.helipad_y\n height[CHUNKS//2+0] = self.helipad_y\n height[CHUNKS//2+1] = self.helipad_y\n height[CHUNKS//2+2] = self.helipad_y\n smooth_y = [0.33*(height[i-1] + height[i+0] + height[i+1]) for i in range(CHUNKS)]\n\n self.moon = self.world.CreateStaticBody(shapes=edgeShape(vertices=[(0, 0), (W, 0)]))\n self.sky_polys = []\n for i in range(CHUNKS-1):\n p1 = (chunk_x[i], smooth_y[i])\n p2 = (chunk_x[i+1], smooth_y[i+1])\n self.moon.CreateEdgeFixture(\n vertices=[p1,p2],\n density=0,\n friction=0.1)\n self.sky_polys.append([p1, p2, (p2[0], H), (p1[0], H)])\n\n self.moon.color1 = (0.0, 0.0, 0.0)\n self.moon.color2 = (0.0, 0.0, 0.0)\n\n initial_y = VIEWPORT_H/SCALE\n self.lander = self.world.CreateDynamicBody(\n position=(VIEWPORT_W/SCALE/2, initial_y),\n angle=0.0,\n fixtures = fixtureDef(\n shape=polygonShape(vertices=[(x/SCALE, y/SCALE) for x, y in LANDER_POLY]),\n density=5.0,\n friction=0.1,\n categoryBits=0x0010,\n maskBits=0x001, # collide only with ground\n restitution=0.0) # 0.99 bouncy\n )\n self.lander.color1 = (0.5, 0.4, 0.9)\n self.lander.color2 = (0.3, 0.3, 0.5)\n self.lander.ApplyForceToCenter( (\n self.np_random.uniform(-INITIAL_RANDOM, INITIAL_RANDOM),\n self.np_random.uniform(-INITIAL_RANDOM, INITIAL_RANDOM)\n ), True)\n\n self.legs = []\n for i in [-1, +1]:\n leg = self.world.CreateDynamicBody(\n position=(VIEWPORT_W/SCALE/2 - i*LEG_AWAY/SCALE, initial_y),\n angle=(i * 0.05),\n fixtures=fixtureDef(\n shape=polygonShape(box=(LEG_W/SCALE, LEG_H/SCALE)),\n density=1.0,\n restitution=0.0,\n categoryBits=0x0020,\n maskBits=0x001)\n )\n leg.ground_contact = False\n leg.color1 = (0.5, 0.4, 0.9)\n leg.color2 = (0.3, 0.3, 0.5)\n rjd = revoluteJointDef(\n bodyA=self.lander,\n bodyB=leg,\n localAnchorA=(0, 0),\n localAnchorB=(i * LEG_AWAY/SCALE, LEG_DOWN/SCALE),\n enableMotor=True,\n enableLimit=True,\n maxMotorTorque=LEG_SPRING_TORQUE,\n motorSpeed=+0.3 * i # low enough not to jump back into the sky\n )\n if i == -1:\n rjd.lowerAngle = +0.9 - 0.5 # The most esoteric numbers here, angled legs have freedom to travel within\n rjd.upperAngle = +0.9\n else:\n rjd.lowerAngle = -0.9\n rjd.upperAngle = -0.9 + 0.5\n leg.joint = self.world.CreateJoint(rjd)\n self.legs.append(leg)\n\n self.drawlist = [self.lander] + 
self.legs\n\n return self.step(np.array([0, 0]) if self.continuous else 0)[0]\n\n def _create_particle(self, mass, x, y, ttl):\n p = self.world.CreateDynamicBody(\n position = (x, y),\n angle=0.0,\n fixtures = fixtureDef(\n shape=circleShape(radius=2/SCALE, pos=(0, 0)),\n density=mass,\n friction=0.1,\n categoryBits=0x0100,\n maskBits=0x001, # collide only with ground\n restitution=0.3)\n )\n p.ttl = ttl\n self.particles.append(p)\n self._clean_particles(False)\n return p\n\n def _clean_particles(self, all):\n while self.particles and (all or self.particles[0].ttl < 0):\n self.world.DestroyBody(self.particles.pop(0))\n\n def step(self, action):\n if self.continuous:\n action = np.clip(action, -1, +1).astype(np.float32)\n else:\n assert self.action_space.contains(action), \"%r (%s) invalid \" % (action, type(action))\n\n # Engines\n tip = (math.sin(self.lander.angle), math.cos(self.lander.angle))\n side = (-tip[1], tip[0])\n dispersion = [self.np_random.uniform(-1.0, +1.0) / SCALE for _ in range(2)]\n\n m_power = 0.0\n if (self.continuous and action[0] > 0.0) or (not self.continuous and action == 2):\n # Main engine\n if self.continuous:\n m_power = (np.clip(action[0], 0.0,1.0) + 1.0)*0.5 # 0.5..1.0\n assert m_power >= 0.5 and m_power <= 1.0\n else:\n m_power = 1.0\n ox = (tip[0] * (4/SCALE + 2 * dispersion[0]) +\n side[0] * dispersion[1]) # 4 is move a bit downwards, +-2 for randomness\n oy = -tip[1] * (4/SCALE + 2 * dispersion[0]) - side[1] * dispersion[1]\n impulse_pos = (self.lander.position[0] + ox, self.lander.position[1] + oy)\n p = self._create_particle(3.5, # 3.5 is here to make particle speed adequate\n impulse_pos[0],\n impulse_pos[1],\n m_power) # particles are just a decoration\n p.ApplyLinearImpulse((ox * MAIN_ENGINE_POWER * m_power, oy * MAIN_ENGINE_POWER * m_power),\n impulse_pos,\n True)\n self.lander.ApplyLinearImpulse((-ox * MAIN_ENGINE_POWER * m_power, -oy * MAIN_ENGINE_POWER * m_power),\n impulse_pos,\n True)\n\n s_power = 0.0\n if (self.continuous and np.abs(action[1]) > 0.5) or (not self.continuous and action in [1, 3]):\n # Orientation engines\n if self.continuous:\n direction = np.sign(action[1])\n s_power = np.clip(np.abs(action[1]), 0.5, 1.0)\n assert s_power >= 0.5 and s_power <= 1.0\n else:\n direction = action-2\n s_power = 1.0\n ox = tip[0] * dispersion[0] + side[0] * (3 * dispersion[1] + direction * SIDE_ENGINE_AWAY/SCALE)\n oy = -tip[1] * dispersion[0] - side[1] * (3 * dispersion[1] + direction * SIDE_ENGINE_AWAY/SCALE)\n impulse_pos = (self.lander.position[0] + ox - tip[0] * 17/SCALE,\n self.lander.position[1] + oy + tip[1] * SIDE_ENGINE_HEIGHT/SCALE)\n p = self._create_particle(0.7, impulse_pos[0], impulse_pos[1], s_power)\n p.ApplyLinearImpulse((ox * SIDE_ENGINE_POWER * s_power, oy * SIDE_ENGINE_POWER * s_power),\n impulse_pos\n , True)\n self.lander.ApplyLinearImpulse((-ox * SIDE_ENGINE_POWER * s_power, -oy * SIDE_ENGINE_POWER * s_power),\n impulse_pos,\n True)\n\n self.world.Step(1.0/FPS, 6*30, 2*30)\n\n pos = self.lander.position\n vel = self.lander.linearVelocity\n state = [\n (pos.x - VIEWPORT_W/SCALE/2) / (VIEWPORT_W/SCALE/2),\n (pos.y - (self.helipad_y+LEG_DOWN/SCALE)) / (VIEWPORT_H/SCALE/2),\n vel.x*(VIEWPORT_W/SCALE/2)/FPS,\n vel.y*(VIEWPORT_H/SCALE/2)/FPS,\n self.lander.angle,\n 20.0*self.lander.angularVelocity/FPS,\n 1.0 if self.legs[0].ground_contact else 0.0,\n 1.0 if self.legs[1].ground_contact else 0.0\n ]\n assert len(state) == 8\n\n reward = 0\n shaping = \\\n - 100*np.sqrt(state[0]*state[0] + state[1]*state[1]) \\\n - 
100*np.sqrt(state[2]*state[2] + state[3]*state[3]) \\\n - 100*abs(state[4]) + 10*state[6] + 10*state[7] # And ten points for legs contact, the idea is if you\n # lose contact again after landing, you get negative reward\n if self.prev_shaping is not None:\n reward = shaping - self.prev_shaping\n self.prev_shaping = shaping\n\n reward -= m_power*0.30 # less fuel spent is better, about -30 for heuristic landing\n reward -= s_power*0.03\n\n done = False\n if self.game_over or abs(state[0]) >= 1.0:\n done = True\n reward = -100\n if not self.lander.awake:\n done = True\n reward = +100\n return np.array(state, dtype=np.float32), reward, done, {}\n\n def render(self, mode='human'):\n from gym.envs.classic_control import rendering\n if self.viewer is None:\n self.viewer = rendering.Viewer(VIEWPORT_W, VIEWPORT_H)\n self.viewer.set_bounds(0, VIEWPORT_W/SCALE, 0, VIEWPORT_H/SCALE)\n\n for obj in self.particles:\n obj.ttl -= 0.15\n obj.color1 = (max(0.2, 0.2+obj.ttl), max(0.2, 0.5*obj.ttl), max(0.2, 0.5*obj.ttl))\n obj.color2 = (max(0.2, 0.2+obj.ttl), max(0.2, 0.5*obj.ttl), max(0.2, 0.5*obj.ttl))\n\n self._clean_particles(False)\n\n for p in self.sky_polys:\n self.viewer.draw_polygon(p, color=(0, 0, 0))\n\n for obj in self.particles + self.drawlist:\n for f in obj.fixtures:\n trans = f.body.transform\n if type(f.shape) is circleShape:\n t = rendering.Transform(translation=trans*f.shape.pos)\n self.viewer.draw_circle(f.shape.radius, 20, color=obj.color1).add_attr(t)\n self.viewer.draw_circle(f.shape.radius, 20, color=obj.color2, filled=False, linewidth=2).add_attr(t)\n else:\n path = [trans*v for v in f.shape.vertices]\n self.viewer.draw_polygon(path, color=obj.color1)\n path.append(path[0])\n self.viewer.draw_polyline(path, color=obj.color2, linewidth=2)\n\n for x in [self.helipad_x1, self.helipad_x2]:\n flagy1 = self.helipad_y\n flagy2 = flagy1 + 50/SCALE\n self.viewer.draw_polyline([(x, flagy1), (x, flagy2)], color=(1, 1, 1))\n self.viewer.draw_polygon([(x, flagy2), (x, flagy2-10/SCALE), (x + 25/SCALE, flagy2 - 5/SCALE)],\n color=(0.8, 0.8, 0))\n\n return self.viewer.render(return_rgb_array=mode == 'rgb_array')\n\n def close(self):\n if self.viewer is not None:\n self.viewer.close()\n self.viewer = None\n\n\nclass LunarLanderContinuous(LunarLander):\n continuous = True\n\ndef heuristic(env, s):\n \"\"\"\n The heuristic for\n 1. Testing\n 2. Demonstration rollout.\n\n Args:\n env: The environment\n s (list): The state. 
Attributes:\n s[0] is the horizontal coordinate\n s[1] is the vertical coordinate\n s[2] is the horizontal speed\n s[3] is the vertical speed\n s[4] is the angle\n s[5] is the angular speed\n s[6] 1 if first leg has contact, else 0\n s[7] 1 if second leg has contact, else 0\n returns:\n a: The heuristic to be fed into the step function defined above to determine the next step and reward.\n \"\"\"\n\n angle_targ = s[0]*0.5 + s[2]*1.0 # angle should point towards center\n if angle_targ > 0.4: angle_targ = 0.4 # more than 0.4 radians (22 degrees) is bad\n if angle_targ < -0.4: angle_targ = -0.4\n hover_targ = 0.55*np.abs(s[0]) # target y should be proportional to horizontal offset\n\n angle_todo = (angle_targ - s[4]) * 0.5 - (s[5])*1.0\n hover_todo = (hover_targ - s[1])*0.5 - (s[3])*0.5\n\n if s[6] or s[7]: # legs have contact\n angle_todo = 0\n hover_todo = -(s[3])*0.5 # override to reduce fall speed, that's all we need after contact\n\n if env.continuous:\n a = np.array([hover_todo*20 - 1, -angle_todo*20])\n a = np.clip(a, -1, +1)\n else:\n a = 0\n if hover_todo > np.abs(angle_todo) and hover_todo > 0.05: a = 2\n elif angle_todo < -0.05: a = 3\n elif angle_todo > +0.05: a = 1\n return a\n\ndef demo_heuristic_lander(env, seed=None, render=False):\n env.seed(seed)\n total_reward = 0\n steps = 0\n s = env.reset()\n while True:\n a = heuristic(env, s)\n s, r, done, info = env.step(a)\n total_reward += r\n\n if render:\n still_open = env.render()\n if still_open == False: break\n\n if steps % 20 == 0 or done:\n print(\"observations:\", \" \".join([\"{:+0.2f}\".format(x) for x in s]))\n print(\"step {} total_reward {:+0.2f}\".format(steps, total_reward))\n steps += 1\n if done: break\n if render:\n env.close()\n return total_reward\n\n\nif __name__ == '__main__':\n demo_heuristic_lander(LunarLander(), render=True)\n",
"path": "gym/envs/box2d/lunar_lander.py"
}
] | diff --git a/gym/envs/box2d/lunar_lander.py b/gym/envs/box2d/lunar_lander.py
index 247184591f7..b83256cd271 100644
--- a/gym/envs/box2d/lunar_lander.py
+++ b/gym/envs/box2d/lunar_lander.py
@@ -446,6 +446,8 @@ def demo_heuristic_lander(env, seed=None, render=False):
print("step {} total_reward {:+0.2f}".format(steps, total_reward))
steps += 1
if done: break
+ if render:
+ env.close()
return total_reward
|
liqd__a4-meinberlin-2857 | ValueError: Missing staticfiles manifest entry for 'js/select_dropdown_init.js'
https://sentry.liqd.net/sentry/meinberlin-dev/issues/1032/
```
ValueError: Missing staticfiles manifest entry for 'js/select_dropdown_init.js'
(35 additional frame(s) were not displayed)
...
File "django/templatetags/static.py", line 118, in handle_simple
return staticfiles_storage.url(path)
File "django_cloudflare_push/middleware.py", line 47, in url
return super(DebugStaticFilesStorage, self).url(path)
File "django/contrib/staticfiles/storage.py", line 153, in url
return self._url(self.stored_name, name, force)
File "django/contrib/staticfiles/storage.py", line 132, in _url
hashed_name = hashed_name_func(*args)
File "django/contrib/staticfiles/storage.py", line 420, in stored_name
raise ValueError("Missing staticfiles manifest entry for '%s'" % clean_name)
Internal Server Error: /kiezkasse/create/module/kiezkasse-2/
```
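For illustration, a hedged sketch of the pattern involved (a hypothetical form, not the real `MapIdeaForm`): with a manifest-based static files storage, every path listed in a form's `Media` must match an entry produced by `collectstatic`, otherwise rendering the form media raises exactly the `ValueError` shown above.
```python
from django import forms


class ExampleForm(forms.Form):  # hypothetical form used only to show the pattern
    class Media:
        # The string must match the name under which the asset is collected
        # into the staticfiles manifest; a stale or wrong prefix (for example
        # 'js/select_dropdown_init.js' vs 'select_dropdown_init.js') triggers
        # "Missing staticfiles manifest entry for '<path>'" when the form
        # media is rendered with a manifest-based storage backend.
        js = ("select_dropdown_init.js",)
```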
| [
{
"content": "from django import forms\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom adhocracy4.categories.forms import CategorizableFieldMixin\nfrom adhocracy4.labels.mixins import LabelsAddableFieldMixin\nfrom adhocracy4.maps import widgets as maps_widgets\nfrom meinberlin.apps.contrib.mixins import ImageRightOfUseMixin\n\nfrom . import models\n\n\nclass MapIdeaForm(CategorizableFieldMixin,\n LabelsAddableFieldMixin,\n ImageRightOfUseMixin):\n\n def __init__(self, *args, **kwargs):\n self.settings = kwargs.pop('settings_instance')\n super().__init__(*args, **kwargs)\n self.fields['point'].widget = maps_widgets.MapChoosePointWidget(\n polygon=self.settings.polygon)\n self.fields['point'].error_messages['required'] = _(\n 'Please locate your proposal on the map.')\n\n class Media:\n js = ('js/select_dropdown_init.js',)\n\n class Meta:\n model = models.MapIdea\n fields = ['name', 'description', 'image', 'category',\n 'labels', 'point', 'point_label']\n\n\nclass MapIdeaModerateForm(forms.ModelForm):\n class Meta:\n model = models.MapIdea\n fields = ['moderator_feedback']\n",
"path": "meinberlin/apps/mapideas/forms.py"
}
] | [
{
"content": "from django import forms\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom adhocracy4.categories.forms import CategorizableFieldMixin\nfrom adhocracy4.labels.mixins import LabelsAddableFieldMixin\nfrom adhocracy4.maps import widgets as maps_widgets\nfrom meinberlin.apps.contrib.mixins import ImageRightOfUseMixin\n\nfrom . import models\n\n\nclass MapIdeaForm(CategorizableFieldMixin,\n LabelsAddableFieldMixin,\n ImageRightOfUseMixin):\n\n def __init__(self, *args, **kwargs):\n self.settings = kwargs.pop('settings_instance')\n super().__init__(*args, **kwargs)\n self.fields['point'].widget = maps_widgets.MapChoosePointWidget(\n polygon=self.settings.polygon)\n self.fields['point'].error_messages['required'] = _(\n 'Please locate your proposal on the map.')\n\n class Media:\n js = ('select_dropdown_init.js',)\n\n class Meta:\n model = models.MapIdea\n fields = ['name', 'description', 'image', 'category',\n 'labels', 'point', 'point_label']\n\n\nclass MapIdeaModerateForm(forms.ModelForm):\n class Meta:\n model = models.MapIdea\n fields = ['moderator_feedback']\n",
"path": "meinberlin/apps/mapideas/forms.py"
}
] | diff --git a/meinberlin/apps/mapideas/forms.py b/meinberlin/apps/mapideas/forms.py
index 18eaf8c807..e69a32391d 100644
--- a/meinberlin/apps/mapideas/forms.py
+++ b/meinberlin/apps/mapideas/forms.py
@@ -22,7 +22,7 @@ def __init__(self, *args, **kwargs):
'Please locate your proposal on the map.')
class Media:
- js = ('js/select_dropdown_init.js',)
+ js = ('select_dropdown_init.js',)
class Meta:
model = models.MapIdea
|
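The fix drops the `js/` prefix from the form's Media declaration so the path matches an entry in the collected staticfiles manifest. A minimal sketch of how a Django form's Media class resolves such paths is shown below; the form and field names are illustrative, not the project's MapIdeaForm:

```python
from django import forms


class ExampleForm(forms.Form):
    """Illustrative form, not the project's MapIdeaForm."""

    title = forms.CharField()

    class Media:
        # Each entry is resolved through staticfiles_storage.url() when
        # {{ form.media }} is rendered; with a manifest-based storage the
        # path must exist in staticfiles.json, otherwise rendering raises
        # "Missing staticfiles manifest entry for '<path>'".
        js = ('select_dropdown_init.js',)
```

Rendering `{{ form.media }}` in a template emits `<script>` tags whose URLs come from `staticfiles_storage.url()`, which is where the traceback above originates.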
elastic__apm-agent-python-1466 | Missing HTTP status code since version 6.3.1 using Starlette
**Describe the bug**: HTTP status code is missing when using agent versions > 6.2.3 and Starlette
**To Reproduce**
1. Create a hello world REST service with FastAPI and agent 6.7.2.
2. Send requests.
3. APM transactions are uploaded to Elasticsearch but are missing the HTTP status code.
**Environment (please complete the following information)**
- OS: Any; happens on macOS and in Docker containers
- Python version: 3.9.7
- Framework and version: Starlette 0.17.1
- APM Server version: 7.12.0
- Agent version: 6.3.1 - 6.7.2
**Additional context**
- Agent config options
<details>
<summary>Click to expand</summary>
```
# Nothing special here, app is just the FastAPI instance
def configure_apm(app):
apm_config = {
"SERVICE_NAME": APPLICATION_NAME,
"SERVER_URL": os.environ.get("ELASTIC_APM_SERVER_HOST"),
"SECRET_TOKEN": os.environ.get("ELASTIC_APM_SECRET_TOKEN"),
"ENVIRONMENT": os.environ.get("ENVIRONMENT", "staging").lower(),
}
apm = make_apm_client(apm_config)
app.add_middleware(ElasticAPM, client=apm)
```
</details>
- `requirements.txt`:
<details>
<summary>Click to expand</summary>
```
elastic-apm==6.7.2
starlette==0.17.1
uvicorn==0.17.1
fastapi==0.73.0
```
</details>
- Example APM JSON document using agent 6.7.2:
<details>
```
{
"_index": "apm-7.12.0-transaction-000010",
"_type": "_doc",
"_id": "H-xCBn8BoEtMrRa0MKNx",
"_version": 1,
"_score": null,
"fields": {
"transaction.name.text": [
"GET /test/fran"
],
"user_agent.original.text": [
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.80 Safari/537.36"
],
"host.hostname": [
"MacBook-Pro-de-Fran.local"
],
"process.pid": [
70040
],
"service.language.name": [
"python"
],
"transaction.result": [
"HTTP 2xx"
],
"user_agent.os.version": [
"10.15.7"
],
"transaction.id": [
"d01f7447213a4374"
],
"http.request.method": [
"GET"
],
"processor.event": [
"transaction"
],
"agent.name": [
"python"
],
"host.name": [
"MacBook-Pro-de-Fran.local"
],
"user_agent.version": [
"98.0.4758.80"
],
"event.outcome": [
"success"
],
"user_agent.original": [
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.80 Safari/537.36"
],
"process.ppid": [
70038
],
"processor.name": [
"transaction"
],
"transaction.duration.us": [
642
],
"service.runtime.version": [
"3.9.7"
],
"user_agent.name": [
"Chrome"
],
"host.architecture": [
"arm64"
],
"timestamp.us": [
1645077472362757
],
"url.path": [
"/test/fran"
],
"ecs.version": [
"1.8.0"
],
"observer.type": [
"apm-server"
],
"observer.version": [
"7.12.0"
],
"agent.version": [
"6.7.2"
],
"transaction.name": [
"GET /test/fran"
],
"service.framework.version": [
"0.17.1"
],
"observer.name": [
"instance-0000000001"
],
"user_agent.os.full": [
"Mac OS X 10.15.7"
],
"service.node.name": [
"MacBook-Pro-de-Fran.local"
],
"url.scheme": [
"http"
],
"transaction.sampled": [
true
],
"user_agent.os.name": [
"Mac OS X"
],
"host.ip": [
"-"
],
"trace.id": [
"0c161d26c928799b770ccddcf4cfe3c4"
],
"transaction.span_count.dropped": [
0
],
"url.port": [
8000
],
"url.full": [
"http://localhost:8000/test/fran"
],
"service.environment": [
"staging"
],
"service.name": [
"test"
],
"service.framework.name": [
"starlette"
],
"service.runtime.name": [
"CPython"
],
"process.args": [
"/Users/fgarcia/miniconda3/envs/test-rest/lib/python3.9/site-packages/uvicorn/__main__.py",
"app.main:app",
"--reload"
],
"observer.version_major": [
7
],
"observer.hostname": [
"c2c026e5b645"
],
"transaction.type": [
"request"
],
"event.ingested": [
"2022-02-17T05:57:55.440Z"
],
"@timestamp": [
"2022-02-17T05:57:52.362Z"
],
"host.os.platform": [
"darwin"
],
"service.language.version": [
"3.9.7"
],
"url.domain": [
"localhost"
],
"user_agent.device.name": [
"Mac"
]
},
"highlight": {
"host.architecture": [
"@kibana-highlighted-field@arm64@/kibana-highlighted-field@"
],
"service.name": [
"@kibana-highlighted-field@test@/kibana-highlighted-field@"
],
"service.framework.name": [
"@kibana-highlighted-field@starlette@/kibana-highlighted-field@"
],
"processor.name": [
"@kibana-highlighted-field@transaction@/kibana-highlighted-field@"
],
"agent.version": [
"@[email protected]@/kibana-highlighted-field@"
]
},
"sort": [
1645077472362
]
}
```
</details>
- Example APM JSON document using agent 6.2.3:
<details>
```
{
"_index": "apm-7.12.0-transaction-000010",
"_type": "_doc",
"_id": "oOw-Bn8BoEtMrRa0M5-0",
"_version": 1,
"_score": null,
"fields": {
"transaction.name.text": [
"GET /test/fran"
],
"user_agent.original.text": [
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.80 Safari/537.36"
],
"host.hostname": [
"MacBook-Pro-de-Fran.local"
],
"process.pid": [
69858
],
"service.language.name": [
"python"
],
"transaction.result": [
"HTTP 2xx"
],
"user_agent.os.version": [
"10.15.7"
],
"transaction.id": [
"ab3e2d9c98d72380"
],
"http.request.method": [
"GET"
],
"processor.event": [
"transaction"
],
"agent.name": [
"python"
],
"host.name": [
"MacBook-Pro-de-Fran.local"
],
"user_agent.version": [
"-"
],
"http.response.status_code": [
200
],
"event.outcome": [
"success"
],
"user_agent.original": [
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.80 Safari/537.36"
],
"process.ppid": [
69856
],
"processor.name": [
"transaction"
],
"transaction.duration.us": [
656
],
"service.runtime.version": [
"3.9.7"
],
"user_agent.name": [
"Chrome"
],
"host.architecture": [
"arm64"
],
"timestamp.us": [
1645077212632517
],
"url.path": [
"/test/fran"
],
"ecs.version": [
"1.8.0"
],
"observer.type": [
"apm-server"
],
"observer.version": [
"7.12.0"
],
"agent.version": [
"6.2.3"
],
"transaction.name": [
"GET /test/fran"
],
"service.framework.version": [
"0.17.1"
],
"observer.name": [
"instance-0000000001"
],
"user_agent.os.full": [
"Mac OS X 10.15.7"
],
"service.node.name": [
"MacBook-Pro-de-Fran.local"
],
"url.scheme": [
"http"
],
"transaction.sampled": [
true
],
"user_agent.os.name": [
"Mac OS X"
],
"host.ip": [
"-"
],
"trace.id": [
"527836b27e7cfbe629eedca1f073ad38"
],
"transaction.span_count.dropped": [
0
],
"url.port": [
8000
],
"url.full": [
"http://localhost:8000/test/fran"
],
"service.environment": [
"staging"
],
"service.name": [
"test"
],
"service.framework.name": [
"starlette"
],
"service.runtime.name": [
"CPython"
],
"process.args": [
"/Users/fgarcia/miniconda3/envs/test-rest/lib/python3.9/site-packages/uvicorn/__main__.py",
"app.main:app",
"--reload"
],
"observer.version_major": [
7
],
"observer.hostname": [
"c2c026e5b645"
],
"transaction.type": [
"request"
],
"event.ingested": [
"2022-02-17T05:53:34.130Z"
],
"@timestamp": [
"2022-02-17T05:53:32.632Z"
],
"host.os.platform": [
"darwin"
],
"service.language.version": [
"3.9.7"
],
"url.domain": [
"localhost"
],
"user_agent.device.name": [
"Mac"
]
},
"highlight": {
"service.name": [
"@kibana-highlighted-field@test@/kibana-highlighted-field@"
],
"service.framework.name": [
"@kibana-highlighted-field@starlette@/kibana-highlighted-field@"
]
},
"sort": [
1645077212632
]
}
```
</details>
| [
{
"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\nimport asyncio\n\nfrom starlette.datastructures import Headers\nfrom starlette.requests import Request\nfrom starlette.types import Message\n\nfrom elasticapm.conf import Config, constants\nfrom elasticapm.utils import get_url_dict\n\n\nasync def get_data_from_request(request: Request, config: Config, event_type: str) -> dict:\n \"\"\"Loads data from incoming request for APM capturing.\n\n Args:\n request (Request)\n config (Config)\n event_type (str)\n\n Returns:\n dict\n \"\"\"\n result = {\n \"method\": request.method,\n \"socket\": {\"remote_address\": _get_client_ip(request)},\n \"cookies\": request.cookies,\n }\n if config.capture_headers:\n result[\"headers\"] = dict(request.headers)\n\n if request.method in constants.HTTP_WITH_BODY:\n if config.capture_body not in (\"all\", event_type):\n result[\"body\"] = \"[REDACTED]\"\n else:\n body = None\n try:\n body = await get_body(request)\n except Exception:\n pass\n if body is not None:\n result[\"body\"] = body\n\n result[\"url\"] = get_url_dict(str(request.url))\n\n return result\n\n\nasync def get_data_from_response(message: dict, config: Config, event_type: str) -> dict:\n \"\"\"Loads data from response for APM capturing.\n\n Args:\n message (dict)\n config (Config)\n event_type (str)\n\n Returns:\n dict\n \"\"\"\n result = {}\n\n if \"status_code\" in message:\n result[\"status_code\"] = message[\"status\"]\n\n if config.capture_headers and \"headers\" in message:\n headers = Headers(raw=message[\"headers\"])\n result[\"headers\"] = {key: \";\".join(headers.getlist(key)) for key in headers.keys()}\n\n return result\n\n\nasync def set_body(request: Request, body: bytes):\n \"\"\"Overwrites body in Starlette.\n\n Args:\n request (Request)\n body (bytes)\n \"\"\"\n\n async def receive() -> Message:\n await asyncio.sleep(0)\n return {\"type\": \"http.request\", \"body\": body}\n\n request._receive = receive\n\n\nasync def get_body(request: Request) -> str:\n 
\"\"\"Gets body from the request.\n\n When we consume the body, we replace the streaming mechanism with\n a mocked version -- this workaround came from\n https://github.com/encode/starlette/issues/495#issuecomment-513138055\n\n Args:\n request (Request)\n\n Returns:\n str\n \"\"\"\n body = await request.body()\n await set_body(request, body)\n\n request._stream_consumed = False\n\n return body.decode(\"utf-8\")\n\n\nasync def query_params_to_dict(query_params: str) -> dict:\n \"\"\"Transforms query params from URL to dictionary\n\n Args:\n query_params (str)\n\n Returns:\n dict\n\n Examples:\n >>> print(query_params_to_dict(b\"key=val&key2=val2\"))\n {\"key\": \"val\", \"key2\": \"val2\"}\n \"\"\"\n query_params = query_params.split(\"&\")\n res = {}\n for param in query_params:\n key, val = param.split(\"=\")\n res[key] = val\n\n return res\n\n\ndef _get_client_ip(request: Request):\n x_forwarded_for = request.headers.get(\"HTTP_X_FORWARDED_FOR\")\n if x_forwarded_for:\n ip = x_forwarded_for.split(\",\")[0]\n else:\n ip = request.headers.get(\"REMOTE_ADDR\")\n return ip\n",
"path": "elasticapm/contrib/starlette/utils.py"
}
] | [
{
"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\nimport asyncio\n\nfrom starlette.datastructures import Headers\nfrom starlette.requests import Request\nfrom starlette.types import Message\n\nfrom elasticapm.conf import Config, constants\nfrom elasticapm.utils import get_url_dict\n\n\nasync def get_data_from_request(request: Request, config: Config, event_type: str) -> dict:\n \"\"\"Loads data from incoming request for APM capturing.\n\n Args:\n request (Request)\n config (Config)\n event_type (str)\n\n Returns:\n dict\n \"\"\"\n result = {\n \"method\": request.method,\n \"socket\": {\"remote_address\": _get_client_ip(request)},\n \"cookies\": request.cookies,\n }\n if config.capture_headers:\n result[\"headers\"] = dict(request.headers)\n\n if request.method in constants.HTTP_WITH_BODY:\n if config.capture_body not in (\"all\", event_type):\n result[\"body\"] = \"[REDACTED]\"\n else:\n body = None\n try:\n body = await get_body(request)\n except Exception:\n pass\n if body is not None:\n result[\"body\"] = body\n\n result[\"url\"] = get_url_dict(str(request.url))\n\n return result\n\n\nasync def get_data_from_response(message: dict, config: Config, event_type: str) -> dict:\n \"\"\"Loads data from response for APM capturing.\n\n Args:\n message (dict)\n config (Config)\n event_type (str)\n\n Returns:\n dict\n \"\"\"\n result = {}\n\n if \"status\" in message:\n result[\"status_code\"] = message[\"status\"]\n\n if config.capture_headers and \"headers\" in message:\n headers = Headers(raw=message[\"headers\"])\n result[\"headers\"] = {key: \";\".join(headers.getlist(key)) for key in headers.keys()}\n\n return result\n\n\nasync def set_body(request: Request, body: bytes):\n \"\"\"Overwrites body in Starlette.\n\n Args:\n request (Request)\n body (bytes)\n \"\"\"\n\n async def receive() -> Message:\n await asyncio.sleep(0)\n return {\"type\": \"http.request\", \"body\": body}\n\n request._receive = receive\n\n\nasync def get_body(request: Request) -> str:\n \"\"\"Gets 
body from the request.\n\n When we consume the body, we replace the streaming mechanism with\n a mocked version -- this workaround came from\n https://github.com/encode/starlette/issues/495#issuecomment-513138055\n\n Args:\n request (Request)\n\n Returns:\n str\n \"\"\"\n body = await request.body()\n await set_body(request, body)\n\n request._stream_consumed = False\n\n return body.decode(\"utf-8\")\n\n\nasync def query_params_to_dict(query_params: str) -> dict:\n \"\"\"Transforms query params from URL to dictionary\n\n Args:\n query_params (str)\n\n Returns:\n dict\n\n Examples:\n >>> print(query_params_to_dict(b\"key=val&key2=val2\"))\n {\"key\": \"val\", \"key2\": \"val2\"}\n \"\"\"\n query_params = query_params.split(\"&\")\n res = {}\n for param in query_params:\n key, val = param.split(\"=\")\n res[key] = val\n\n return res\n\n\ndef _get_client_ip(request: Request):\n x_forwarded_for = request.headers.get(\"HTTP_X_FORWARDED_FOR\")\n if x_forwarded_for:\n ip = x_forwarded_for.split(\",\")[0]\n else:\n ip = request.headers.get(\"REMOTE_ADDR\")\n return ip\n",
"path": "elasticapm/contrib/starlette/utils.py"
}
] | diff --git a/elasticapm/contrib/starlette/utils.py b/elasticapm/contrib/starlette/utils.py
index 48ac251bd..f06c19055 100644
--- a/elasticapm/contrib/starlette/utils.py
+++ b/elasticapm/contrib/starlette/utils.py
@@ -86,7 +86,7 @@ async def get_data_from_response(message: dict, config: Config, event_type: str)
"""
result = {}
- if "status_code" in message:
+ if "status" in message:
result["status_code"] = message["status"]
if config.capture_headers and "headers" in message:
diff --git a/tests/contrib/asyncio/starlette_tests.py b/tests/contrib/asyncio/starlette_tests.py
index dc11aa021..99b3cc1d3 100644
--- a/tests/contrib/asyncio/starlette_tests.py
+++ b/tests/contrib/asyncio/starlette_tests.py
@@ -146,6 +146,10 @@ def test_get(app, elasticapm_client):
assert request["method"] == "GET"
assert request["socket"] == {"remote_address": "127.0.0.1"}
+ response = transaction["context"]["response"]
+ assert response["status_code"] == 200
+ assert response["headers"]["content-type"] == "text/plain; charset=utf-8"
+
assert span["name"] == "test"
|
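The one-line fix works because ASGI's `http.response.start` message stores the response status under the key `status`; the old guard looked for `status_code`, which ASGI never defines, so `get_data_from_response` silently skipped the field. A rough, illustrative send-wrapper (not the agent's real middleware) showing where that key appears:

```python
class StatusCapturingMiddleware:
    """Illustrative ASGI middleware, not the agent's actual implementation."""

    def __init__(self, app):
        self.app = app

    async def __call__(self, scope, receive, send):
        async def send_wrapper(message):
            # The response status lives under "status" in the
            # "http.response.start" message; there is no "status_code" key,
            # so the original check never matched.
            if message["type"] == "http.response.start":
                status_code = message["status"]
                # ... attach status_code to the current APM transaction ...
            await send(message)

        await self.app(scope, receive, send_wrapper)
```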